spatstat.core/NAMESPACE

## spatstat.core NAMESPACE file

## ................ Import packages ..................
import(stats,graphics,grDevices,utils,methods)
import(spatstat.utils,spatstat.data,spatstat.sparse,spatstat.geom)
import(goftest)
import(Matrix,nlme,rpart)
importFrom(abind,abind)
importFrom(tensor,tensor)
importFrom(mgcv, gam,gam.control,anova.gam,formula.gam,predict.gam,
           print.gam,summary.gam,vcov.gam,s)

## ................ Load dynamic library ..............
## (native routines are registered in init.c)
## (entry points are symbols with prefix "SC_")
useDynLib(spatstat.core, .registration=TRUE, .fixes="SC_")

## ////////// DO NOT EDIT THE FOLLOWING ///////////////
## //////// it is generated automatically /////////////

# ..................................................
# Automatically-generated list of documented objects
# ..................................................
export("$<-.fv") export("accumulateStatus") export("active.interactions") export("adaptcoef") export("adaptive.density") export("addvar") export("adjust.ratfv") export("affine.msr") export("AIC.dppm") export("AIC.kppm") export("AIC.mppm") export("AIC.ppm") export("allstats") export("alltypes") export("ang2rad") export("anova.mppm") export("anova.ppm") export("anova.slrm") export("apply.ssf") export("areadelta2") export("AreaInter") export("as.data.frame.bw.optim") export("as.data.frame.envelope") export("as.data.frame.fv") export("as.function.fv") export("as.function.leverage.ppm") export("as.function.rhohat") export("as.function.ssf") export("as.fv") export("as.fv.bw.optim") export("as.fv.data.frame") export("as.fv.dppm") export("as.fv.fasp") export("as.fv.fv") export("as.fv.kppm") export("as.fv.matrix") export("as.fv.minconfit") export("as.im.leverage.ppm") export("as.im.scan.test") export("as.im.ssf") export("as.interact") export("as.interact.fii") export("as.interact.interact") export("as.interact.ppm") export("as.layered.msr") export("as.owin.dppm") export("as.owin.influence.ppm") export("as.owin.kppm") export("as.owin.leverage.ppm") export("as.owin.msr") export("as.owin.ppm") export("as.owin.quadrattest") export("as.owin.rmhmodel") export("as.owin.slrm") export("as.ppm") export("as.ppm.dppm") export("as.ppm.kppm") export("as.ppm.ppm") export("as.ppm.profilepl") export("as.ppm.rppm") export("as.ppp.influence.ppm") export("as.ppp.ssf") export("assemble.plot.objects") export("auc") export("auc.kppm") export("auc.ppm") export("auc.ppp") export("auc.slrm") export("augment.msr") export("BadGey") export("bandwidth.is.infinite") export("BartCalc") export("bc") export("bc.ppm") export("berman.test") export("bermantestCalc") export("bermantestEngine") export("berman.test.ppm") export("berman.test.ppp") export("bigvaluerule") export("bind.fv") export("bind.ratfv") export("bits.envelope") export("bits.test") export("blankcoefnames") export("blur") export("bt.frame") export("bw.abram") export("bw.CvL") export("bw.CvLHeat") export("bw.diggle") export("bw.frac") export("bw.optim") export("bw.pcf") export("bw.ppl") export("bw.pplHeat") export("bw.relrisk") export("bw.scott") export("bw.scott.iso") export("bw.smoothppp") export("bw.stoyan") export("calc.DR") export("calc.NNIR") export("calc.SAVE") export("calc.SIR") export("calc.TSE") export("cannot.update") export("cauchy.estK") export("cauchy.estpcf") export("cbind.fv") export("CDF") export("CDF.density")
export("cdf.test") export("cdf.test.mppm") export("cdf.test.ppm") export("cdf.test.ppp") export("cdf.test.slrm") export("censtimeCDFest") export("change.default.expand") export("check.separable") export("check.testfun") export("circdensity") export("circticks") export("clarkevans") export("clarkevansCalc") export("clarkevans.test") export("closepaircounts") export("clusterfield") export("clusterfield.character") export("clusterfield.function") export("clusterfield.kppm") export("clusterfit") export("clusterkernel") export("clusterkernel.character") export("clusterkernel.kppm") export("clusterradius") export("clusterradius.character") export("clusterradius.kppm") export("clusterradius.zclustermodel") export("clusterset") export("coef.dppm") export("coef.fii") export("coef<-.fii") export("coef.kppm") export("coef.mppm") export("coef.ppm") export("coef.slrm") export("coef.summary.fii") export("coef.summary.kppm") export("coef.summary.ppm") export("coef.summary.slrm") export("coef.vblogit") export("collapse.anylist") export("collapse.fv") export("compareFit") export("compatible.fasp") export("compatible.fv") export("compatible.rat") export("compileCDF") export("compileK") export("compilepcf") export("Concom") export("condSimCox") export("conform.ratfv") export("contour.leverage.ppm") export("contour.objsurf") export("contour.ssf") export("cor.im") export("cov.im") export("CressieReadName") export("CressieReadStatistic") export("CressieReadSymbol") export("crosspaircounts") export("cutoff2Dkernel") export("CVforPCF") export("damaged.ppm") export("datagen.rpoisppOnLines") export("datagen.runifpointOnLines") export("datagen.runifpoisppOnLines") export("data.mppm") export("data.ppm") export("dclf.progress") export("dclf.sigtrace") export("dclf.test") export("default.clipwindow") export("default.expand") export("default.rmhcontrol") export("deltasuffstat") export("densityAdaptiveKernel") export("densityAdaptiveKernel.ppp") export("densitycrossEngine") export("densityfun") export("densityfun.ppp") export("densityHeat") export("densityHeat.ppp") export("densitypointsEngine") export("density.ppp") export("density.ppplist") export("density.psp") export("density.splitppp") export("densityVoronoi") export("densityVoronoi.ppp") export("deriv.fv") export("detpointprocfamilyfun") export("deviance.ppm") export("deviance.slrm") export("Deviation") export("dfbetas.ppm") export("dfbetas.ppmInfluence") export("dfbetas.slrm") export("dffit") export("dffit.ppm") export("dffit.slrm") export("dg.envelope") export("dg.progress") export("dg.sigtrace") export("dg.test") export("diagnose.ppm") export("diagnose.ppm.engine") export("digestCovariates") export("DiggleGatesStibbard") export("DiggleGratton") export("digital.volume") export("dim.detpointprocfamily") export("dim.fasp") export("dimhat") export("dim.msr") export("dimnames.fasp") export("dimnames<-.fasp") export("dimnames.msr") export("distcdf") export("distributecbind") export("dkernel") export("dknn") export("dmixpois") export("domain.dppm") export("domain.influence.ppm") export("domain.kppm") export("domain.leverage.ppm") export("domain.msr") export("domain.ppm") export("domain.quadrattest") export("domain.rmhmodel") export("domain.slrm") export("doMultiStraussHard") export("dppapproxkernel") export("dppapproxpcf") export("dppBessel") export("dppCauchy") export("dppDpcf") export("dppeigen") export("dppGauss") export("dppkernel") export("dppm") export("dppMatern") export("dppmFixAlgorithm") export("dppmFixIntensity") export("dppparbounds") export("dppPowerExp") 
export("dppspecden") export("dppspecdenrange") export("dummify") export("dummy.ppm") export("edge.Ripley") export("edge.Trans") export("eem") export("eem.ppm") export("eem.slrm") export("effectfun") export("Emark") export("emend") export("emend.ppm") export("emend.slrm") export("envelope") export("envelopeArray") export("envelopeEngine") export("envelope.envelope") export("envelope.hasenvelope") export("envelope.kppm") export("envelope.matrix") export("envelope.pp3") export("envelope.ppm") export("envelope.ppp") export("envelopeProgressData") export("envelope.slrm") export("envelopeTest") export("equalpairs") export("evalCovar") export("evalCovariate") export("evalCovar.ppm") export("evalCovar.slrm") export("eval.fasp") export("eval.fv") export("evalInteraction") export("evalInterEngine") export("evalPairPotential") export("evaluate2Dkernel") export("exactMPLEstrauss") export("expand.owin") export("expandwinPerfect") export("ExpSmoothLog") export("extractAIC.dppm") export("extractAIC.kppm") export("extractAIC.mppm") export("extractAIC.ppm") export("extractAIC.slrm") export("extractAtomicQtests") export("f3Cengine") export("f3engine") export("F3est") export("fakeNeyScot") export("family.vblogit") export("[.fasp") export("fasp") export("Fest") export("Fhazard") export("fii") export("Fiksel") export("fill.coefs") export("findbestlegendpos") export("findcbind") export("findCovariate") export("Finhom") export("fitin") export("fitin.ppm") export("fitin.profilepl") export("fitted.dppm") export("fitted.kppm") export("fitted.mppm") export("fitted.ppm") export("fitted.rppm") export("fitted.slrm") export("fixef.mppm") export("flatfname") export("flipxy.msr") export("FmultiInhom") export("forbid.logi") export("FormatFaspFormulae") export("formula<-") export("formula.dppm") export("formula.fv") export("formula<-.fv") export("formula.kppm") export("formula.ppm") export("formula.slrm") export("fryplot") export("frypoints") export("[.fv") export("[<-.fv") export("fv") export("fvexprmap") export("fvlabelmap") export("fvlabels") export("fvlabels<-") export("fvlegend") export("fvnames") export("fvnames<-") export("g3Cengine") export("g3engine") export("G3est") export("gauss.hermite") export("Gcom") export("Gcross") export("Gdot") export("Gest") export("getCall.mppm") export("getdataname") export("getglmdata") export("getglmfit") export("getglmsubset") export("getppmdatasubset") export("getppmOriginalCovariates") export("getRandomFieldsModelGen") export("getSumFun") export("Geyer") export("geyercounts") export("geyerdelta2") export("Gfox") export("Ginhom") export("GLMpredict") export("Gmulti") export("GmultiInhom") export("good.correction.K") export("Gres") export("hackglmmPQL") export("handle.rshift.args") export("Hardcore") export("harmonic") export("harmonise.fv") export("harmonise.msr") export("harmonize.fv") export("hasenvelope") export("hasglmfit") export("has.offset") export("has.offset.term") export("HermiteCoefs") export("Hest") export("hierarchicalordering") export("HierHard") export("hiermat") export("hierpair.family") export("HierStrauss") export("HierStraussHard") export("ho.engine") export("hopskel") export("hopskel.test") export("hotbox") export("Hybrid") export("hybrid.family") export("ic") export("ic.kppm") export("ic.ppm") export("idw") export("Iest") export("illegal.iformula") export("image.objsurf") export("image.ssf") export("implemented.for.K") export("impliedcoefficients") export("impliedpresence") export("improve.kppm") export("increment.fv") export("[.influence.ppm") 
export("influence.ppm") export("influence.ppmInfluence") export("influence.slrm") export("inforder.family") export("instantiate.interact") export("integral.influence.ppm") export("integral.leverage.ppm") export("integral.msr") export("integral.ssf") export("intensity.detpointprocfamily") export("intensity.dppm") export("intensity.ppm") export("intensity.slrm") export("intensity.zclustermodel") export("interactionfamilyname") export("intermaker") export("ippm") export("is.atomicQtest") export("is.cadlag") export("is.dppm") export("is.expandable") export("is.expandable.ppm") export("is.expandable.rmhmodel") export("is.hybrid") export("is.hybrid.interact") export("is.hybrid.ppm") export("is.interact") export("is.kppm") export("is.lppm") export("is.marked.mppm") export("is.marked.msr") export("is.marked.ppm") export("is.marked.slrm") export("is.mppm") export("is.multitype.mppm") export("is.multitype.msr") export("is.multitype.ppm") export("is.multitype.slrm") export("is.poisson") export("is.poisson.interact") export("is.poisson.kppm") export("is.poisson.mppm") export("is.poisson.ppm") export("is.poisson.rmhmodel") export("is.poisson.slrm") export("is.ppm") export("is.scov") export("is.slrm") export("is.stationary") export("is.stationary.detpointprocfamily") export("is.stationary.dppm") export("is.stationary.kppm") export("is.stationary.ppm") export("is.stationary.rmhmodel") export("is.stationary.slrm") export("Jcross") export("Jdot") export("Jest") export("Jfox") export("Jinhom") export("Jmulti") export("k3engine") export("K3est") export("kaplan.meier") export("Kborder.engine") export("Kcom") export("Kcross") export("Kcross.inhom") export("Kdot") export("Kdot.inhom") export("kernel.factor") export("kernel.moment") export("kernel.squint") export("Kest") export("Kest.fft") export("killinteraction") export("Kinhom") export("Kmark") export("Kmeasure") export("Kmodel") export("Kmodel.detpointprocfamily") export("Kmodel.dppm") export("Kmodel.kppm") export("Kmodel.ppm") export("Kmodel.slrm") export("Kmodel.zclustermodel") export("km.rs") export("km.rs.opt") export("Kmulti") export("Kmulti.inhom") export("Knone.engine") export("Kount") export("Kpcf.kppm") export("kppm") export("kppmCLadap") export("kppmComLik") export("kppm.formula") export("kppmMinCon") export("kppmPalmLik") export("kppm.ppp") export("kppm.quad") export("kraever") export("kraeverRandomFields") export("Krect.engine") export("Kres") export("Kscaled") export("Ksector") export("Kwtsum") export("labels.dppm") export("labels.kppm") export("labels.ppm") export("labels.slrm") export("LambertW") export("laslett") export("Lcross") export("Lcross.inhom") export("Ldot") export("Ldot.inhom") export("LennardJones") export("Lest") export("leverage") export("[.leverage.ppm") export("leverage.ppm") export("leverage.ppmInfluence") export("leverage.slrm") export("lgcp.estK") export("lgcp.estpcf") export("Linhom") export("localK") export("localKcross") export("localKcross.inhom") export("localKdot") export("localKengine") export("localKinhom") export("localKmultiEngine") export("localL") export("localLcross") export("localLcross.inhom") export("localLdot") export("localLinhom") export("localpcf") export("localpcfengine") export("localpcfinhom") export("[.localpcfmatrix") export("localpcfmatrix") export("logi.engine") export("logLik.dppm") export("logLik.kppm") export("logLik.mppm") export("logLik.ppm") export("logLik.slrm") export("logLik.vblogit") export("lohboot") export("lookup2DkernelInfo") export("Lscaled") export("LurkEngine") export("lurking") 
export("lurking.mppm") export("lurking.ppm") export("lurking.ppp") export("lurking.slrm") export("mad.progress") export("mad.sigtrace") export("mad.test") export("makefvlabel") export("markconnect") export("markcorr") export("markcorrint") export("markcrosscorr") export("markmarkscatter") export("markmean") export("marks.ssf") export("marks<-.ssf") export("marktable") export("markvar") export("markvario") export("maskLaslett") export("match2DkernelName") export("match.kernel") export("matclust.estK") export("matclust.estpcf") export("max.fv") export("max.ssf") export("mctest.progress") export("mctest.sigtrace") export("mctestSigtraceEngine") export("mean.leverage.ppm") export("meanlistfv") export("measureContinuous") export("measureDiscrete") export("measureNegative") export("measurePositive") export("measureVariation") export("mincontrast") export("min.fv") export("min.ssf") export("miplot") export("model.covariates") export("model.depends") export("model.frame.dppm") export("modelFrameGam") export("model.frame.kppm") export("model.frame.ppm") export("model.frame.slrm") export("model.images") export("model.images.dppm") export("model.images.kppm") export("model.images.ppm") export("model.images.slrm") export("model.is.additive") export("model.matrix.dppm") export("model.matrix.ippm") export("model.matrix.kppm") export("model.matrix.mppm") export("model.matrix.ppm") export("model.matrix.slrm") export("model.se.image") export("mpl.engine") export("mpl.get.covariates") export("mpl.prepare") export("mpl.usable") export("mppm") export("[.msr") export("msr") export("MultiHard") export("MultiPair.checkmatrix") export("MultiStrauss") export("MultiStraussHard") export("names<-.fv") export("nearest.neighbour") export("newformula") export("newstyle.coeff.handling") export("nnclean") export("nncleanEngine") export("nnclean.pp3") export("nnclean.ppp") export("nncorr") export("nndcumfun") export("nndensity") export("nndensity.ppp") export("nnmean") export("nnorient") export("nnvario") export("nobs.dppm") export("nobs.kppm") export("nobs.mppm") export("nobs.ppm") export("no.trend.ppm") export("npfun") export("objsurf") export("objsurf.dppm") export("objsurfEngine") export("objsurf.kppm") export("objsurf.minconfit") export("Ops.msr") export("optimConverged") export("optimNsteps") export("optimStatus") export("Ord") export("ord.family") export("OrdThresh") export("outdated.interact") export("oversize.quad") export("pairMean") export("pairorient") export("PairPiece") export("PairPotentialType") export("pairsat.family") export("pairs.im") export("pairs.listof") export("pairs.solist") export("Pairwise") export("pairwise.family") export("panel.contour") export("panel.histogram") export("panel.image") export("parameters") export("parameters.dppm") export("parameters.fii") export("parameters.interact") export("parameters.kppm") export("parameters.ppm") export("parameters.profilepl") export("parameters.slrm") export("parres") export("partialModelMatrix") export("pcf") export("pcf3engine") export("pcf3est") export("pcfcross") export("pcfcross.inhom") export("pcfdot") export("pcfdot.inhom") export("pcf.fasp") export("pcf.fv") export("pcfinhom") export("pcfmodel") export("pcfmodel.detpointprocfamily") export("pcfmodel.dppm") export("pcfmodel.kppm") export("pcfmodel.ppm") export("pcfmodel.slrm") export("pcfmodel.zclustermodel") export("pcfmulti") export("pcfmulti.inhom") export("pcf.ppp") export("Penttinen") export("persp.leverage.ppm") export("persp.objsurf") export("pkernel") export("pknn") export("plot.addvar") 
export("plot.bermantest") export("plot.bw.frac") export("plot.bw.optim") export("plot.cdftest") export("plot.diagppm") export("plot.dppm") export("plot.envelope") export("ploterodeimage") export("ploterodewin") export("plot.fasp") export("plot.fii") export("plot.fv") export("plot.influence.ppm") export("plot.kppm") export("plot.laslett") export("plot.leverage.ppm") export("plot.localpcfmatrix") export("plot.lurk") export("plot.minconfit") export("plot.mppm") export("plot.msr") export("plot.objsurf") export("plot.parres") export("plot.plotpairsim") export("plot.plotppm") export("plot.ppm") export("plot.profilepl") export("plot.qqppm") export("plot.quadrattest") export("plot.rho2hat") export("plot.rhohat") export("plot.rppm") export("plot.scan.test") export("plot.slrm") export("plot.spatialcdf") export("plot.ssf") export("plot.studpermutest") export("pmixpois") export("PoisSaddle") export("PoisSaddleArea") export("PoisSaddleGeyer") export("PoisSaddlePairwise") export("Poisson") export("polyLaslett") export("polynom") export("pool") export("pool.anylist") export("pool.envelope") export("pool.fasp") export("pool.fv") export("pool.quadrattest") export("pool.rat") export("ppm") export("ppmCovariates") export("ppm.default") export("ppmDerivatives") export("ppm.formula") export("ppmInfluence") export("ppmInfluenceEngine") export("PPMmodelmatrix") export("ppm.ppp") export("ppm.quad") export("PPversion") export("predict.dppm") export("predict.kppm") export("predict.mppm") export("predict.ppm") export("predict.profilepl") export("predict.rho2hat") export("predict.rhohat") export("predict.rppm") export("predict.slrm") export("predict.vblogit") export("predict.zclustermodel") export("prefixfv") export("print.addvar") export("print.bt.frame") export("print.bw.frac") export("print.bw.optim") export("print.densityfun") export("print.detpointprocfamily") export("print.detpointprocfamilyfun") export("print.diagppm") export("print.dppm") export("print.envelope") export("print.fasp") export("print.fii") export("print.fv") export("print.fvfun") export("print.hasenvelope") export("print.hierarchicalordering") export("print.influence.ppm") export("print.interact") export("print.intermaker") export("print.isf") export("print.kppm") export("print.laslett") export("print.leverage.ppm") export("print.localpcfmatrix") export("print.lurk") export("print.minconfit") export("print.mppm") export("print.msr") export("print.objsurf") export("print.parres") export("print.plotpairsim") export("print.plotppm") export("print.ppm") export("print.profilepl") export("print.qqppm") export("print.quadrattest") export("print.rat") export("print.rho2hat") export("print.rhohat") export("print.rmhcontrol") export("print.rmhexpand") export("print.rmhInfoList") export("print.rmhmodel") export("print.rmhstart") export("print.rppm") export("print.slrm") export("print.Smoothfun") export("print.ssf") export("printStatus") export("printStatusList") export("print.summary.dppm") export("print.summary.fii") export("print.summary.kppm") export("print.summary.mppm") export("print.summary.objsurf") export("print.summary.ppm") export("print.summary.rmhexpand") export("print.summary.slrm") export("print.summary.ssf") export("print.vblogit") export("print.zclustermodel") export("profilepl") export("project.ppm") export("prune.rppm") export("pseudoR2") export("pseudoR2.ppm") export("pseudoR2.slrm") export("psib") export("psib.kppm") export("psst") export("psstA") export("psstG") export("qkernel") export("qknn") export("qmixpois") export("qqplot.ppm") 
export("QQversion") export("quadBlockSizes") export("quad.mppm") export("quad.ppm") export("quadratresample") export("quadrat.test") export("quadrat.testEngine") export("quadrat.test.mppm") export("quadrat.test.ppm") export("quadrat.test.ppp") export("quadrat.test.quadratcount") export("quadrat.test.slrm") export("quadrat.test.splitppp") export("quantile.density") export("rags") export("ragsAreaInter") export("ragsMultiHard") export("RandomFieldsSafe") export("ranef.mppm") export("range.fv") export("range.ssf") export("[.rat") export("rat") export("ratfv") export("rCauchy") export("rcell") export("rcellnumber") export("rDGS") export("rDiggleGratton") export("rdpp") export("reach") export("reach.detpointprocfamily") export("reach.dppm") export("reach.fii") export("reach.interact") export("reach.kppm") export("reach.ppm") export("reach.rmhmodel") export("reach.slrm") export("reach.zclustermodel") export("rebadge.as.crossfun") export("rebadge.as.dotfun") export("rebadge.fv") export("rebadgeLabels") export("reconcile.fv") export("rectcontact") export("reduced.sample") export("reduceformula") export("reheat") export("reincarnate.interact") export("RelevantDeviation") export("reload.or.compute") export("relrisk") export("relrisk.ppm") export("relrisk.ppp") export("rename.fv") export("repul") export("repul.dppm") export("rescale.msr") export("resid1panel") export("resid1plot") export("resid4plot") export("residuals.dppm") export("residuals.kppm") export("residuals.mppm") export("residuals.ppm") export("residuals.slrm") export("resolve.2D.kernel") export("resolveEinfo") export("resolve.foxall.window") export("resolve.lambda") export("resolve.lambda.cross") export("resolve.vargamma.shape") export("response") export("response.dppm") export("response.glm") export("response.kppm") export("response.lm") export("response.mppm") export("response.ppm") export("response.slrm") export("rex") export("rGaussPoisson") export("rHardcore") export("rho2hat") export("rhohat") export("rhohatCalc") export("rhohatEngine") export("rhohat.ppm") export("rhohat.ppp") export("rhohat.quad") export("rhohat.slrm") export("rjitter.psp") export("rkernel") export("rknn") export("rlabel") export("rLGCP") export("rMatClust") export("rMaternI") export("rMaternII") export("rMaternInhibition") export("rmax.Rigid") export("rmax.Ripley") export("rmax.rule") export("rmax.Trans") export("rmh") export("rmhcontrol") export("rmhcontrol.default") export("rmhcontrol.list") export("rmhcontrol.rmhcontrol") export("rmh.default") export("rmhEngine") export("rmhexpand") export("RmhExpandRule") export("rmhmodel") export("rmhmodel.default") export("rmhmodel.list") export("rmhmodel.ppm") export("rmhmodel.rmhmodel") export("rmh.ppm") export("rmhResolveControl") export("rmhResolveExpansion") export("rmhResolveTypes") export("rmhsnoop") export("rmhSnoopEnv") export("rmhstart") export("rmhstart.default") export("rmhstart.list") export("rmhstart.rmhstart") export("rmixpois") export("rMosaicField") export("rMosaicSet") export("rmpoint") export("rmpoint.I.allim") export("rmpoispp") export("rNeymanScott") export("rnoise") export("roc") export("rocData") export("roc.kppm") export("rocModel") export("roc.ppm") export("roc.ppp") export("roc.slrm") export("rose") export("roseContinuous") export("rose.default") export("rose.density") export("rose.fv") export("rose.histogram") export("rotate.msr") export("rotmean") export("rPenttinen") export("rpoint") export("rpoint.multi") export("rpoisline") export("rpoislinetess") export("rpoispp") export("rpoispp3") 
export("rpoisppOnLines") export("rpoisppx") export("rPoissonCluster") export("rppm") export("rPSNCP") export("rshift") export("rshift.ppp") export("rshift.psp") export("rshift.splitppp") export("rSSI") export("rstrat") export("rStrauss") export("rStraussHard") export("rtemper") export("rthin") export("rthinclumps") export("rThomas") export("runifdisc") export("runifpoint") export("runifpoint3") export("runifpointOnLines") export("runifpointx") export("runifpoispp") export("runifpoisppOnLines") export("rVarGamma") export("safeFiniteValue") export("safePositiveValue") export("SatPiece") export("Saturated") export("scalardilate.msr") export("scanBinomLRTS") export("scanLRTS") export("scanmeasure") export("scanmeasure.im") export("scanmeasure.ppp") export("scanPoisLRTS") export("scan.test") export("sdr") export("sdr.ppp") export("sdrPredict") export("second.moment.calc") export("second.moment.engine") export("segregation.test") export("segregation.test.ppp") export("sewpcf") export("sewsmod") export("sharpen") export("sharpen.ppp") export("shift.influence.ppm") export("shift.leverage.ppm") export("shift.msr") export("shift.quadrattest") export("signalStatus") export("simulate.detpointprocfamily") export("simulate.dppm") export("simulate.kppm") export("simulate.mppm") export("simulate.ppm") export("simulate.profilepl") export("simulate.rhohat") export("simulate.slrm") export("simulrecipe") export("slrAssemblePixelData") export("slrm") export("slrmInfluence") export("slr.prepare") export("Smooth") export("smoothcrossEngine") export("Smoothfun") export("Smoothfun.ppp") export("Smooth.fv") export("Smooth.im") export("Smooth.influence.ppm") export("Smooth.leverage.ppm") export("Smooth.msr") export("smoothpointsEngine") export("Smooth.ppp") export("Smooth.solist") export("Smooth.ssf") export("Softcore") export("spatcov") export("spatialcdf") export("spatialCDFframe") export("spatialCDFtest") export("spatialCDFtestCalc") export("spatstatClusterModelInfo") export("spatstatDPPModelInfo") export("spatstatRmhInfo") export("sp.foundclass") export("sp.foundclasses") export("sphere.volume") export("splitHybridInteraction") export("split.msr") export("[.ssf") export("ssf") export("stieltjes") export("stienen") export("stienenSet") export("Strauss") export("strausscounts") export("StraussHard") export("studpermu.test") export("subfits") export("subfits.new") export("subfits.old") export("subspaceDistance") export("suffloc") export("suffstat") export("suffstat.generic") export("suffstat.poisson") export("summarise.trend") export("summary.dppm") export("summary.envelope") export("summary.fii") export("summary.kppm") export("summary.mppm") export("summary.msr") export("summary.objsurf") export("summary.ppm") export("summary.profilepl") export("summary.rmhexpand") export("summary.slrm") export("summary.ssf") export("summary.vblogit") export("terms.dppm") export("terms.kppm") export("terms.mppm") export("terms.ppm") export("terms.slrm") export("thinjump") export("thomas.estK") export("thomas.estpcf") export("thresholdCI") export("thresholdSelect") export("totalVariation") export("transect.im") export("triplet.family") export("Triplets") export("Tstat") export("tweak.coefs") export("tweak.fv.entry") export("tweak.ratfv.entry") export("twostage.envelope") export("twostage.test") export("unitname.dppm") export("unitname<-.dppm") export("unitname.kppm") export("unitname<-.kppm") export("unitname.minconfit") export("unitname<-.minconfit") export("unitname.msr") export("unitname<-.msr") export("unitname.ppm") 
export("unitname<-.ppm") export("unitname.slrm") export("unitname<-.slrm") export("unmark.ssf") export("unstack.msr") export("update.detpointprocfamily") export("update.interact") export("update.ippm") export("update.kppm") export("update.msr") export("update.ppm") export("update.rmhcontrol") export("update.rmhstart") export("update.slrm") export("valid") export("validate2Dkernel") export("validate.angles") export("validate.weights") export("valid.detpointprocfamily") export("valid.ppm") export("valid.slrm") export("vanilla.fv") export("varblock") export("varcount") export("varcountEngine") export("vargamma.estK") export("vargamma.estpcf") export("vcov.kppm") export("vcov.mppm") export("vcov.ppm") export("vcov.slrm") export("versionstring.interact") export("versionstring.ppm") export("Vmark") export("weightedclosepairs") export("which.max.im") export("will.expand") export("Window.dppm") export("Window.influence.ppm") export("Window.kppm") export("Window.leverage.ppm") export("Window.msr") export("Window.ppm") export("Window.quadrattest") export("Window.rmhmodel") export("Window.slrm") export("windows.mppm") export("with.fv") export("with.msr") export("with.ssf") export("X2testEngine") export("zclustermodel") # ....... Special cases ........... S3method("Ops", "msr") # ....... End of special cases ... # ......................................... # Automatically generated list of S3 methods # ......................................... S3method("affine", "msr") S3method("AIC", "dppm") S3method("AIC", "kppm") S3method("AIC", "mppm") S3method("AIC", "ppm") S3method("anova", "mppm") S3method("anova", "ppm") S3method("anova", "slrm") S3method("as.data.frame", "bw.optim") S3method("as.data.frame", "envelope") S3method("as.data.frame", "fv") S3method("as.function", "fv") S3method("as.function", "leverage.ppm") S3method("as.function", "rhohat") S3method("as.function", "ssf") S3method("as.fv", "bw.optim") S3method("as.fv", "data.frame") S3method("as.fv", "dppm") S3method("as.fv", "fasp") S3method("as.fv", "fv") S3method("as.fv", "kppm") S3method("as.fv", "matrix") S3method("as.fv", "minconfit") S3method("as.im", "leverage.ppm") S3method("as.im", "scan.test") S3method("as.im", "ssf") S3method("as.interact", "fii") S3method("as.interact", "interact") S3method("as.interact", "ppm") S3method("as.layered", "msr") S3method("as.owin", "dppm") S3method("as.owin", "influence.ppm") S3method("as.owin", "kppm") S3method("as.owin", "leverage.ppm") S3method("as.owin", "msr") S3method("as.owin", "ppm") S3method("as.owin", "quadrattest") S3method("as.owin", "rmhmodel") S3method("as.owin", "slrm") S3method("as.ppm", "dppm") S3method("as.ppm", "kppm") S3method("as.ppm", "ppm") S3method("as.ppm", "profilepl") S3method("as.ppm", "rppm") S3method("as.ppp", "influence.ppm") S3method("as.ppp", "ssf") S3method("auc", "kppm") S3method("auc", "ppm") S3method("auc", "ppp") S3method("auc", "slrm") S3method("bc", "ppm") S3method("berman.test", "ppm") S3method("berman.test", "ppp") S3method("cbind", "fv") S3method("CDF", "density") S3method("cdf.test", "mppm") S3method("cdf.test", "ppm") S3method("cdf.test", "ppp") S3method("cdf.test", "slrm") S3method("clusterfield", "character") S3method("clusterfield", "function") S3method("clusterfield", "kppm") S3method("clusterkernel", "character") S3method("clusterkernel", "kppm") S3method("clusterradius", "character") S3method("clusterradius", "kppm") S3method("clusterradius", "zclustermodel") S3method("coef", "dppm") S3method("coef", "fii") S3method("coef", "kppm") S3method("coef", "mppm") 
S3method("coef", "ppm") S3method("coef", "slrm") S3method("coef", "summary.fii") S3method("coef", "summary.kppm") S3method("coef", "summary.ppm") S3method("coef", "summary.slrm") S3method("coef", "vblogit") S3method("collapse", "anylist") S3method("collapse", "fv") S3method("compatible", "fasp") S3method("compatible", "fv") S3method("compatible", "rat") S3method("contour", "leverage.ppm") S3method("contour", "objsurf") S3method("contour", "ssf") S3method("densityAdaptiveKernel", "ppp") S3method("densityfun", "ppp") S3method("densityHeat", "ppp") S3method("density", "ppp") S3method("density", "ppplist") S3method("density", "psp") S3method("density", "splitppp") S3method("densityVoronoi", "ppp") S3method("deriv", "fv") S3method("deviance", "ppm") S3method("deviance", "slrm") S3method("dfbetas", "ppm") S3method("dfbetas", "ppmInfluence") S3method("dfbetas", "slrm") S3method("dffit", "ppm") S3method("dffit", "slrm") S3method("dim", "detpointprocfamily") S3method("dim", "fasp") S3method("dim", "msr") S3method("dimnames", "fasp") S3method("dimnames", "msr") S3method("domain", "dppm") S3method("domain", "influence.ppm") S3method("domain", "kppm") S3method("domain", "leverage.ppm") S3method("domain", "msr") S3method("domain", "ppm") S3method("domain", "quadrattest") S3method("domain", "rmhmodel") S3method("domain", "slrm") S3method("eem", "ppm") S3method("eem", "slrm") S3method("emend", "ppm") S3method("emend", "slrm") S3method("envelope", "envelope") S3method("envelope", "hasenvelope") S3method("envelope", "kppm") S3method("envelope", "matrix") S3method("envelope", "pp3") S3method("envelope", "ppm") S3method("envelope", "ppp") S3method("envelope", "slrm") S3method("evalCovar", "ppm") S3method("evalCovar", "slrm") S3method("extractAIC", "dppm") S3method("extractAIC", "kppm") S3method("extractAIC", "mppm") S3method("extractAIC", "ppm") S3method("extractAIC", "slrm") S3method("family", "vblogit") S3method("[", "fasp") S3method("fitin", "ppm") S3method("fitin", "profilepl") S3method("fitted", "dppm") S3method("fitted", "kppm") S3method("fitted", "mppm") S3method("fitted", "ppm") S3method("fitted", "rppm") S3method("fitted", "slrm") S3method("fixef", "mppm") S3method("flipxy", "msr") S3method("formula", "dppm") S3method("formula", "fv") S3method("formula", "kppm") S3method("formula", "ppm") S3method("formula", "slrm") S3method("[", "fv") S3method("getCall", "mppm") S3method("harmonise", "fv") S3method("harmonise", "msr") S3method("harmonize", "fv") S3method("ic", "kppm") S3method("ic", "ppm") S3method("image", "objsurf") S3method("image", "ssf") S3method("[", "influence.ppm") S3method("influence", "ppm") S3method("influence", "ppmInfluence") S3method("influence", "slrm") S3method("integral", "influence.ppm") S3method("integral", "leverage.ppm") S3method("integral", "msr") S3method("integral", "ssf") S3method("intensity", "detpointprocfamily") S3method("intensity", "dppm") S3method("intensity", "ppm") S3method("intensity", "slrm") S3method("intensity", "zclustermodel") S3method("is.expandable", "ppm") S3method("is.expandable", "rmhmodel") S3method("is.hybrid", "interact") S3method("is.hybrid", "ppm") S3method("is.marked", "mppm") S3method("is.marked", "msr") S3method("is.marked", "ppm") S3method("is.marked", "slrm") S3method("is.multitype", "mppm") S3method("is.multitype", "msr") S3method("is.multitype", "ppm") S3method("is.multitype", "slrm") S3method("is.poisson", "interact") S3method("is.poisson", "kppm") S3method("is.poisson", "mppm") S3method("is.poisson", "ppm") S3method("is.poisson", "rmhmodel") 
S3method("is.poisson", "slrm") S3method("is.stationary", "detpointprocfamily") S3method("is.stationary", "dppm") S3method("is.stationary", "kppm") S3method("is.stationary", "ppm") S3method("is.stationary", "rmhmodel") S3method("is.stationary", "slrm") S3method("Kmodel", "detpointprocfamily") S3method("Kmodel", "dppm") S3method("Kmodel", "kppm") S3method("Kmodel", "ppm") S3method("Kmodel", "slrm") S3method("Kmodel", "zclustermodel") S3method("kppm", "formula") S3method("kppm", "ppp") S3method("kppm", "quad") S3method("labels", "dppm") S3method("labels", "kppm") S3method("labels", "ppm") S3method("labels", "slrm") S3method("[", "leverage.ppm") S3method("leverage", "ppm") S3method("leverage", "ppmInfluence") S3method("leverage", "slrm") S3method("[", "localpcfmatrix") S3method("logLik", "dppm") S3method("logLik", "kppm") S3method("logLik", "mppm") S3method("logLik", "ppm") S3method("logLik", "slrm") S3method("logLik", "vblogit") S3method("lurking", "mppm") S3method("lurking", "ppm") S3method("lurking", "ppp") S3method("lurking", "slrm") S3method("marks", "ssf") S3method("max", "fv") S3method("max", "ssf") S3method("mean", "leverage.ppm") S3method("min", "fv") S3method("min", "ssf") S3method("model.frame", "dppm") S3method("model.frame", "kppm") S3method("model.frame", "ppm") S3method("model.frame", "slrm") S3method("model.images", "dppm") S3method("model.images", "kppm") S3method("model.images", "ppm") S3method("model.images", "slrm") S3method("model.matrix", "dppm") S3method("model.matrix", "ippm") S3method("model.matrix", "kppm") S3method("model.matrix", "mppm") S3method("model.matrix", "ppm") S3method("model.matrix", "slrm") S3method("[", "msr") S3method("nnclean", "pp3") S3method("nnclean", "ppp") S3method("nndensity", "ppp") S3method("nobs", "dppm") S3method("nobs", "kppm") S3method("nobs", "mppm") S3method("nobs", "ppm") S3method("objsurf", "dppm") S3method("objsurf", "kppm") S3method("objsurf", "minconfit") S3method("pairs", "im") S3method("pairs", "listof") S3method("pairs", "solist") S3method("parameters", "dppm") S3method("parameters", "fii") S3method("parameters", "interact") S3method("parameters", "kppm") S3method("parameters", "ppm") S3method("parameters", "profilepl") S3method("parameters", "slrm") S3method("pcf", "fasp") S3method("pcf", "fv") S3method("pcfmodel", "detpointprocfamily") S3method("pcfmodel", "dppm") S3method("pcfmodel", "kppm") S3method("pcfmodel", "ppm") S3method("pcfmodel", "slrm") S3method("pcfmodel", "zclustermodel") S3method("pcf", "ppp") S3method("persp", "leverage.ppm") S3method("persp", "objsurf") S3method("plot", "addvar") S3method("plot", "bermantest") S3method("plot", "bw.frac") S3method("plot", "bw.optim") S3method("plot", "cdftest") S3method("plot", "diagppm") S3method("plot", "dppm") S3method("plot", "envelope") S3method("plot", "fasp") S3method("plot", "fii") S3method("plot", "fv") S3method("plot", "influence.ppm") S3method("plot", "kppm") S3method("plot", "laslett") S3method("plot", "leverage.ppm") S3method("plot", "localpcfmatrix") S3method("plot", "lurk") S3method("plot", "minconfit") S3method("plot", "mppm") S3method("plot", "msr") S3method("plot", "objsurf") S3method("plot", "parres") S3method("plot", "plotpairsim") S3method("plot", "plotppm") S3method("plot", "ppm") S3method("plot", "profilepl") S3method("plot", "qqppm") S3method("plot", "quadrattest") S3method("plot", "rho2hat") S3method("plot", "rhohat") S3method("plot", "rppm") S3method("plot", "scan.test") S3method("plot", "slrm") S3method("plot", "spatialcdf") S3method("plot", "ssf") 
S3method("plot", "studpermutest") S3method("pool", "anylist") S3method("pool", "envelope") S3method("pool", "fasp") S3method("pool", "fv") S3method("pool", "quadrattest") S3method("pool", "rat") S3method("ppm", "default") S3method("ppm", "formula") S3method("ppm", "ppp") S3method("ppm", "quad") S3method("predict", "dppm") S3method("predict", "kppm") S3method("predict", "mppm") S3method("predict", "ppm") S3method("predict", "profilepl") S3method("predict", "rho2hat") S3method("predict", "rhohat") S3method("predict", "rppm") S3method("predict", "slrm") S3method("predict", "vblogit") S3method("predict", "zclustermodel") S3method("print", "addvar") S3method("print", "bt.frame") S3method("print", "bw.frac") S3method("print", "bw.optim") S3method("print", "densityfun") S3method("print", "detpointprocfamily") S3method("print", "detpointprocfamilyfun") S3method("print", "diagppm") S3method("print", "dppm") S3method("print", "envelope") S3method("print", "fasp") S3method("print", "fii") S3method("print", "fv") S3method("print", "fvfun") S3method("print", "hasenvelope") S3method("print", "hierarchicalordering") S3method("print", "influence.ppm") S3method("print", "interact") S3method("print", "intermaker") S3method("print", "isf") S3method("print", "kppm") S3method("print", "laslett") S3method("print", "leverage.ppm") S3method("print", "localpcfmatrix") S3method("print", "lurk") S3method("print", "minconfit") S3method("print", "mppm") S3method("print", "msr") S3method("print", "objsurf") S3method("print", "parres") S3method("print", "plotpairsim") S3method("print", "plotppm") S3method("print", "ppm") S3method("print", "profilepl") S3method("print", "qqppm") S3method("print", "quadrattest") S3method("print", "rat") S3method("print", "rho2hat") S3method("print", "rhohat") S3method("print", "rmhcontrol") S3method("print", "rmhexpand") S3method("print", "rmhInfoList") S3method("print", "rmhmodel") S3method("print", "rmhstart") S3method("print", "rppm") S3method("print", "slrm") S3method("print", "Smoothfun") S3method("print", "ssf") S3method("print", "summary.dppm") S3method("print", "summary.fii") S3method("print", "summary.kppm") S3method("print", "summary.mppm") S3method("print", "summary.objsurf") S3method("print", "summary.ppm") S3method("print", "summary.rmhexpand") S3method("print", "summary.slrm") S3method("print", "summary.ssf") S3method("print", "vblogit") S3method("print", "zclustermodel") S3method("prune", "rppm") S3method("pseudoR2", "ppm") S3method("pseudoR2", "slrm") S3method("psib", "kppm") S3method("quadrat.test", "mppm") S3method("quadrat.test", "ppm") S3method("quadrat.test", "ppp") S3method("quadrat.test", "quadratcount") S3method("quadrat.test", "slrm") S3method("quadrat.test", "splitppp") S3method("quantile", "density") S3method("ranef", "mppm") S3method("range", "fv") S3method("range", "ssf") S3method("[", "rat") S3method("reach", "detpointprocfamily") S3method("reach", "dppm") S3method("reach", "fii") S3method("reach", "interact") S3method("reach", "kppm") S3method("reach", "ppm") S3method("reach", "rmhmodel") S3method("reach", "slrm") S3method("reach", "zclustermodel") S3method("relrisk", "ppm") S3method("relrisk", "ppp") S3method("repul", "dppm") S3method("rescale", "msr") S3method("residuals", "dppm") S3method("residuals", "kppm") S3method("residuals", "mppm") S3method("residuals", "ppm") S3method("residuals", "slrm") S3method("response", "dppm") S3method("response", "glm") S3method("response", "kppm") S3method("response", "lm") S3method("response", "mppm") 
S3method("response", "ppm") S3method("response", "slrm") S3method("rhohat", "ppm") S3method("rhohat", "ppp") S3method("rhohat", "quad") S3method("rhohat", "slrm") S3method("rjitter", "psp") S3method("rmhcontrol", "default") S3method("rmhcontrol", "list") S3method("rmhcontrol", "rmhcontrol") S3method("rmh", "default") S3method("rmhmodel", "default") S3method("rmhmodel", "list") S3method("rmhmodel", "ppm") S3method("rmhmodel", "rmhmodel") S3method("rmh", "ppm") S3method("rmhstart", "default") S3method("rmhstart", "list") S3method("rmhstart", "rmhstart") S3method("roc", "kppm") S3method("roc", "ppm") S3method("roc", "ppp") S3method("roc", "slrm") S3method("rose", "default") S3method("rose", "density") S3method("rose", "fv") S3method("rose", "histogram") S3method("rotate", "msr") S3method("rshift", "ppp") S3method("rshift", "psp") S3method("rshift", "splitppp") S3method("scalardilate", "msr") S3method("scanmeasure", "im") S3method("scanmeasure", "ppp") S3method("sdr", "ppp") S3method("segregation.test", "ppp") S3method("sharpen", "ppp") S3method("shift", "influence.ppm") S3method("shift", "leverage.ppm") S3method("shift", "msr") S3method("shift", "quadrattest") S3method("simulate", "detpointprocfamily") S3method("simulate", "dppm") S3method("simulate", "kppm") S3method("simulate", "mppm") S3method("simulate", "ppm") S3method("simulate", "profilepl") S3method("simulate", "rhohat") S3method("simulate", "slrm") S3method("Smoothfun", "ppp") S3method("Smooth", "fv") S3method("Smooth", "im") S3method("Smooth", "influence.ppm") S3method("Smooth", "leverage.ppm") S3method("Smooth", "msr") S3method("Smooth", "ppp") S3method("Smooth", "solist") S3method("Smooth", "ssf") S3method("split", "msr") S3method("[", "ssf") S3method("summary", "dppm") S3method("summary", "envelope") S3method("summary", "fii") S3method("summary", "kppm") S3method("summary", "mppm") S3method("summary", "msr") S3method("summary", "objsurf") S3method("summary", "ppm") S3method("summary", "profilepl") S3method("summary", "rmhexpand") S3method("summary", "slrm") S3method("summary", "ssf") S3method("summary", "vblogit") S3method("terms", "dppm") S3method("terms", "kppm") S3method("terms", "mppm") S3method("terms", "ppm") S3method("terms", "slrm") S3method("unitname", "dppm") S3method("unitname", "kppm") S3method("unitname", "minconfit") S3method("unitname", "msr") S3method("unitname", "ppm") S3method("unitname", "slrm") S3method("unmark", "ssf") S3method("unstack", "msr") S3method("update", "detpointprocfamily") S3method("update", "interact") S3method("update", "ippm") S3method("update", "kppm") S3method("update", "msr") S3method("update", "ppm") S3method("update", "rmhcontrol") S3method("update", "rmhstart") S3method("update", "slrm") S3method("valid", "detpointprocfamily") S3method("valid", "ppm") S3method("valid", "slrm") S3method("vcov", "kppm") S3method("vcov", "mppm") S3method("vcov", "ppm") S3method("vcov", "slrm") S3method("Window", "dppm") S3method("Window", "influence.ppm") S3method("Window", "kppm") S3method("Window", "leverage.ppm") S3method("Window", "msr") S3method("Window", "ppm") S3method("Window", "quadrattest") S3method("Window", "rmhmodel") S3method("Window", "slrm") S3method("with", "fv") S3method("with", "msr") S3method("with", "ssf") # ......................................... # Assignment methods # ......................................... 
S3method("$<-", "fv") S3method("coef<-", "fii") S3method("dimnames<-", "fasp") S3method("formula<-", "fv") S3method("[<-", "fv") S3method("marks<-", "ssf") S3method("names<-", "fv") S3method("unitname<-", "dppm") S3method("unitname<-", "kppm") S3method("unitname<-", "minconfit") S3method("unitname<-", "msr") S3method("unitname<-", "ppm") S3method("unitname<-", "slrm") # ......................................... # End of methods # ......................................... spatstat.core/man/0000755000176200001440000000000014141452520013615 5ustar liggesusersspatstat.core/man/as.ppm.Rd0000644000176200001440000000365014141452520015306 0ustar liggesusers\name{as.ppm} \alias{as.ppm} \alias{as.ppm.ppm} \alias{as.ppm.profilepl} \alias{as.ppm.kppm} \alias{as.ppm.dppm} \title{Extract Fitted Point Process Model} \description{ Extracts the fitted point process model from some kind of fitted model. } \usage{ as.ppm(object) \method{as.ppm}{ppm}(object) \method{as.ppm}{profilepl}(object) \method{as.ppm}{kppm}(object) \method{as.ppm}{dppm}(object) } \arguments{ \item{object}{An object that includes a fitted Poisson or Gibbs point process model. An object of class \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} or \code{"dppm"} or possibly other classes. } } \details{ The function \code{as.ppm} extracts the fitted point process model (of class \code{"ppm"}) from a suitable object. The function \code{as.ppm} is generic, with methods for the classes \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} and \code{"dppm"}, and possibly for other classes. For the class \code{"profilepl"} of models fitted by maximum profile pseudolikelihood, the method \code{as.ppm.profilepl} extracts the fitted point process model (with the optimal values of the irregular parameters). For the class \code{"kppm"} of models fitted by minimum contrast (or Palm or composite likelihood) using Waagepetersen's two-step estimation procedure (see \code{\link{kppm}}), the method \code{as.ppm.kppm} extracts the Poisson point process model that is fitted in the first stage of the procedure. The behaviour for the class \code{"dppm"} is analogous to the \code{"kppm"} case above. } \value{ An object of class \code{"ppm"}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{ppm}}, \code{\link{profilepl}}. } \examples{ # fit a model by profile maximum pseudolikelihood rvals <- data.frame(r=(1:10)/100) pfit <- profilepl(rvals, Strauss, cells, ~1) # extract the fitted model fit <- as.ppm(pfit) } \keyword{spatial} \keyword{models} spatstat.core/man/plot.envelope.Rd0000644000176200001440000000273114141452520016701 0ustar liggesusers\name{plot.envelope} \alias{plot.envelope} \title{Plot a Simulation Envelope} \description{ Plot method for the class \code{"envelope"}. } \usage{ \method{plot}{envelope}(x, \dots, main) } \arguments{ \item{x}{ An object of class \code{"envelope"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{main}{Main title for plot.} \item{\dots}{ Extra arguments passed to \code{\link{plot.fv}}. } } \value{ Either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"envelope"} of simulation envelopes. Objects of this class are created by the command \code{\link{envelope}}. This plot method is currently identical to \code{\link{plot.fv}}. Its default behaviour is to shade the region between the upper and lower envelopes in a light grey colour. 
}
\examples{
  data(cells)
  E <- envelope(cells, Kest, nsim=19)
  plot(E)
  plot(E, sqrt(./pi) ~ r)
}
\seealso{
  \code{\link{envelope}},
  \code{\link{plot.fv}}
}
\author{\adrian and \rolf}
\keyword{spatial}
\keyword{hplot}

spatstat.core/man/reduced.sample.Rd

\name{reduced.sample}
\alias{reduced.sample}
\title{Reduced Sample Estimator using Histogram Data}
\description{
  Compute the Reduced Sample estimator of a survival time distribution
  function, from histogram data.
}
\usage{
reduced.sample(nco, cen, ncc, show=FALSE, uppercen=0)
}
\arguments{
  \item{nco}{
    vector of counts giving the histogram of uncensored observations
    (those survival times that are less than or equal to the censoring time)
  }
  \item{cen}{
    vector of counts giving the histogram of censoring times
  }
  \item{ncc}{
    vector of counts giving the histogram of censoring times
    for the uncensored observations only
  }
  \item{uppercen}{
    number of censoring times greater than the rightmost histogram
    breakpoint (if there are any)
  }
  \item{show}{
    Logical value controlling the amount of detail returned by the
    function value (see below)
  }
}
\value{
  If \code{show = FALSE}, a numeric vector giving the values of the
  reduced sample estimator.
  If \code{show=TRUE}, a list with three components which are vectors
  of equal length,
  \item{rs}{Reduced sample estimate of the survival time c.d.f. \eqn{F(t)}}
  \item{numerator}{numerator of the reduced sample estimator}
  \item{denominator}{denominator of the reduced sample estimator}
}
\details{
  This function is needed mainly for internal use in \pkg{spatstat},
  but may be useful in other applications where you want to form the
  reduced sample estimator from a huge dataset.

  Suppose \eqn{T_i}{T[i]} are the survival times of individuals
  \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)}
  which we wish to estimate. Suppose these times are right-censored
  by random censoring times \eqn{C_i}{C[i]}.
  Thus the observations consist of right-censored survival times
  \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])}
  and non-censoring indicators
  \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])}
  for each \eqn{i}.

  If the number of observations \eqn{M} is large, it is efficient to
  use histograms.
  Form the histogram \code{cen} of all censoring times \eqn{C_i}{C[i]}.
  That is, \code{cen[k]} counts the number of values \eqn{C_i}{C[i]} in
  the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1}
  and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}.
  Also form the histogram \code{nco} of all uncensored times,
  i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1},
  and the histogram \code{ncc} of all censoring times for which the
  survival time is uncensored,
  i.e. those \eqn{C_i}{C[i]} such that \eqn{D_i=1}{D[i]=1}.
  These three histograms are the arguments passed to
  \code{reduced.sample}.

  The return value \code{rs} is the reduced-sample estimator of the
  distribution function \eqn{F(t)}. Specifically,
  \code{rs[k]} is the reduced sample estimate of \code{F(breaks[k+1])}.
  The value is exact, i.e. the use of histograms does not introduce
  any approximation error.

  Note that, for the results to be valid, either the histogram breaks
  must span the censoring times, or the number of censoring times that
  do not fall in a histogram cell must have been counted in
  \code{uppercen}.
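  For example, a minimal sketch of the histogram construction
  (here \code{tt} and \code{cc} are hypothetical vectors of
  nonnegative survival times and censoring times; they are not
  part of this package):
  \preformatted{
    ob <- pmin(tt, cc)        # right-censored survival times
    d  <- (tt <= cc)          # non-censoring indicators
    ## breaks must span the censoring times (or use 'uppercen')
    breaks <- seq(0, max(cc), length.out=101)
    nco <- hist(ob[d], breaks=breaks, plot=FALSE)$counts
    cen <- hist(cc,    breaks=breaks, plot=FALSE)$counts
    ncc <- hist(cc[d], breaks=breaks, plot=FALSE)$counts
    Fhat <- reduced.sample(nco, cen, ncc)
  }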
}
\seealso{
  \code{\link{kaplan.meier}},
  \code{\link{km.rs}}
}
\author{\adrian and \rolf}
\keyword{spatial}
\keyword{nonparametric}

spatstat.core/man/rMaternII.Rd

\name{rMaternII}
\alias{rMaternII}
\title{Simulate Matern Model II}
\description{
  Generate a random point pattern, a simulated realisation of the
  \Matern Model II inhibition process.
}
\usage{
rMaternII(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ...,
          nsim=1, drop=TRUE)
}
\arguments{
  \item{kappa}{
    Intensity of the Poisson process of proposal points.
    A single positive number.
  }
  \item{r}{
    Inhibition distance.
  }
  \item{win}{
    Window in which to simulate the pattern.
    An object of class \code{"owin"}
    or something acceptable to \code{\link{as.owin}}.
    Alternatively a higher-dimensional box of class
    \code{"box3"} or \code{"boxx"}.
  }
  \item{stationary}{
    Logical. Whether to start with a stationary process of proposal points
    (\code{stationary=TRUE}) or to generate the proposal points
    only inside the window (\code{stationary=FALSE}).
  }
  \item{\dots}{Ignored.}
  \item{nsim}{Number of simulated realisations to be generated.}
  \item{drop}{
    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default),
    the result will be a point pattern, rather than a list
    containing a point pattern.
  }
}
\value{
  A point pattern if \code{nsim=1}, or a list of point patterns if
  \code{nsim > 1}. Each point pattern is normally an object of class
  \code{"ppp"}, but may be of class \code{"pp3"} or \code{"ppx"}
  depending on the window.
}
\details{
  This algorithm generates one or more realisations of \Matern's Model II
  inhibition process inside the window \code{win}.

  The process is constructed by first generating a uniform Poisson
  point process of ``proposal'' points with intensity \code{kappa}.
  If \code{stationary = TRUE} (the default), the proposal points are
  generated in a window larger than \code{win}, so that the process of
  proposals is effectively stationary.
  If \code{stationary=FALSE} then the proposal points are only generated
  inside the window \code{win}.

  Then each proposal point is marked by an ``arrival time'', a number
  uniformly distributed in \eqn{[0,1]} independently of other variables.
  A proposal point is deleted if it lies within \code{r} units' distance
  of another proposal point \emph{that has an earlier arrival time}.
  Otherwise it is retained.
  The retained points constitute \Matern's Model II.

  The difference between \Matern's Model I and II is the italicised
  statement above. Model II has a higher intensity for the same
  parameter values.
}
\seealso{
  \code{\link{rpoispp}},
  \code{\link{rMatClust}},
  \code{\link{rMaternI}}
}
\examples{
  X <- rMaternII(20, 0.05)
  Y <- rMaternII(20, 0.05, stationary=FALSE)
}
\author{
  \adrian, Ute Hahn, \rolf and \ege
}
\keyword{spatial}
\keyword{datagen}

spatstat.core/man/pcf.ppp.Rd

\name{pcf.ppp}
\alias{pcf.ppp}
\title{Pair Correlation Function of Point Pattern}
\description{
  Estimates the pair correlation function of a point pattern using
  kernel methods.
}
\usage{
\method{pcf}{ppp}(X, \dots, r = NULL, kernel="epanechnikov", bw=NULL,
      stoyan=0.15, correction=c("translate", "Ripley"),
      divisor = c("r", "d"), var.approx = FALSE,
      domain=NULL, ratio=FALSE, close=NULL)
}
\arguments{
  \item{X}{
    A point pattern (object of class \code{"ppp"}).
  }
  \item{r}{
    Vector of values for the argument \eqn{r} at which \eqn{g(r)}
    should be evaluated. There is a sensible default.
  }
  \item{kernel}{
    Choice of smoothing kernel, passed to \code{\link{density.default}}.
  }
  \item{bw}{
    Bandwidth for smoothing kernel, passed to \code{\link{density.default}}.
    Either a single numeric value giving the standard deviation of the
    kernel, or a character string specifying a bandwidth selection rule
    recognised by \code{\link{density.default}}.
    If \code{bw} is missing or \code{NULL},
    the default value is computed using Stoyan's rule of thumb:
    see Details.
  }
  \item{\dots}{
    Other arguments passed to the kernel density estimation
    function \code{\link{density.default}}.
  }
  \item{stoyan}{
    Coefficient for Stoyan's bandwidth selection rule; see Details.
  }
  \item{correction}{
    Edge correction. A character vector specifying the choice
    (or choices) of edge correction. See Details.
  }
  \item{divisor}{
    Choice of divisor in the estimation formula:
    either \code{"r"} (the default) or \code{"d"}. See Details.
  }
  \item{var.approx}{
    Logical value indicating whether to compute an analytic
    approximation to the variance of the estimated pair correlation.
  }
  \item{domain}{
    Optional. Calculations will be restricted to this subset
    of the window. See Details.
  }
  \item{ratio}{
    Logical.
    If \code{TRUE}, the numerator and denominator of
    each edge-corrected estimate will also be saved,
    for use in analysing replicated point patterns.
  }
  \item{close}{
    Advanced use only. Precomputed data. See section on Advanced Use.
  }
}
\value{
  A function value table (object of class \code{"fv"}).
  Essentially a data frame containing the variables
  \item{r}{
    the vector of values of the argument \eqn{r}
    at which the pair correlation function \eqn{g(r)} has been estimated
  }
  \item{theo}{
    vector of values equal to 1,
    the theoretical value of \eqn{g(r)} for the Poisson process
  }
  \item{trans}{
    vector of values of \eqn{g(r)} estimated by translation correction
  }
  \item{iso}{
    vector of values of \eqn{g(r)} estimated by Ripley isotropic correction
  }
  \item{v}{
    vector of approximate values of the variance of the estimate of \eqn{g(r)}
  }
  as required.

  If \code{ratio=TRUE} then the return value also has two attributes
  called \code{"numerator"} and \code{"denominator"}
  which are \code{"fv"} objects containing the numerators and
  denominators of each estimate of \eqn{g(r)}.

  The return value also has an attribute \code{"bw"} giving the
  smoothing bandwidth that was used.
}
\details{
  The pair correlation function \eqn{g(r)}
  is a summary of the dependence between points in a spatial point
  process. The best intuitive interpretation is the following:
  the probability \eqn{p(r)} of finding two points at locations
  \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to
  \deqn{
    p(r) = \lambda^2 g(r) \,{\rm d}x \, {\rm d}y
  }{
    p(r) = lambda^2 * g(r) dx dy
  }
  where \eqn{\lambda}{lambda} is the intensity of the point process.
  For a completely random (uniform Poisson) process,
  \eqn{p(r) = \lambda^2 \,{\rm d}x \, {\rm d}y}{p(r) = lambda^2 dx dy}
  so \eqn{g(r) = 1}.
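  As a quick sanity check of this interpretation, one can estimate
  \eqn{g} for a completely random pattern (a minimal sketch, using
  \code{\link{rpoispp}} from this package):
  \preformatted{
    X <- rpoispp(100)   # homogeneous Poisson process, intensity 100
    g <- pcf(X)
    plot(g)             # estimate fluctuates around the value 1
  }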
Formally, the pair correlation function of a stationary point process is defined by \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. This routine computes an estimate of \eqn{g(r)} by kernel smoothing. \itemize{ \item If \code{divisor="r"} (the default), then the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \itemize{ \item If \code{correction="translate"} or \code{correction="translation"} then the translation correction is used. For \code{divisor="r"} the translation-corrected estimate is given in equation (15.15), page 284 of Stoyan and Stoyan (1994). \item If \code{correction="Ripley"} or \code{correction="isotropic"} then Ripley's isotropic edge correction is used. For \code{divisor="r"} the isotropic-corrected estimate is given in equation (15.18), page 285 of Stoyan and Stoyan (1994). \item If \code{correction="none"} then no edge correction is used, that is, an uncorrected estimate is computed. } Multiple corrections can be selected. The default is \code{correction=c("translate", "Ripley")}. Alternatively \code{correction="all"} selects all options; \code{correction="best"} selects the option which has the best statistical performance; \code{correction="good"} selects the option which is the best compromise between statistical performance and speed of computation. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density.default}}. The default is the Epanechnikov kernel, recommended by Stoyan and Stoyan (1994, page 285). The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Bandwidth is defined as the standard deviation of the kernel; see the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with half-width \code{h}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. Stoyan and Stoyan (1994, page 285) recommend using the Epanechnikov kernel with support \eqn{[-h,h]} chosen by the rule of thumb \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. See equation (15.16). If \code{bw} is missing or \code{NULL}, then this rule of thumb will be applied. The argument \code{stoyan} determines the value of \eqn{c}. The smoothing bandwidth that was used in the calculation is returned as an attribute of the final result. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. 
If it is specified, \code{r} must be a vector of increasing numbers starting from \code{r[1] = 0}, and \code{max(r)} must not exceed half the diameter of the window. If the argument \code{domain} is given, estimation will be restricted to this region. That is, the estimate of \eqn{g(r)} will be based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. To compute a confidence band for the true value of the pair correlation function, use \code{\link{lohboot}}. If \code{var.approx = TRUE}, the variance of the estimate of the pair correlation will also be calculated using an analytic approximation (Illian et al, 2008, page 234) which is valid for stationary point processes which are not too clustered. This calculation is not yet implemented when the argument \code{domain} is given. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. } \references{ Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{density.default}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{lohboot}}. } \examples{ X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p, main="pair correlation function for X") # indicates inhibition at distances r < 0.3 pd <- pcf(X, divisor="d") # compare estimates plot(p, cbind(iso, theo) ~ r, col=c("blue", "red"), ylim.covers=0, main="", lwd=c(2,1), lty=c(1,3), legend=FALSE) plot(pd, iso ~ r, col="green", lwd=2, add=TRUE) legend("center", col=c("blue", "green"), lty=1, lwd=2, legend=c("divisor=r","divisor=d")) # calculate approximate variance and show POINTWISE confidence bands pv <- pcf(X, var.approx=TRUE) plot(pv, cbind(iso, iso+2*sqrt(v), iso-2*sqrt(v)) ~ r) } \author{ \spatstatAuthors and Martin Hazelton. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rmpoispp.Rd0000644000176200001440000001741214141452520015762 0ustar liggesusers\name{rmpoispp} \alias{rmpoispp} \title{Generate Multitype Poisson Point Pattern} \description{ Generate a random point pattern, a realisation of the (homogeneous or inhomogeneous) multitype Poisson process. } \usage{ rmpoispp(lambda, lmax=NULL, win, types, \dots, nsim=1, drop=TRUE, warnwin=!missing(win)) } \arguments{ \item{lambda}{ Intensity of the multitype Poisson process. Either a single positive number, a vector, a \code{function(x,y,m, \dots)}, a pixel image, a list of functions \code{function(x,y, \dots)}, or a list of pixel images. } \item{lmax}{ An upper bound for the value of \code{lambda}. May be omitted } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image or list of images. } \item{types}{ All the possible types for the multitype pattern. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. 
} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{warnwin}{ Logical value specifying whether to issue a warning when \code{win} is ignored. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is multitype (it carries a vector of marks which is a factor). } \details{ This function generates a realisation of the marked Poisson point process with intensity \code{lambda}. Note that the intensity function \eqn{\lambda(x,y,m)}{lambda(x,y,m)} is the average number of points \bold{of type m} per unit area near the location \eqn{(x,y)}. Thus a marked point process with a constant intensity of 10 and three possible types will have an average of 30 points per unit area, with 10 points of each type on average. The intensity function may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform marked Poisson process inside the window \code{win} with intensity \code{lambda} for each type. The total intensity of points of all types is \code{lambda * length(types)}. The argument \code{types} must be given and determines the possible types in the multitype pattern. } \item{vector:}{ If \code{lambda} is a numeric vector, then this algorithm generates a realisation of the stationary marked Poisson process inside the window \code{win} with intensity \code{lambda[i]} for points of type \code{types[i]}. The total intensity of points of all types is \code{sum(lambda)}. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } \item{function:}{ If \code{lambda} is a function, the process has intensity \code{lambda(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. The function \code{lambda} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels equal to \code{types}.) The value \code{lmax}, if present, must be an upper bound on the values of \code{lambda(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{lambda} is a list of functions, the process has intensity \code{lambda[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. The function \code{lambda[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{lmax}, if given, must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } \item{pixel image:}{ If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the intensity at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{lambda} for the pixel nearest to \code{(x,y)}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. 
} \item{list of pixel images:}{ If \code{lambda} is a list of pixel images, then the image \code{lambda[[i]]} determines the intensity of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } } If \code{lmax} is missing, an approximate upper bound will be calculated. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax} for points of each type \code{m}, then randomly deletes or retains each point independently, with retention probability \eqn{p(x,y,m) = \lambda(x,y,m)/\mbox{lmax}}{p(x,y,m) = lambda(x,y,m)/lmax}. } \seealso{ \code{\link{rpoispp}} for unmarked Poisson point process; \code{\link{rmpoint}} for a fixed number of random marked points; \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform bivariate Poisson process with total intensity 100 in unit square pp <- rmpoispp(50, types=c("a","b")) # stationary bivariate Poisson process with intensity A = 30, B = 70 pp <- rmpoispp(c(30,70), types=c("A","B")) pp <- rmpoispp(c(30,70)) # works in any window pp <- rmpoispp(c(30,70), win=letterR, types=c("A","B")) # inhomogeneous lambda(x,y,m) # note argument 'm' is a factor lam <- function(x,y,m) { 50 * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B")) # extra arguments lam <- function(x,y,m,scal) { scal * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B"), scal=50) # list of functions lambda[[i]](x,y) lams <- list(function(x,y){50 * x^2}, function(x,y){20 * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B")) pp <- rmpoispp(lams, win=letterR) # functions with extra arguments lams <- list(function(x,y,scal){5 * scal * x^2}, function(x,y, scal){2 * scal * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B"), scal=10) pp <- rmpoispp(lams, win=letterR, scal=10) # florid example lams <- list(function(x,y){ 100*exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend , function(x,y){ 100*exp(-0.6*x+0.5*y) } # log linear trend ) X <- rmpoispp(lams, win=unit.square(), types=c("on", "off")) # pixel image Z <- as.im(function(x,y){30 * (x^2 + y^3)}, letterR) pp <- rmpoispp(Z, types=c("A","B")) # list of pixel images ZZ <- list( as.im(function(x,y){20 * (x^2 + y^3)}, letterR), as.im(function(x,y){40 * (x^3 + y^2)}, letterR)) pp <- rmpoispp(ZZ, types=c("A","B")) pp <- rmpoispp(ZZ) # randomising an existing point pattern rmpoispp(intensity(amacrine), win=Window(amacrine)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/sdr.Rd0000644000176200001440000000712014141452520014674 0ustar liggesusers\name{sdr} \alias{sdr} \alias{sdr.ppp} \title{ Sufficient Dimension Reduction } \description{ Given a point pattern and a set of predictors, find a minimal set of new predictors, each constructed as a linear combination of the original predictors. } \usage{ sdr(X, covariates, \dots) \method{sdr}{ppp}(X, covariates, method = c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1 = 1, Dim2 = 1, predict=FALSE, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{covariates}{ A list of pixel images (objects of class \code{"im"}) to serve as predictor variables. } \item{method}{ Character string indicating which method to use. See Details. 
} \item{Dim1}{ Dimension of the first order Central Intensity Subspace (applicable when \code{method} is \code{"DR"}, \code{"NNIR"}, \code{"SAVE"} or \code{"TSE"}). } \item{Dim2}{ Dimension of the second order Central Intensity Subspace (applicable when \code{method="TSE"}). } \item{predict}{ Logical value indicating whether to compute the new predictors as well. } \item{\dots}{ Additional arguments (ignored by \code{sdr.ppp}). } } \details{ Given a point pattern \eqn{X} and predictor variables \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}, Sufficient Dimension Reduction methods (Guan and Wang, 2010) attempt to find a minimal set of new predictor variables, each constructed by taking a linear combination of the original predictors, which explain the dependence of \eqn{X} on \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}. The methods do not assume any particular form of dependence of the point pattern on the predictors. The predictors are assumed to be Gaussian random fields. Available methods are: \tabular{ll}{ \code{method="DR"} \tab directional regression \cr \code{method="NNIR"} \tab nearest neighbour inverse regression \cr \code{method="SAVE"} \tab sliced average variance estimation \cr \code{method="SIR"} \tab sliced inverse regression \cr \code{method="TSE"} \tab two-step estimation \cr } The result includes a matrix \code{B} whose columns are estimates of the basis vectors of the space of new predictors. That is, the \code{j}th column of \code{B} expresses the \code{j}th new predictor as a linear combination of the original predictors. If \code{predict=TRUE}, the new predictors are also evaluated. They can also be evaluated using \code{\link{sdrPredict}}. } \value{ A list with components \code{B, M} or \code{B, M1, M2} where \code{B} is a matrix whose columns are estimates of the basis vectors for the space, and \code{M} or \code{M1,M2} are matrices containing estimates of the kernel. If \code{predict=TRUE}, the result also includes a component \code{Y} which is a list of pixel images giving the values of the new predictors. } \examples{ A <- sdr(bei, bei.extra, predict=TRUE) A Y1 <- A$Y[[1]] plot(Y1) points(bei, pch=".", cex=2) # investigate likely form of dependence plot(rhohat(bei, Y1)) } \seealso{ \code{\link{sdrPredict}} to compute the new predictors from the coefficient matrix. \code{\link{dimhat}} to estimate the subspace dimension. \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{spatial} \keyword{multivariate} spatstat.core/man/Extract.ssf.Rd0000644000176200001440000000145614141452520016316 0ustar liggesusers\name{[.ssf} \alias{[.ssf} \title{ Subset of spatially sampled function } \description{ Extract a subset of the data for a spatially sampled function. } \usage{ \method{[}{ssf}(x, i, j, ..., drop) } \arguments{ \item{x}{ Object of class \code{"ssf"}. } \item{i}{ Subset index applying to the locations where the function is sampled. } \item{j}{ Subset index applying to the columns (variables) measured at each location. } \item{\dots, drop}{ Ignored. } } \details{ This is the subset operator for the class \code{"ssf"}. } \value{ Another object of class \code{"ssf"}. } \author{ \adrian. 
} \seealso{ \code{\link{ssf}}, \code{\link{with.ssf}} } \examples{ f <- ssf(cells, data.frame(d=nndist(cells), i=1:42)) f f[1:10,] f[ ,1] } \keyword{spatial} \keyword{manip} spatstat.core/man/fitted.mppm.Rd0000644000176200001440000000530414141452520016335 0ustar liggesusers\name{fitted.mppm} \alias{fitted.mppm} \title{Fitted Conditional Intensity for Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns, compute the fitted conditional intensity of the model at the points of each data pattern, or at the points of the quadrature schemes used to fit the model. } \usage{ \method{fitted}{mppm}(object, ..., type = "lambda", dataonly = FALSE) } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{type}{ Type of fitted values: either \code{"trend"} for the spatial trend, or \code{"lambda"} or \code{"cif"} for the conditional intensity. } \item{dataonly}{ If \code{TRUE}, fitted values are computed only for the points of the data point patterns. If \code{FALSE}, fitted values are computed for the points of the quadrature schemes used to fit the model. } } \details{ This function evaluates the conditional intensity \eqn{\hat\lambda(u,x)}{lambdahat(u,x)} or spatial trend \eqn{\hat{b(u)}}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, for each of the original point patterns \eqn{x} to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated are the points of the quadrature schemes used to fit the model in \code{\link{mppm}}. They include the data points (the points of the original point pattern datasets) and other ``dummy'' points in the window of observation. Use \code{\link{predict.mppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \value{ A list of vectors (one for each row of the original hyperframe, i.e. one vector for each of the original point patterns) containing the values of the fitted conditional intensity or (if \code{type="trend"}) the fitted spatial trend. Entries in these vectors correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{\link{quad.mppm}(object)}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \examples{ model <- mppm(Bugs ~ x, data=hyperframe(Bugs=waterstriders), interaction=Strauss(7)) cifs <- fitted(model) } \seealso{ \code{\link{mppm}}, \code{\link{predict.mppm}} } \keyword{spatial} \keyword{models} spatstat.core/man/anova.ppm.Rd0000644000176200001440000001456714141452520016010 0ustar liggesusers\name{anova.ppm} \alias{anova.ppm} \title{ANOVA for Fitted Point Process Models} \description{ Performs analysis of deviance for one or more fitted point process models. } \usage{ \method{anova}{ppm}(object, \dots, test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) } \arguments{ \item{object}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Optional. Additional objects of class \code{"ppm"}. 
} \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. } \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. } \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } \item{fine}{ Logical value, passed to \code{\link{vcov.ppm}}, indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of variance terms. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link[stats]{anova}} for fitted point process models (objects of class \code{"ppm"}, usually generated by the model-fitting function \code{\link{ppm}}). If the fitted models are all Poisson point processes, then by default, this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"} or \code{test="LRT"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. If \code{test="Rao"} or \code{test="score"}, the \emph{score test} (Rao, 1948) is performed instead. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley et al (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment. } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This implies that the models were fitted using different quadrature schemes (see \code{\link{quadscheme}}) and/or with different edge corrections or different values of the border edge correction distance \code{rbord}. To ensure that models are comparable, check the following: \itemize{ \item the models must all have been fitted to the same point pattern dataset, in the same window. \item all models must have been fitted by the same fitting method as specified by the argument \code{method} in \code{\link{ppm}}. 
\item If some of the models depend on covariates, then they should all have been fitted using the same list of covariates, and using \code{allcovar=TRUE} to ensure that the same quadrature scheme is used. \item all models must have been fitted using the same edge correction as specified by the arguments \code{correction} and \code{rbord}. If you did not specify the value of \code{rbord}, then it may have taken a different value for different models. The default value of \code{rbord} is equal to zero for a Poisson model, and otherwise equals the reach (interaction distance) of the interaction term (see \code{\link{reach}}). To ensure that the models are comparable, set \code{rbord} to equal the maximum reach of the interactions that you are fitting. } } } } \seealso{ \code{\link{ppm}}, \code{\link{vcov.ppm}} } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \examples{ mod0 <- ppm(swedishpines ~1) modx <- ppm(swedishpines ~x) # Likelihood ratio test anova(mod0, modx, test="Chi") # Score test anova(mod0, modx, test="Rao") # Single argument modxy <- ppm(swedishpines ~x + y) anova(modxy, test="Chi") # Adjusted composite likelihood ratio test modP <- ppm(swedishpines ~1, rbord=9) modS <- ppm(swedishpines ~1, Strauss(9)) anova(modP, modS, test="Chi") } \references{ Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. Rao, C.R. (1948) Large sample tests of statistical hypotheses concerning several parameters with applications to problems of estimation. \emph{Proceedings of the Cambridge Philosophical Society} \bold{44}, 50--57. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/G3est.Rd0000644000176200001440000000741714141452520015102 0ustar liggesusers\name{G3est} \Rdversion{1.1} \alias{G3est} \title{ Nearest Neighbour Distance Distribution Function of a Three-Dimensional Point Pattern } \description{ Estimates the nearest-neighbour distance distribution function \eqn{G_3(r)}{G3(r)} from a three-dimensional point pattern. } \usage{ G3est(X, ..., rmax = NULL, nrval = 128, correction = c("rs", "km", "Hanisch")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. 
} } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the nearest-neighbour function is \deqn{ G_3(r) = P(d^\ast(x,\Phi) \le r \mid x \in \Phi) }{ G3(r) = P(d*(x,Phi) <= r | x in Phi) } the cumulative distribution function of the distance \eqn{d^\ast(x,\Phi)}{d*(x,Phi)} from a typical point \eqn{x} in \eqn{\Phi}{Phi} to its nearest neighbour, i.e. to the nearest \emph{other} point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The nearest neighbour function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. For each data point, the distance to the nearest neighbour is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{G_3(r)}{G3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"Hanisch"}:}{ the three-dimensional generalisation of the Hanisch estimator (Hanisch, 1984). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A large value of \code{nrval} is required in order to avoid discretisation effects (due to the use of histograms in the calculation). } \seealso{ \code{\link{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{F3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} for other summary functions of a three-dimensional point pattern. \code{\link{Gest}} to estimate the empty space function of point patterns in two dimensions. } \examples{ X <- rpoispp3(42) Z <- G3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/quantile.density.Rd0000644000176200001440000000451114141452520017405 0ustar liggesusers\name{quantile.density} \alias{quantile.density} \title{ Quantiles of a Density Estimate } \description{ Given a kernel estimate of a probability density, compute quantiles. } \usage{ \method{quantile}{density}(x, probs = seq(0, 1, 0.25), names = TRUE, \dots, warn = TRUE) } \arguments{ \item{x}{ Object of class \code{"density"} computed by a method for \code{\link[stats]{density}} } \item{probs}{ Numeric vector of probabilities for which the quantiles are required. } \item{names}{ Logical value indicating whether to attach names (based on \code{probs}) to the result. } \item{\dots}{ Ignored. 
} \item{warn}{ Logical value indicating whether to issue a warning if the density estimate \code{x} had to be renormalised because it was computed in a restricted interval. } } \details{ This function calculates quantiles of the probability distribution whose probability density has been estimated and stored in the object \code{x}. The object \code{x} must belong to the class \code{"density"}, and would typically have been obtained from a call to the function \code{\link[stats]{density}}. The probability density is first normalised so that the total probability is equal to 1. A warning is issued if the density estimate was restricted to an interval (i.e. if \code{x} was created by a call to \code{\link[stats]{density}} which included either of the arguments \code{from} and \code{to}). Next, the density estimate is numerically integrated to obtain an estimate of the cumulative distribution function \eqn{F(x)}. Then for each desired probability \eqn{p}, the algorithm finds the corresponding quantile \eqn{q}. The quantile \eqn{q} corresponding to probability \eqn{p} satisfies \eqn{F(q) = p} up to the resolution of the grid of values contained in \code{x}. The quantile is computed from the right, that is, \eqn{q} is the smallest available value of \eqn{x} such that \eqn{F(x) \ge p}{F(x) >= p}. } \value{ A numeric vector containing the quantiles. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link[stats]{quantile}}, \code{\link{quantile.ewcdf}}, \code{\link{quantile.im}}, \code{\link{CDF}}. } \examples{ dd <- density(runif(10)) quantile(dd) } \keyword{methods} \keyword{univar} \keyword{nonparametric} spatstat.core/man/rppm.Rd0000644000176200001440000000365114141452520015067 0ustar liggesusers\name{rppm} \alias{rppm} \title{ Recursively Partitioned Point Process Model } \description{ Fits a recursive partition model to point pattern data. } \usage{ rppm(\dots, rpargs=list()) } \arguments{ \item{\dots}{ Arguments passed to \code{\link{ppm}} specifying the point pattern data and the explanatory covariates. } \item{rpargs}{ Optional list of arguments passed to \code{\link[rpart]{rpart}} controlling the recursive partitioning procedure. } } \details{ This function attempts to find a simple rule for predicting low and high intensity regions of points in a point pattern, using explanatory covariates. The arguments \code{\dots} specify the point pattern data and explanatory covariates in the same way as they would be in the function \code{\link{ppm}}. The recursive partitioning algorithm \code{\link[rpart]{rpart}} is then used to find a partitioning rule. } \value{ An object of class \code{"rppm"}. There are methods for \code{print}, \code{plot}, \code{fitted}, \code{predict} and \code{prune} for this class. } \references{ Breiman, L., Friedman, J. H., Olshen, R. A., and Stone, C. J. (1984) \emph{Classification and Regression Trees}. Wadsworth. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.rppm}}, \code{\link{predict.rppm}}, \code{\link{prune.rppm}}. } \examples{ # New Zealand trees data: trees planted along border # Use covariates 'x', 'y' nzfit <- rppm(nztrees ~ x + y) nzfit prune(nzfit, cp=0.035) # Murchison gold data: numeric and logical covariates mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # mfit <- rppm(gold ~ dfault + greenstone, data=mur) mfit # Gorillas data: factor covariates # (symbol '.' indicates 'all variables') gfit <- rppm(unmark(gorillas) ~ . 
, data=gorillas.extra) gfit } \keyword{spatial} \keyword{models} spatstat.core/man/anova.slrm.Rd0000644000176200001440000000307014141452520016164 0ustar liggesusers\name{anova.slrm} \Rdversion{1.1} \alias{anova.slrm} \title{ Analysis of Deviance for Spatial Logistic Regression Models } \description{ Performs Analysis of Deviance for two or more fitted Spatial Logistic Regression models. } \usage{ \method{anova}{slrm}(object, ..., test = NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ additional objects of the same type (optional). } \item{test}{ a character string, (partially) matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, indicating the reference distribution that should be used to compute \eqn{p}-values. } } \details{ This is a method for \code{\link[stats]{anova}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided \eqn{p}-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. } \value{ An object of class \code{"anova"}, inheriting from class \code{"data.frame"}, representing the analysis of deviance table. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit0 <- slrm(X ~ 1) fit1 <- slrm(X ~ x+y) anova(fit0, fit1, test="Chi") } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/hopskel.Rd0000644000176200001440000000625014141452520015554 0ustar liggesusers\name{hopskel} \alias{hopskel} \alias{hopskel.test} \title{Hopkins-Skellam Test} \description{ Perform the Hopkins-Skellam test of Complete Spatial Randomness, or simply calculate the test statistic. } \usage{ hopskel(X) hopskel.test(X, \dots, alternative=c("two.sided", "less", "greater", "clustered", "regular"), method=c("asymptotic", "MonteCarlo"), nsim=999) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{method}{ Method of performing the test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } \item{\dots}{Ignored.} } \details{ Hopkins and Skellam (1954) proposed a test of Complete Spatial Randomness based on comparing nearest-neighbour distances with point-event distances. If the point pattern \code{X} contains \code{n} points, we first compute the nearest-neighbour distances \eqn{P_1, \ldots, P_n}{P[1], ..., P[n]} so that \eqn{P_i}{P[i]} is the distance from the \eqn{i}th data point to the nearest other data point. Then we generate another completely random pattern \code{U} with the same number \code{n} of points, and compute for each point of \code{U} the distance to the nearest point of \code{X}, giving distances \eqn{I_1, \ldots, I_n}{I[1], ..., I[n]}. The test statistic is \deqn{ A = \frac{\sum_i P_i^2}{\sum_i I_i^2} }{ A = (sum[i] P[i]^2) / (sum[i] I[i]^2) } The null distribution of \eqn{A} is roughly an \eqn{F} distribution with shape parameters \eqn{(2n,2n)}. (This is equivalent to using the test statistic \eqn{H=A/(1+A)} and referring \eqn{H} to the Beta distribution with parameters \eqn{(n,n)}). 
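As an illustrative sketch (not the supported interface; the function \code{hopskel} described below does this for you, and the random comparison pattern makes the result vary between runs), the statistic can be computed directly from its definition, assuming the spatstat packages are loaded:

  # hand-rolled Hopkins-Skellam statistic for the redwood data
  X <- redwood
  U <- runifpoint(npoints(X), Window(X))   # completely random comparison pattern
  P <- nndist(X)                           # nearest-neighbour distances P[i]
  I <- nncross(U, X, what="dist")          # point-event distances I[i]
  A <- sum(P^2) / sum(I^2)                 # values < 1 suggest clustering
  A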
The function \code{hopskel} calculates the Hopkins-Skellam test statistic \eqn{A}, and returns its numeric value. This can be used as a simple summary of spatial pattern: the value \eqn{A=1} is consistent with Complete Spatial Randomness, while values \eqn{A < 1} are consistent with spatial clustering, and values \eqn{A > 1} are consistent with spatial regularity. The function \code{hopskel.test} performs the test. If \code{method="asymptotic"} (the default), the test statistic \eqn{A} is referred to the \eqn{F} distribution. If \code{method="MonteCarlo"}, a Monte Carlo test is performed using \code{nsim} simulated point patterns. } \value{ The value of \code{hopskel} is a single number. The value of \code{hopskel.test} is an object of class \code{"htest"} representing the outcome of the test. It can be printed. } \references{ Hopkins, B. and Skellam, J.G. (1954) A new method of determining the type of distribution of plant individuals. \emph{Annals of Botany} \bold{18}, 213--227. } \seealso{ \code{\link{clarkevans}}, \code{\link{clarkevans.test}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ hopskel(redwood) hopskel.test(redwood, alternative="clustered") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat.core/man/Gcom.Rd0000644000176200001440000002247614141452520014776 0ustar liggesusers\name{Gcom} \Rdversion{1.1} \alias{Gcom} \title{ Model Compensator of Nearest Neighbour Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the nearest neighbour distance distribution function \eqn{G} based on the fitted model (as well as the usual nonparametric estimates of \eqn{G} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{G} functions serves as a diagnostic for the model. } \usage{ Gcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "Hanisch"), conditional = !is.poisson(object), restrict=FALSE, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection="border", truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{G(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Edge correction(s) to be employed in calculating the compensator. Options are \code{"border"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. 
Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{\dots}{ Extra arguments passed to \code{\link{ppm}}. } \item{ppmcorrection}{ The \code{correction} argument to \code{\link{ppm}}. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes different estimates of the nearest neighbour distance distribution function \eqn{G} of the dataset, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{G} function. It then also computes the \emph{model-compensated} \eqn{G} function. The different functions are returned as columns in a data frame (of class \code{"fv"}). The interpretation of the columns is as follows (ignoring edge corrections): \describe{ \item{\code{bord}:}{ the nonparametric border-correction estimate of \eqn{G(r)}, \deqn{ \hat G(r) = \frac{\sum_i I\{ d_i \le r\} I\{ b_i > r \}}{\sum_i I\{ b_i > r\}} }{ G(r) = (sum[i] I(d[i] <= r) I(b[i] > r))/(sum[i] I(b[i] > r)) } where \eqn{d_i}{d[i]} is the distance from the \eqn{i}-th data point to its nearest neighbour, and \eqn{b_i}{b[i]} is the distance from the \eqn{i}-th data point to the boundary of the window \eqn{W}. } \item{\code{bcom}:}{ the model compensator of the border-correction estimate \deqn{ {\bf C}\, \hat G(r) = \frac{\int \lambda(u,x) I\{ b(u) > r\} I\{ d(u,x) \le r\}}{ 1 + \sum_i I\{ b_i > r\} } }{ C G(r) = (integral[u] lambda(u,x) I(b(u) > r) I( d(u,x) <= r ))/(1 + sum[i] I(b[i] > r)) } where \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{d(u,x)} denotes the distance from \eqn{u} to the nearest point in \eqn{x}, while \eqn{b(u)} denotes the distance from \eqn{u} to the boundary of the window \eqn{W}. 
} \item{\code{han}:}{ the nonparametric Hanisch estimate of \eqn{G(r)} \deqn{ \hat G(r) = \frac{D(r)}{D(\infty)} }{ G(r) = D(r)/D(infty) } where \deqn{ D(r) = \sum_i \frac{ I\{x_i \in W_{\ominus d_i}\} I\{d_i \le r\} }{ \mbox{area}(W_{\ominus d_i}) } }{ D(r) = sum[i] I(x[i] in W[-d[i]]) I(d[i] <= r)/area(W[-d[i]]) } in which \eqn{W_{\ominus r}}{W[-r]} denotes the erosion of the window \eqn{W} by a distance \eqn{r}. } \item{\code{hcom}:}{ the corresponding model-compensated function \deqn{ {\bf C} \, G(r) = \int_W \frac{ \lambda(u,x) I(u \in W_{\ominus d(u)}) I(d(u) \le r) }{ \hat D(\infty) \mbox{area}(W_{\ominus d(u)}) + 1 } }{ C G(r) = integral[u] lambda(u,x) I(u in W[-d(u)]) I(d(u) <= r)/ (1 + D(infty) area(W[-d(u)])) } where \eqn{d(u) = d(u, x)} is the (`empty space') distance from location \eqn{u} to the nearest point of \eqn{x}. } } If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix E of Baddeley, Rubak and \Moller (2011). See also \code{\link{spatstat.options}('eroded.intensity')}. Thus, by default, the reweighting estimator is computed for non-Poisson models. The border-corrected and Hanisch-corrected estimates of \eqn{G(r)} are approximately unbiased estimates of the \eqn{G}-function, assuming the point process is stationary. The model-compensated functions are unbiased estimates \emph{of the mean value of the corresponding nonparametric estimate}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric and model-compensated estimates is approximately zero. To compute the difference between the nonparametric and model-compensated functions, use \code{\link{Gres}}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Gest}}, \code{\link{Gres}}. Alternative functions: \code{\link{Kcom}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model fitting: \code{\link{ppm}}. 
} \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gcom(fit0) G0 plot(G0) # uniform Poisson is clearly not correct # Hanisch estimates only plot(Gcom(fit0), cbind(han, hcom) ~ r) fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gcom(fit1), cbind(han, hcom) ~ r) # Try adjusting interaction distance fit2 <- update(fit1, Strauss(0.10)) plot(Gcom(fit2), cbind(han, hcom) ~ r) G3 <- Gcom(cells, interaction=Strauss(0.12)) plot(G3, cbind(han, hcom) ~ r) } \keyword{spatial} \keyword{models} spatstat.core/man/leverage.slrm.Rd0000644000176200001440000000477114141452520016663 0ustar liggesusers\name{leverage.slrm} \alias{leverage.slrm} \alias{influence.slrm} \alias{dfbetas.slrm} \alias{dffit.slrm} \title{ Leverage and Influence Diagnostics for Spatial Logistic Regression } \description{ For a fitted spatial logistic regression model, these functions compute diagnostics of leverage and influence. } \usage{ \method{leverage}{slrm}(model, \dots) \method{influence}{slrm}(model, \dots) \method{dfbetas}{slrm}(model, \dots) \method{dffit}{slrm}(object, \dots) } \arguments{ \item{model,object}{ A fitted spatial logistic regression model (object of class \code{"slrm"}). } \item{\dots}{ Arguments passed to methods. } } \details{ These functions are methods for the generics \code{\link{leverage}}, \code{\link[stats]{influence}}, \code{\link[stats]{dfbetas}} and \code{\link{dffit}} for the class \code{"slrm"}. These functions adapt the standard diagnostics for logistic regression (see \code{\link[stats]{influence.measures}}) to a fitted spatial logistic regression model (object of class \code{"slrm"}). This adaptation was described by Baddeley, Chang and Song (2013). \code{leverage.slrm} computes the leverage value (diagonal of the hat matrix) for the covariate data in each pixel. The result is a pixel image. \code{influence.slrm} computes the likelihood influence for the data (covariates and presence/absence of points) in each pixel. The result is a pixel image. \code{dfbetas.slrm} computes the parameter influence for the data (covariates and presence/absence of points) in each pixel. The result is a list of pixel images, one image for each of the model coefficients in \code{coef(model)}. The list can be plotted immediately. \code{dffit.slrm} computes the total influence for the data (covariates and presence/absence of points) in each pixel. The result is a pixel image. } \value{ A pixel image, or a list of pixel images. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \adrian. } \seealso{ \code{\link[stats]{influence.measures}}. \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{dffit.ppm}} } \examples{ H <- unmark(humberside) fit <- slrm(H ~ x+y, dimyx=32) plot(leverage(fit)) plot(influence(fit)) plot(dfbetas(fit)) plot(dffit(fit)) } \keyword{spatial} \keyword{models} \concept{diagnostics} spatstat.core/man/with.fv.Rd0000644000176200001440000000747314141452520015504 0ustar liggesusers\name{with.fv} \alias{with.fv} \title{Evaluate an Expression in a Function Table} \description{ Evaluate an R expression in a function value table (object of class \code{"fv"}). } \usage{ \method{with}{fv}(data, expr, ..., fun = NULL, enclos=NULL) } \arguments{ \item{data}{A function value table (object of class \code{"fv"}) in which the expression will be evaluated. } \item{expr}{The expression to be evaluated. 
An \R language expression, which may involve the names of columns in \code{data}, the special abbreviations \code{.}, \code{.x} and \code{.y}, and global constants or functions. } \item{\dots}{Ignored.} \item{fun}{Logical value, specifying whether the result should be interpreted as another function (\code{fun=TRUE}) or simply returned as a numeric vector or array (\code{fun=FALSE}). See Details. } \item{enclos}{ An environment in which to search for variables that are not found in \code{data}. Defaults to \code{\link{parent.frame}()}. } } \details{ This is a method for the generic command \code{\link{with}} for an object of class \code{"fv"} (function value table). An object of class \code{"fv"} is a convenient way of storing and plotting several different estimates of the same function. It is effectively a data frame with extra attributes. See \code{\link{fv.object}} for further explanation. This command makes it possible to perform computations that involve different estimates of the same function. For example we use it to compute the arithmetic difference between two different edge-corrected estimates of the \eqn{K} function of a point pattern. The argument \code{expr} should be an \R language expression. The expression may involve \itemize{ \item the name of any column in \code{data}, referring to one of the estimates of the function; \item the symbol \code{.} which stands for all the available estimates of the function; \item the symbol \code{.y} which stands for the recommended estimate of the function (in an \code{"fv"} object, one of the estimates is always identified as the recommended estimate); \item the symbol \code{.x} which stands for the argument of the function; \item global constants or functions. } See the Examples. The expression should be capable of handling vectors and matrices. The interpretation of the argument \code{fun} is as follows: \itemize{ \item If \code{fun=FALSE}, the result of evaluating the expression \code{expr} will be returned as a numeric vector, matrix or data frame. \item If \code{fun=TRUE}, then the result of evaluating \code{expr} will be interpreted as containing the values of a new function. The return value will be an object of class \code{"fv"}. (This can only happen if the result has the right dimensions.) \item The default is \code{fun=TRUE} if the result of evaluating \code{expr} has more than one column, and \code{fun=FALSE} otherwise. } To perform calculations involving \emph{several} objects of class \code{"fv"}, use \code{\link{eval.fv}}. } \value{ A function value table (object of class \code{"fv"}) or a numeric vector or data frame. } \seealso{ \code{\link{with}}, \code{\link{fv.object}}, \code{\link{eval.fv}}, \code{\link[spatstat.core]{Kest}} } \examples{ # compute 4 estimates of the K function X <- runifrect(42) K <- Kest(X) plot(K) # derive 4 estimates of the L function L(r) = sqrt(K(r)/pi) L <- with(K, sqrt(./pi)) plot(L) # compute 4 estimates of V(r) = L(r)/r V <- with(L, ./.x) plot(V) # compute the maximum absolute difference between # the isotropic and translation correction estimates of K(r) D <- with(K, max(abs(iso - trans))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.core/man/Kmark.Rd0000644000176200001440000001422614141452520015156 0ustar liggesusers\name{Kmark} \alias{Kmark} \alias{markcorrint} \title{Mark-Weighted K Function} \description{ Estimates the mark-weighted \eqn{K} function of a marked point pattern. 
} \usage{ Kmark(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) markcorrint(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{returnL}{ Compute the analogue of the K-function if \code{returnL=FALSE} or the analogue of the L-function if \code{returnL=TRUE}. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } } \details{ The functions \code{Kmark} and \code{markcorrint} are identical. (Eventually \code{markcorrint} will be deprecated.) The \emph{mark-weighted \eqn{K} function} \eqn{K_f(r)}{K[f](r)} of a marked point process (Penttinen et al, 1992) is a generalisation of Ripley's \eqn{K} function, in which the contribution from each pair of points is weighted by a function of their marks. If the marks of the two points are \eqn{m_1, m_2}{m1, m2} then the weight is proportional to \eqn{f(m_1, m_2)}{f(m1, m2)} where \eqn{f} is a specified \emph{test function}. The mark-weighted \eqn{K} function is defined so that \deqn{ \lambda K_f(r) = \frac{C_f(r)}{E[ f(M_1, M_2) ]} }{ lambda * K_f(r) = C[f](r)/E[f(M1, M2)] } where \deqn{ C_f(r) = E \left[ \sum_{x \in X} f(m(u), m(x)) 1{0 < ||u - x|| \le r} \; \big| \; u \in X \right] }{ C[f](r) = E[ sum[x in X] f(m(u), m(x)) 1(0 < d(u,x) <= r) | u in X] } for any spatial location \eqn{u} taken to be a typical point of the point process \eqn{X}. Here \eqn{||u-x||}{d(u,x)} is the euclidean distance between \eqn{u} and \eqn{x}, so that the sum is taken over all random points \eqn{x} that lie within a distance \eqn{r} of the point \eqn{u}. The function \eqn{C_f(r)}{C[f](r)} is the \emph{unnormalised} mark-weighted \eqn{K} function. To obtain \eqn{K_f(r)}{K[f](r)} we standardise \eqn{C_f(r)}{C[f](r)} by dividing by \eqn{E[f(M_1,M_2)]}{E[f(M1,M2)]}, the expected value of \eqn{f(M_1,M_2)}{f(M1,M2)} when \eqn{M_1}{M1} and \eqn{M_2}{M2} are independent random marks with the same distribution as the marks in the point process. Under the hypothesis of random labelling, the mark-weighted \eqn{K} function is equal to Ripley's \eqn{K} function, \eqn{K_f(r) = K(r)}{K[f](r) = K(r)}. 
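For example (an illustrative sketch using the \code{spruces} dataset, whose marks are tree diameters), a common choice of test function is the product \eqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2) = m1 * m2}, which can be supplied either via \code{f} or, because it factorises, via \code{f1}:

  # mark-weighted K function with the product test function
  K1 <- Kmark(spruces, f = function(m1, m2) { m1 * m2 })
  # equivalent specification via 'f1', since f(m1,m2) = f1(m1) * f1(m2)
  K2 <- Kmark(spruces, f1 = function(m) { m })
  plot(K1)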
The mark-weighted \eqn{K} function is sometimes called the \emph{mark correlation integral} because it is related to the mark correlation function \eqn{k_f(r)}{k[f](r)} and the pair correlation function \eqn{g(r)} by \deqn{ K_f(r) = 2 \pi \int_0^r s k_f(s) \, g(s) \, {\rm d}s }{ K[f](r) = 2 * pi * integral[0,r] (s * k[f](s) * g(s) ) ds } See \code{\link{markcorr}} for a definition of the mark correlation function. Given a marked point pattern \code{X}, this command computes edge-corrected estimates of the mark-weighted \eqn{K} function. If \code{returnL=FALSE} then the estimated function \eqn{K_f(r)}{K[f](r)} is returned; otherwise the function \deqn{ L_f(r) = \sqrt{K_f(r)/\pi} }{ L[f](r) = sqrt(K[f](r)/pi) } is returned. } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation integral \eqn{K_f(r)}{K[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_f(r)}{K[f](r)} when the marks attached to different points are independent, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark-weighted \eqn{K} function \eqn{K_f(r)}{K[f](r)} obtained by the edge corrections named (if \code{returnL=FALSE}). } \references{ Penttinen, A., Stoyan, D. and Henttonen, H. M. (1992) Marked point processes in forest statistics. \emph{Forest Science} \bold{38}, 806--824. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical analysis and modelling of spatial point patterns}. Chichester: John Wiley. } \seealso{ \code{\link{markcorr}} to estimate the mark correlation function. } \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- Kmark(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) Xc <- Kmark(X) plot(Xc) # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' M <- Kmark(amacrine, function(m1,m2) {m1==m2}, correction="translate") plot(M) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/cauchy.estpcf.Rd0000644000176200001440000001354414141452520016652 0ustar liggesusers\name{cauchy.estpcf} \alias{cauchy.estpcf} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ cauchy.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function.
} } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical pair correlation function of the Cauchy process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure internally uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object.
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{vargamma.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat.core/man/densityAdaptiveKernel.Rd0000644000176200001440000001316314141452520020406 0ustar liggesusers\name{densityAdaptiveKernel} \alias{densityAdaptiveKernel} \alias{densityAdaptiveKernel.ppp} \title{Adaptive Kernel Estimate of Intensity of Point Pattern} \description{ Computes an adaptive estimate of the intensity function of a point pattern using a variable-bandwidth smoothing kernel. } \usage{ densityAdaptiveKernel(X, \dots) \method{densityAdaptiveKernel}{ppp}(X, bw, \dots, weights=NULL, at=c("pixels", "points"), edge=TRUE, ngroups) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{bw}{ Numeric vector of smoothing bandwidths for each point in \code{X}, or a pixel image giving the smoothing bandwidth at each spatial location, or a spatial function of class \code{"funxy"} giving the smoothing bandwidth at each location. The default is to compute bandwidths using \code{\link{bw.abram}}. } \item{\dots}{ Arguments passed to \code{\link{bw.abram}} to compute the smoothing bandwidths if \code{bw} is missing, or passed to \code{\link[spatstat.geom]{as.mask}} to control the spatial resolution of the result. } \item{weights}{ Optional vector of numeric weights for the points of \code{X}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{edge}{ Logical value indicating whether to perform edge correction. } \item{ngroups}{ Number of groups into which the bandwidth values should be partitioned and discretised. } } \details{ This function computes a spatially-adaptive kernel estimate of the spatially-varying intensity from the point pattern \code{X} using the partitioning technique of Davies and Baddeley (2018). The argument \code{bw} specifies the smoothing bandwidths to be applied to each of the points in \code{X}. It may be a numeric vector of bandwidth values, or a pixel image or function yielding the bandwidth values.
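For example, a bandwidth vector can be computed explicitly and supplied (a minimal sketch; the value of \code{h0} is hypothetical):
\preformatted{
  bw <- bw.abram(redwood, h0=0.1)   # one bandwidth per data point
  Z <- densityAdaptiveKernel(redwood, bw)
}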
If the points of \code{X} are \eqn{x_1,\ldots,x_n}{x[1], ..., x[n]} and the corresponding bandwidths are \eqn{\sigma_1,\ldots,\sigma_n}{sigma[1], ..., sigma[n]} then the adaptive kernel estimate of intensity at a location \eqn{u} is \deqn{ \hat\lambda(u) = \sum_{i=1}^n k(u, x_i, \sigma_i) }{ lambda(u) = sum[i] k(u, x[i], sigma[i]) } where \eqn{k(u, v, \sigma)}{k(u, v, sigma)} is the value at \eqn{u} of the (possibly edge-corrected) smoothing kernel with bandwidth \eqn{\sigma}{sigma} induced by a data point at \eqn{v}. Exact computation of the estimate above can be time-consuming: it takes \eqn{n} times longer than fixed-bandwidth smoothing. The partitioning method of Davies and Baddeley (2018) accelerates this computation by partitioning the range of bandwidths into \code{ngroups} intervals, correspondingly subdividing the points of the pattern \code{X} into \code{ngroups} sub-patterns according to bandwidth, and applying fixed-bandwidth smoothing to each sub-pattern. The default value of \code{ngroups} is the integer part of the square root of the number of points in \code{X}, so that the computation time is only about \eqn{\sqrt{n}}{sqrt(n)} times slower than fixed-bandwidth smoothing. Any positive value of \code{ngroups} can be specified by the user. Specifying \code{ngroups=Inf} enforces exact computation of the estimate without partitioning. Specifying \code{ngroups=1} is the same as fixed-bandwidth smoothing with bandwidth \code{sigma=median(bw)}. } \section{Bandwidths and Bandwidth Selection}{ The function \code{densityAdaptiveKernel} computes one adaptive estimate of the intensity, determined by the smoothing bandwidth values \code{bw}. Typically the bandwidth values are computed by first computing a pilot estimate of the intensity, then using \code{\link{bw.abram}} to compute the vector of bandwidths according to Abramson's rule. This involves specifying a global bandwidth \code{h0}. The default bandwidths may work well in many contexts, but for optimal bandwidth selection, this calculation should be performed repeatedly with different values of \code{h0} to optimise the value of \code{h0}. This can be computationally demanding; we recommend the function \code{multiscale.density} in the \pkg{sparr} package which supports much faster bandwidth selection, using the FFT method of Davies and Baddeley (2018). } \value{ If \code{at="pixels"} (the default), the result is a pixel image. If \code{at="points"}, the result is a numeric vector with one entry for each data point in \code{X}. } \author{ \adrian and Tilman Davies. } \references{ Davies, T.M. and Baddeley, A. (2018) Fast computation of spatially adaptive kernel estimates. \emph{Statistics and Computing}, \bold{28}(4), 937-956.\cr Hall, P. and Marron, J.S. (1988) Variable window width kernel density estimates of probability densities. \emph{Probability Theory and Related Fields}, \bold{80}, 37-49.\cr Silverman, B.W. (1986) \emph{Density Estimation for Statistics and Data Analysis}. Chapman and Hall, New York. } \seealso{ \code{\link{density.ppp}}, \code{\link{adaptive.density}}, \code{\link{densityVoronoi}}, \code{\link[spatstat.geom]{im.object}}. See the function \code{bivariate.density} in the \pkg{sparr} package for a more flexible implementation, and \code{multiscale.density} for an implementation that is more efficient for bandwidth selection.
} \examples{ Z <- densityAdaptiveKernel(redwood, h0=0.1) plot(Z, main="Adaptive kernel estimate") points(redwood, col="white") } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/plot.mppm.Rd0000644000176200001440000000463314141452520016040 0ustar liggesusers\name{plot.mppm} \alias{plot.mppm} \title{Plot a Fitted Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns by \code{\link{mppm}}, compute spatial trend or conditional intensity surface of the model, in a form suitable for plotting, and (optionally) plot this surface. } \usage{ \method{plot}{mppm}(x, \dots, trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp")) } \arguments{ \item{x}{ A point process model fitted to multiple point patterns, typically obtained from the model-fitting algorithm \code{\link{mppm}}. An object of class \code{"mppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} or \code{\link{plot.anylist}} controlling the plot. } \item{trend}{ Logical value indicating whether to plot the fitted trend. } \item{cif}{ Logical value indicating whether to plot the fitted conditional intensity. } \item{se}{ Logical value indicating whether to plot the standard error of the fitted trend. } \item{how}{ Single character string indicating the style of plot to be performed. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"mppm"} of point process models fitted to multiple point patterns (see \code{\link{mppm}}). It invokes \code{\link{subfits}} to compute the fitted model for each individual point pattern dataset, then calls \code{\link{plot.ppm}} to plot these individual models. These individual plots are displayed using \code{\link{plot.anylist}}, which generates either a series of separate plot frames or an array of plot panels on a single page. } \seealso{ \code{\link{plot.ppm}}, \code{\link{mppm}}, \code{\link{plot.listof}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \examples{ # Synthetic data from known model n <- 9 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~ Z + U + V, data=H) plot(fit) } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian, \rolf and \ege } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.core/man/print.ppm.Rd0000644000176200001440000000301514141452520016032 0ustar liggesusers\name{print.ppm} \alias{print.ppm} \title{Print a Fitted Point Process Model} \description{ Default \code{print} method for a fitted point process model. } \usage{ \method{print}{ppm}(x,\dots, what=c("all", "model", "trend", "interaction", "se", "errors")) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{what}{ Character vector (partially-matched) indicating what information should be printed. } \item{\dots}{Ignored.} } \value{ None. } \details{ This is the \code{print} method for the class \code{"ppm"}. It prints information about the fitted model in a sensible format. The argument \code{what} makes it possible to print only some of the information.
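For example, a minimal sketch of selective printing (showing only the interaction component of a fitted model):
\preformatted{
  fit <- ppm(cells ~ 1, Strauss(0.05))
  print(fit, what="interaction")
}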
If \code{what} is missing, then by default, standard errors for the estimated coefficients of the model will be printed only if the model is a Poisson point process. To print the standard errors for a non-Poisson model, call \code{print.ppm} with the argument \code{what} given explicitly, or reset the default rule by typing \code{spatstat.options(print.ppm.SE="always")}. } \seealso{ \code{\link{ppm.object}} for details of the class \code{"ppm"}. \code{\link{ppm}} for generating these objects. \code{\link{plot.ppm}}, \code{\link{predict.ppm}} } \examples{ # m <- ppm(cells ~1, Strauss(0.05)) # m } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} \keyword{models} spatstat.core/man/rStraussHard.Rd0000644000176200001440000000720114141452520016531 0ustar liggesusers\name{rStraussHard} \alias{rStraussHard} \title{Perfect Simulation of the Strauss-Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss-Hardcore process, using a perfect simulation algorithm. } \usage{ rStraussHard(beta, gamma = 1, R = 0, H = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{H}{ hard core distance (a non-negative number smaller than \code{R}). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Strauss-Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss-Hardcore process is described in \code{\link{StraussHard}}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). A limitation of the perfect simulation algorithm is that the interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1}. To simulate a Strauss-hardcore process with \eqn{\gamma > 1}{gamma > 1}, use \code{\link{rmh}}. There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. 
and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ Kasper Klitgaard Berthelsen and \adrian } \examples{ Z <- rStraussHard(100,0.7,0.05,0.02) Y <- rStraussHard(100,0.7,0.05,0.01, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{StraussHard}}. \code{\link{rHardcore}}, \code{\link{rStrauss}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat.core/man/methods.leverage.ppm.Rd0000644000176200001440000000447014141452520020140 0ustar liggesusers\name{methods.leverage.ppm} \alias{methods.leverage.ppm} %DoNotExport \alias{as.im.leverage.ppm} \alias{as.owin.leverage.ppm} \alias{domain.leverage.ppm} \alias{integral.leverage.ppm} \alias{mean.leverage.ppm} \alias{Smooth.leverage.ppm} \alias{Window.leverage.ppm} \title{Methods for Leverage Objects} \description{ Methods for the class \code{"leverage.ppm"}. } \usage{ \method{as.im}{leverage.ppm}(X, \dots, what=c("smooth", "nearest")) \method{as.owin}{leverage.ppm}(W, \dots, fatal=TRUE) \method{domain}{leverage.ppm}(X, \dots) \method{integral}{leverage.ppm}(f, domain, \dots) \method{mean}{leverage.ppm}(x, \dots) \method{Smooth}{leverage.ppm}(X, \dots) \method{Window}{leverage.ppm}(X, \dots) } \arguments{ \item{X,x,W,f}{An object of class \code{"leverage.ppm"}.} \item{domain}{ Optional. Domain of integration: a window (class \code{"owin"}) or a tessellation (class \code{"tess"}). } \item{\dots}{ Additional arguments. See Details. } \item{fatal}{ Logical value indicating what to do if the data cannot be converted to a window. If \code{fatal=TRUE} (the default) an error occurs. If \code{fatal=FALSE} a value of \code{NULL} is returned. } \item{what}{ Character string (partially matched) specifying which image data should be extracted. See \code{\link{plot.leverage.ppm}} for explanation. } } \value{ A window (object of class \code{"owin"}) for \code{as.owin}, \code{domain} and \code{Window}. A numeric value or numeric vector for \code{integral}. A pixel image, or list of pixel images, for \code{as.im} and \code{Smooth}. } \details{ These functions are methods for the class \code{"leverage.ppm"}. An object of this class represents the leverage measure of a fitted point process model (see \code{\link{leverage.ppm}}). For \code{as.im}, \code{domain} and \code{Window}, additional arguments (\code{\dots}) are ignored. For \code{as.owin}, \code{integral}, \code{mean} and \code{Smooth}, additional arguments are passed to the method for class \code{"im"}. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{plot.leverage.ppm}}, \code{\link{[.leverage.ppm}}, \code{\link{as.function.leverage.ppm}}. } \examples{ fit <- ppm(cells ~ x) a <- leverage(fit) integral(a) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/model.matrix.ppm.Rd0000644000176200001440000000771214141452520017311 0ustar liggesusers\name{model.matrix.ppm} \alias{model.matrix.ppm} \alias{model.matrix.kppm} \alias{model.matrix.dppm} \alias{model.matrix.ippm} \title{Extract Design Matrix from Point Process Model} \description{ Given a point process model that has been fitted to spatial point pattern data, this function extracts the design matrix of the model. 
} \usage{ \method{model.matrix}{ppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{kppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{dppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{ippm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE, irregular=FALSE) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"ippm"}. } \item{data}{ A model frame, containing the data required for the Berman-Turner device. } \item{Q}{ A point pattern (class \code{"ppp"}) or quadrature scheme (class \code{"quad"}) specifying new locations where the covariates should be computed. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{irregular}{ Logical value indicating whether to include the irregular score components. } } \details{ These commands are methods for the generic function \code{\link{model.matrix}}. They extract the design matrix of a spatial point process model (class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}). More precisely, this command extracts the design matrix of the generalised linear model associated with a spatial point process model. The \code{object} must be a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}) fitted to spatial point pattern data. Such objects are produced by the model-fitting functions \code{\link{ppm}}, \code{\link{kppm}}, and \code{\link{dppm}}. The methods \code{model.matrix.ppm}, \code{model.matrix.kppm}, and \code{model.matrix.dppm} extract the model matrix for the GLM. The result is a matrix, with one row for every quadrature point in the fitting procedure, and one column for every constructed covariate in the design matrix. If there are \code{NA} values in the covariates, the argument \code{keepNA} determines whether to retain or delete the corresponding rows of the model matrix. The default \code{keepNA=TRUE} is to retain them. Note that this differs from the default behaviour of many other methods for \code{model.matrix}, which typically delete rows containing \code{NA}. The quadrature points themselves can be extracted using \code{\link{quad.ppm}}. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. Rows of the matrix correspond to quadrature points in the fitting procedure (provided \code{keepNA=TRUE}). } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{ippm}}, \code{\link{ppm.object}}, \code{\link{quad.ppm}}, \code{\link{residuals.ppm}} } \examples{ fit <- ppm(cells ~ x) head(model.matrix(fit)) model.matrix(fit, Q=runifpoint(5)) kfit <- kppm(redwood ~ x, "Thomas") m <- model.matrix(kfit) } \keyword{spatial} \keyword{models} spatstat.core/man/dppPowerExp.Rd0000644000176200001440000000247714141452520016373 0ustar liggesusers\name{dppPowerExp} \alias{dppPowerExp} \title{Power Exponential Spectral Determinantal Point Process Model} \description{Function generating an instance of the Power Exponential Spectral determinantal point process model.} \usage{dppPowerExp(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The Power Exponential Spectral DPP is defined in (Lavancier, \Moller and Rubak, 2015). The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian, \rolf and \ege } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppPowerExp(lambda=100, alpha=.01, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}} } spatstat.core/man/dim.detpointprocfamily.Rd0000644000176200001440000000073114141452520020571 0ustar liggesusers\name{dim.detpointprocfamily} \alias{dim.detpointprocfamily} \title{Dimension of Determinantal Point Process Model} \description{Extracts the dimension of a determinantal point process model.} \usage{ \method{dim}{detpointprocfamily}(x) } \arguments{ \item{x}{object of class \code{"detpointprocfamily"}.} } \value{A numeric value (or \code{NULL} if the dimension of the model is unspecified).} \author{ \adrian, \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/Lcross.Rd0000644000176200001440000000563714141452520015364 0ustar liggesusers\name{Lcross} \alias{Lcross} \title{Multitype L-function (cross-type)} \description{ Calculates an estimate of the cross-type L-function for a multitype point pattern. } \usage{ Lcross(X, i, j, ..., from, to, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{correction,\dots}{ Arguments passed to \code{\link{Kcross}}. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively.
} } \details{ The cross-type L-function is a transformation of the cross-type K-function, \deqn{L_{ij}(r) = \sqrt{\frac{K_{ij}(r)}{\pi}}}{Lij(r) = sqrt(Kij(r)/pi)} where \eqn{K_{ij}(r)}{Kij(r)} is the cross-type K-function from type \code{i} to type \code{j}. See \code{\link{Kcross}} for information about the cross-type K-function. The command \code{Lcross} first calls \code{\link{Kcross}} to compute the estimate of the cross-type K-function, and then applies the square root transformation. For a marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the L-function is \eqn{L_{ij}(r) = r}{Lij(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{ij}}{Lij} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{ij}}{Lij} has been estimated } \item{theo}{the theoretical value \eqn{L_{ij}(r) = r}{Lij(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}}{Lij} obtained by the edge corrections named. } \seealso{ \code{\link{Kcross}}, \code{\link{Ldot}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Lcross(amacrine, "off", "on") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pcf.fv.Rd0000644000176200001440000001134214141452520015267 0ustar liggesusers\name{pcf.fv} \alias{pcf.fv} \title{Pair Correlation Function obtained from K Function} \description{ Estimates the pair correlation function of a point pattern, given an estimate of the K function. } \usage{ \method{pcf}{fv}(X, \dots, method="c") } \arguments{ \item{X}{ An estimate of the \eqn{K} function or one of its variants. An object of class \code{"fv"}. } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function. Essentially a data frame containing (at least) the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{pcf}{vector of values of \eqn{g(r)} } } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. 
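For example, a minimal sketch deriving the pair correlation function from an estimated \eqn{K} function (using the \code{redwood} data):
\preformatted{
  K <- Kest(redwood)
  g <- pcf(K, spar=0.5)   # dispatches to pcf.fv; default method="c"
  plot(g)
}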
We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an estimate of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivative. It is a method for the generic function \code{\link{pcf}} for the class \code{"fv"}. The argument \code{X} should be an estimated \eqn{K} function, given as a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). This object should be the value returned by \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{Kmulti}} or \code{\link{Kinhom}}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \pkg{stats} package. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D., Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # univariate point pattern X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } K <- Kest(X) p <- pcf.fv(K, spar=0.5, method="b") plot(p, main="pair correlation function for simdat") # indicates inhibition at distances r < 0.3 } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/density.psp.Rd0000644000176200001440000000701414141452520016366 0ustar liggesusers\name{density.psp} \alias{density.psp} \title{Kernel Smoothing of Line Segment Pattern} \description{ Compute a kernel smoothed intensity function from a line segment pattern. } \usage{ \method{density}{psp}(x, sigma, \dots, weights=NULL, edge=TRUE, method=c("FFT", "C", "interpreted"), at=NULL) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}) to be smoothed.
} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Extra arguments, including arguments passed to \code{\link{as.mask}} to determine the resolution of the resulting image. } \item{weights}{ Optional. Numerical weights for each line segment. A numeric vector, of length equal to the number of segments in \code{x}. } \item{edge}{ Logical flag indicating whether to apply edge correction. } \item{method}{ Character string (partially matched) specifying the method of computation. Option \code{"FFT"} is the fastest, while \code{"C"} is the most accurate. } \item{at}{ Optional. An object specifying the locations where density values should be computed. Either a window (object of class \code{"owin"}) or a point pattern (object of class \code{"ppp"} or \code{"lpp"}). } } \value{ A pixel image (object of class \code{"im"}) or a numeric vector. } \details{ This is the method for the generic function \code{\link{density}} for the class \code{"psp"} (line segment patterns). A kernel estimate of the intensity of the line segment pattern is computed. The result is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with the line segments. The result is computed as follows: \itemize{ \item if \code{method="FFT"} (the default), the line segments are discretised using \code{\link{pixellate.psp}}, then the Fast Fourier Transform is used to calculate the convolution. This method is the fastest, but is slightly less accurate. Accuracy can be improved by increasing pixel resolution. \item if \code{method="C"} the exact value of the convolution at the centre of each pixel is computed analytically using \code{C} code; \item if \code{method="interpreted"}, the exact value of the convolution at the centre of each pixel is computed analytically using \code{R} code. This method is the slowest. } If \code{edge=TRUE} this result is adjusted for edge effects by dividing it by the convolution of the same Gaussian kernel with the observation window. If \code{weights} are given, then the contribution from line segment \code{i} is multiplied by the value of \code{weights[i]}. If the argument \code{at} is given, then it specifies the locations where density values should be computed. \itemize{ \item If \code{at} is a window, then the window is converted to a binary mask using the arguments \code{\dots}, and density values are computed at the centre of each pixel in this mask. The result is a pixel image. \item If \code{at} is a point pattern, then density values are computed at each point location, and the result is a numeric vector. } } \seealso{ \code{\link{psp.object}}, \code{\link{im.object}}, \code{\link{density}} } \examples{ L <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) D <- density(L, sigma=0.03) plot(D, main="density(L)") plot(L, add=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/dmixpois.Rd0000644000176200001440000000512314141452520015741 0ustar liggesusers\name{dmixpois} \alias{dmixpois} \alias{pmixpois} \alias{qmixpois} \alias{rmixpois} \title{ Mixed Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for a mixture of Poisson distributions. 
} \usage{ dmixpois(x, mu, sd, invlink = exp, GHorder = 5) pmixpois(q, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5) qmixpois(p, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5) rmixpois(n, mu, sd, invlink = exp) } \arguments{ \item{x}{vector of (non-negative integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of random values to return.} \item{mu}{ Mean of the linear predictor. A single numeric value. } \item{sd}{ Standard deviation of the linear predictor. A single numeric value. } \item{invlink}{ Inverse link function. A function in the \R language, used to transform the linear predictor into the parameter \code{lambda} of the Poisson distribution. } \item{lower.tail}{ Logical. If \code{TRUE} (the default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}. } \item{GHorder}{ Number of quadrature points in the Gauss-Hermite quadrature approximation. A small positive integer. } } \details{ These functions are analogous to \code{\link{dpois}}, \code{\link{ppois}}, \code{\link{qpois}} and \code{\link{rpois}} except that they apply to a mixture of Poisson distributions. In effect, the Poisson mean parameter \code{lambda} is randomised by setting \code{lambda = invlink(Z)} where \code{Z} has a Gaussian \eqn{N(\mu,\sigma^2)}{N(mu, sigma^2)} distribution. The default is \code{invlink=exp} which means that \code{lambda} is lognormal. Set \code{invlink=I} to assume that \code{lambda} is approximately Normal. For \code{dmixpois}, \code{pmixpois} and \code{qmixpois}, the probability distribution is approximated using Gauss-Hermite quadrature. For \code{rmixpois}, the deviates are simulated exactly. } \value{ Numeric vector: \code{dmixpois} gives probability masses, \code{pmixpois} gives cumulative probabilities, \code{qmixpois} gives (non-negative integer) quantiles, and \code{rmixpois} generates (non-negative integer) random deviates. } \seealso{ \code{\link{dpois}}, \code{\link{gauss.hermite}}. } \examples{ dmixpois(7, 10, 1, invlink = I) dpois(7, 10) pmixpois(7, log(10), 0.2) ppois(7, 10) qmixpois(0.95, log(10), 0.2) qpois(0.95, 10) x <- rmixpois(100, log(10), log(1.2)) mean(x) var(x) } \author{\adrian, \rolf and \ege } \keyword{distribution} spatstat.core/man/ppmInfluence.Rd0000644000176200001440000000607714141452520016543 0ustar liggesusers\name{ppmInfluence} \alias{ppmInfluence} \title{ Leverage and Influence Measures for Spatial Point Process Model } \description{ Calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. } \usage{ ppmInfluence(fit, what = c("leverage", "influence", "dfbetas"), \dots, iScore = NULL, iHessian = NULL, iArgs = NULL, drop = FALSE, fitname = NULL) } \arguments{ \item{fit}{ A fitted point process model of class \code{"ppm"}. } \item{what}{ Character vector specifying which quantities are to be calculated. Default is to calculate all quantities. } \item{\dots}{ Ignored. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{fitname}{ Optional character string name for the fitted model \code{fit}.
} } \details{ This function calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. When analysing large datasets, the user can call \code{ppmInfluence} to perform the calculations efficiently, then extract the leverage and influence values as desired. For example the leverage can be extracted either as \code{result$leverage} or \code{leverage(result)}. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ A list containing the leverage and influence measures specified by \code{what}. The result also belongs to the class \code{"ppmInfluence"}. } \author{ \adrian } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~ x+y) fI <- ppmInfluence(fit) fitlev <- fI$leverage fitlev <- leverage(fI) fitinf <- fI$influence fitinf <- influence(fI) fitdfb <- fI$dfbetas fitdfb <- dfbetas(fI) } \keyword{spatial} \keyword{models} spatstat.core/man/reach.Rd0000644000176200001440000001206014141452520015165 0ustar liggesusers\name{reach} \alias{reach} \alias{reach.ppm} \alias{reach.interact} \alias{reach.fii} \alias{reach.rmhmodel} \title{Interaction Distance of a Point Process} \description{ Computes the interaction distance of a point process. } \usage{ reach(x, \dots) \method{reach}{ppm}(x, \dots, epsilon=0) \method{reach}{interact}(x, \dots) \method{reach}{rmhmodel}(x, \dots) \method{reach}{fii}(x, \dots, epsilon) } \arguments{ \item{x}{Either a fitted point process model (object of class \code{"ppm"}), an interpoint interaction (object of class \code{"interact"}), a fitted interpoint interaction (object of class \code{"fii"}) or a point process model for simulation (object of class \code{"rmhmodel"}). } \item{epsilon}{ Numerical threshold below which interaction is treated as zero. See details. } \item{\dots}{ Other arguments are ignored. } } \value{ The interaction distance, or \code{NA} if this cannot be computed from the information given. } \details{ The `interaction distance' or `interaction range' of a point process model of class \code{"ppm"} is the smallest distance \eqn{D} such that any two points in the process which are separated by a distance greater than \eqn{D} do not interact with each other. For example, the interaction range of a Strauss process (see \code{\link{Strauss}}) with parameters \eqn{\beta,\gamma,r}{beta,gamma,r} is equal to \eqn{r}, unless \eqn{\gamma=1}{gamma=1} in which case the model is Poisson and the interaction range is \eqn{0}. The interaction range of a Poisson process is zero. The interaction range of the Ord threshold process (see \code{\link{OrdThresh}}) is infinite, since two points \emph{may} interact at any distance apart. 
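For instance, a minimal sketch (see also the Examples below); interaction objects report their maximum possible range, which may be infinite:
\preformatted{
  reach(Strauss(r=0.1))        # returns 0.1
  reach(Softcore(kappa=0.5))   # returns Inf
}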
The function \code{reach(x)} is generic, with methods for the case where \code{x} is \itemize{ \item a fitted point process model (object of class \code{"ppm"}, usually obtained from the model-fitting function \code{\link{ppm}}); \item an interpoint interaction structure (object of class \code{"interact"}), created by one of the functions \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}} or \code{\link{Ord}}; \item a fitted interpoint interaction (object of class \code{"fii"}) extracted from a fitted point process model by the command \code{\link{fitin}}; \item a point process model for simulation (object of class \code{"rmhmodel"}), usually obtained from \code{\link{rmhmodel}}. } When \code{x} is an \code{"interact"} object, \code{reach(x)} returns the maximum possible interaction range for any point process model with interaction structure given by \code{x}. For example, \code{reach(Strauss(0.2))} returns \code{0.2}. When \code{x} is a \code{"ppm"} object, \code{reach(x)} returns the interaction range for the point process model represented by \code{x}. For example, a fitted Strauss process model with parameters \code{beta,gamma,r} will return either \code{0} or \code{r}, depending on whether the fitted interaction parameter \code{gamma} is equal or not equal to 1. For some point process models, such as the soft core process (see \code{\link{Softcore}}), the interaction distance is infinite, because the interaction terms are positive for all pairs of points. A practical solution is to compute the distance at which the interaction contribution from a pair of points falls below a threshold \code{epsilon}, on the scale of the log conditional intensity. This is done by setting the argument \code{epsilon} to a positive value. } \section{Other types of models}{ Methods for \code{reach} are also defined for point process models of class \code{"kppm"} and \code{"dppm"}. Their technical definition is different from this one. See \code{\link{reach.kppm}} and \code{\link{reach.dppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{rmhmodel}} See \code{\link{reach.kppm}} and \code{\link{reach.dppm}} for other types of point process models. } \examples{ reach(Poisson()) # returns 0 reach(Strauss(r=7)) # returns 7 fit <- ppm(swedishpines ~ 1, Strauss(r=7)) reach(fit) # returns 7 reach(OrdThresh(42)) # returns Inf reach(MultiStrauss(matrix(c(1,3,3,1),2,2))) # returns 3 } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/predict.ppm.Rd0000644000176200001440000003642714141452520016345 0ustar liggesusers\name{predict.ppm} \alias{predict.ppm} \title{Prediction from a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, evaluate the spatial trend or the conditional intensity of the model at new locations. 
} \usage{ \method{predict}{ppm}(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ignore.hardcore=FALSE, \dots, dimyx=NULL, eps=NULL, new.coef=NULL, check=TRUE, repair=TRUE) } \arguments{ \item{object}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"} (see \code{\link{ppm.object}}). } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Incompatible with \code{locations}. Equivalent to \code{dimyx}.) } \item{locations}{ Optional. Data giving the exact \eqn{x,y} coordinates (and marks, if required) of locations at which predictions should be computed. Either a point pattern, or a data frame with columns named \code{x} and \code{y}, or a binary image mask, or a pixel image. (Incompatible with \code{ngrid}, \code{dimyx} and \code{eps}). } \item{covariates}{ Values of external covariates required by the model. Either a data frame or a list of images. See Details. } \item{type}{ Character string. Indicates which property of the fitted model should be predicted. Options are \code{"trend"} for the spatial trend, \code{"cif"} or \code{"lambda"} for the conditional intensity, \code{"intensity"} for the intensity, and \code{"count"} for the total number of points in \code{window}. } \item{se}{ Logical value indicating whether to calculate standard errors as well. } \item{interval}{ String (partially matched) indicating whether to produce estimates (\code{interval="none"}, the default) or a confidence interval (\code{interval="confidence"}) or a prediction interval (\code{interval="prediction"}). } \item{level}{ Coverage probability for the confidence or prediction interval. } \item{X}{ Optional. A point pattern (object of class \code{"ppp"}) to be taken as the data point pattern when calculating the conditional intensity. The default is to use the original data to which the model was fitted. } \item{correction}{ Name of the edge correction to be used in calculating the conditional intensity. Options include \code{"border"} and \code{"none"}. Other options may include \code{"periodic"}, \code{"isotropic"} and \code{"translate"} depending on the model. The default correction is the one that was used to fit \code{object}. } \item{ignore.hardcore}{ Advanced use only. Logical value specifying whether to compute only the finite part of the interaction potential (effectively removing any hard core interaction terms). } \item{\dots}{ Ignored. } \item{dimyx}{ Equivalent to \code{ngrid}. } \item{eps}{ Width and height of pixels in the prediction grid. A numerical value, or numeric vector of length 2. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. 
If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } } \value{ \emph{If \code{type="count"}:} a numeric vector or matrix. \emph{If \code{locations} is given and is a data frame:} a vector of predicted values for the spatial locations (and marks, if required) given in \code{locations}. \emph{If \code{ngrid} is given, or if \code{locations} is given and is a binary image mask or a pixel image:} If \code{object} is an unmarked point process, the result is a pixel image object (of class \code{"im"}, see \code{\link{im.object}}) containing the predictions. If \code{object} is a multitype point process, the result is a list of pixel images, containing the predictions for each type at the same grid of locations. The ``predicted values'' are either values of the spatial trend (if \code{type="trend"}), values of the conditional intensity (if \code{type="cif"} or \code{type="lambda"}), values of the intensity (if \code{type="intensity"}) or numbers of points (if \code{type="count"}). If \code{se=TRUE}, then the result is a list with two entries, the first being the predicted values in the format described above, and the second being the standard errors in the same format. } \details{ This function computes properties of a fitted spatial point process model (object of class \code{"ppm"}). For a Poisson point process it can compute the fitted intensity function, or the expected number of points in a region. For a Gibbs point process it can compute the spatial trend (first order potential), conditional intensity, and approximate intensity of the process. Point estimates, standard errors, confidence intervals and prediction intervals are available. Given a point pattern dataset, we may fit a point process model to the data using the model-fitting algorithm \code{\link{ppm}}. This returns an object of class \code{"ppm"} representing the fitted point process model (see \code{\link{ppm.object}}). The parameter estimates in this fitted model can be read off simply by printing the \code{ppm} object. The spatial trend, conditional intensity and intensity of the fitted model are evaluated using this function \code{predict.ppm}. The default action is to create a rectangular grid of points in the observation window of the data point pattern, and evaluate the spatial trend at these locations. The argument \code{type} specifies the values that are desired: \describe{ \item{If \code{type="trend"}:}{ the ``spatial trend'' of the fitted model is evaluated at each required spatial location \eqn{u}. See below. } \item{If \code{type="cif"}:}{ the conditional intensity \eqn{\lambda(u, X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location \eqn{u}, with respect to the data point pattern \eqn{X}. } \item{If \code{type="intensity"}:}{ the intensity \eqn{\lambda(u)}{lambda(u)} of the fitted model is evaluated at each required spatial location \eqn{u}. } \item{If \code{type="count"}:}{ the expected total number of points (or the expected number of points falling in \code{window}) is evaluated. If \code{window} is a tessellation, the expected number of points in each tile of the tessellation is evaluated. } } The spatial trend, conditional intensity, and intensity are all equivalent if the fitted model is a Poisson point process.
However, if the model is not a Poisson process, then they are all different. The ``spatial trend'' is the (exponentiated) first order potential, and not the intensity of the process. [For example, if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}, while the intensity is a smaller value.] The default is to compute an estimate of the desired quantity. If \code{interval="confidence"} or \code{interval="prediction"}, the estimate is replaced by a confidence interval or prediction interval. If \code{se=TRUE}, then a standard error is also calculated, and is returned together with the (point or interval) estimate. The spatial locations where predictions are required are determined by the (mutually incompatible) arguments \code{ngrid} and \code{locations}. \itemize{ \item If the argument \code{ngrid} is present, then predictions are performed at a rectangular grid of locations in the window \code{window}. The result of prediction will be a pixel image or images. \item If \code{locations} is present, then predictions will be performed at the spatial locations given by this dataset. These may be an arbitrary list of spatial locations, or they may be a rectangular grid. The result of prediction will be either a numeric vector or a pixel image or images. \item If neither \code{ngrid} nor \code{locations} is given, then \code{ngrid} is assumed. The value of \code{ngrid} defaults to \code{\link{spatstat.options}("npixel")}, which is initialised to 128 when \pkg{spatstat} is loaded. } The argument \code{locations} may be a point pattern, a data frame or a list specifying arbitrary locations; or it may be a binary image mask (an object of class \code{"owin"} with type \code{"mask"}) or a pixel image (object of class \code{"im"}) specifying (a subset of) a rectangular grid of locations. \itemize{ \item If \code{locations} is a point pattern (object of class \code{"ppp"}), then prediction will be performed at the points of the point pattern. The result of prediction will be a vector of predicted values, one value for each point. If the model is a marked point process, then \code{locations} should be a marked point pattern, with marks of the same kind as the model; prediction will be performed at these marked points. The result of prediction will be a vector of predicted values, one value for each (marked) point. \item If \code{locations} is a data frame or list, then it must contain vectors \code{locations$x} and \code{locations$y} specifying the \eqn{x,y} coordinates of the prediction locations. Additionally, if the model is a marked point process, then \code{locations} must also contain a factor \code{locations$marks} specifying the marks of the prediction locations. These vectors must have equal length. The result of prediction will be a vector of predicted values, of the same length. \item If \code{locations} is a binary image mask, then prediction will be performed at each pixel in this binary image where the pixel value is \code{TRUE} (in other words, at each pixel that is inside the window). If the fitted model is an unmarked point process, then the result of prediction will be an image. If the fitted model is a marked point process, then prediction will be performed for each possible value of the mark at each such location, and the result of prediction will be a list of images, one for each mark value.
\item If \code{locations} is a pixel image (object of class \code{"im"}), then prediction will be performed at each pixel in this image where the pixel value is defined (i.e.\ where the pixel value is not \code{NA}). } The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The format and use of \code{covariates} are analogous to those of the argument of the same name in \code{\link{ppm}}. It is either a data frame or a list of images. \itemize{ \item If \code{covariates} is a list of images, then the names of the entries should correspond to the names of covariates in the model formula \code{trend}. Each entry in the list must be an image object (of class \code{"im"}, see \code{\link{im.object}}). The software will look up the pixel values of each image at the prediction locations. \item If \code{covariates} is a data frame, then the \code{i}th row of \code{covariates} is assumed to contain covariate data for the \code{i}th location. When \code{locations} is a data frame, this just means that each row of \code{covariates} contains the covariate data for the location specified in the corresponding row of \code{locations}. When \code{locations} is a binary image mask, the row \code{covariates[i,]} must correspond to the location \code{x[i],y[i]} where \code{x = as.vector(raster.x(locations))} and \code{y = as.vector(raster.y(locations))}. } Note that if you only want to use prediction in order to generate a plot of the predicted values, it may be easier to use \code{\link{plot.ppm}} which calls this function and plots the results. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{plot.ppm}}, \code{\link{print.ppm}}, \code{\link{fitted.ppm}}, \code{\link{spatstat.options}} } \section{Warnings}{ The current implementation invokes \code{\link{predict.glm}} so that \bold{prediction is wrong} if the trend formula in \code{object} involves terms in \code{ns()}, \code{bs()} or \code{poly()}. This is a weakness of \code{\link{predict.glm}} itself! Error messages may be very opaque, as they tend to come from deep in the workings of \code{\link{predict.glm}}. If you are passing the \code{covariates} argument and the function crashes, it is advisable to start by checking that all the conditions listed above are satisfied.
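As a minimal sketch of a correctly-formatted call (using a hypothetical covariate \code{Zf}, supplied as a \code{function(x,y)}):
\preformatted{
  Zf <- function(x, y) { x + y }            # hypothetical covariate
  fit <- ppm(cells ~ Z, data=list(Z=Zf))    # model depending on Z
  # prediction locations as a data frame with columns x, y
  loc <- data.frame(x=c(0.2, 0.6), y=c(0.3, 0.7))
  # one row of covariate data for each row of 'loc'
  predict(fit, locations=loc, covariates=data.frame(Z=Zf(loc$x, loc$y)))
}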
} \examples{ \testonly{op <- spatstat.options(npixel=32)} m <- ppm(cells ~ polynom(x,y,2), Strauss(0.05)) trend <- predict(m, type="trend") if(human <- interactive()) { image(trend) points(cells) } cif <- predict(m, type="cif") if(human) { persp(cif) } mj <- ppm(japanesepines ~ harmonic(x,y,2)) se <- predict(mj, se=TRUE) # image of standard error ci <- predict(mj, interval="c") # two images, confidence interval # prediction interval for total number of points predict(mj, type="count", interval="p") # prediction intervals for counts in tiles predict(mj, window=quadrats(japanesepines, 3), type="count", interval="p") # prediction at arbitrary locations predict(mj, locations=data.frame(x=0.3, y=0.4)) X <- runifpoint(5, Window(japanesepines)) predict(mj, locations=X, se=TRUE) # multitype rr <- matrix(0.06, 2, 2) ma <- ppm(amacrine ~ marks, MultiStrauss(rr)) Z <- predict(ma) Z <- predict(ma, type="cif") predict(ma, locations=data.frame(x=0.8, y=0.5, marks="on"), type="cif") \testonly{spatstat.options(op)} } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/spatstat.core-package.Rd0000644000176200001440000014020414141456232020274 0ustar liggesusers\name{spatstat.core-package} \alias{spatstat.core-package} \alias{spatstat.core} \docType{package} \title{The spatstat.core Package} \description{ The \pkg{spatstat.core} package belongs to the \pkg{spatstat} family of packages. It contains the core functionality for statistical analysis and modelling of spatial data. } \details{ \pkg{spatstat} is a family of \R packages for the statistical analysis of spatial data. Its main focus is the analysis of spatial patterns of points in two-dimensional space. The original \pkg{spatstat} package has now been split into several sub-packages. This sub-package \pkg{spatstat.core} contains all the main user-level functions that perform statistical analysis and modelling of spatial data. (The main exception is that functions for linear networks are in the separate sub-package \pkg{spatstat.linnet}.) } \section{Structure of the spatstat family}{ The original \pkg{spatstat} package grew to be very large. It has now been divided into several \bold{sub-packages}: \itemize{ \item \pkg{spatstat.utils} containing basic utilities \item \pkg{spatstat.sparse} containing linear algebra utilities \item \pkg{spatstat.data} containing datasets \item \pkg{spatstat.geom} containing geometrical objects and geometrical operations \item \pkg{spatstat.core} containing the main functionality for statistical analysis and modelling of spatial data \item \pkg{spatstat.linnet} containing functions for spatial data on a linear network \item \pkg{spatstat}, which simply loads the other sub-packages listed above, and provides documentation. } When you install \pkg{spatstat}, these sub-packages are also installed. Then if you load the \pkg{spatstat} package by typing \code{library(spatstat)}, the other sub-packages listed above will automatically be loaded or imported. For an overview of all the functions available in the sub-packages of \pkg{spatstat}, see the help file for \code{"spatstat-package"} in the \pkg{spatstat} package.
Additionally there are several \bold{extension packages:} \itemize{ \item \pkg{spatstat.gui} for interactive graphics \item \pkg{spatstat.local} for local likelihood (including geographically weighted regression) \item \pkg{spatstat.Knet} for additional, computationally efficient code for linear networks \item \pkg{spatstat.sphere} (under development) for spatial data on a sphere, including spatial data on the earth's surface } The extension packages must be installed separately and loaded explicitly if needed. They also have separate documentation. } \section{Overview of Functionality in \pkg{spatstat.core}}{ The \pkg{spatstat} family of packages is designed to support a complete statistical analysis of spatial data. It supports \itemize{ \item creation, manipulation and plotting of point patterns; \item exploratory data analysis; \item spatial random sampling; \item simulation of point process models; \item parametric model-fitting; \item non-parametric smoothing and regression; \item formal inference (hypothesis tests, confidence intervals); \item model diagnostics. } For an overview, see the help file for \code{"spatstat-package"} in the \pkg{spatstat} package. Following is a list of the functionality provided in the \pkg{spatstat.core} package only. \bold{To simulate a random point pattern:} \tabular{ll}{ \code{\link[spatstat.core]{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link[spatstat.core]{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link[spatstat.core]{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link[spatstat.core]{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link[spatstat.core]{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link[spatstat.core]{runifdisc}} \tab generate \eqn{n} independent uniform random points in disc\cr \code{\link[spatstat.core]{rstrat}} \tab stratified random sample of points \cr \code{\link[spatstat.core]{rMaternI}} \tab simulate the \Matern Model I inhibition process\cr \code{\link[spatstat.core]{rMaternII}} \tab simulate the \Matern Model II inhibition process\cr \code{\link[spatstat.core]{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link[spatstat.core]{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link[spatstat.core]{rHardcore}} \tab simulate Hard Core process (perfect simulation)\cr \code{\link[spatstat.core]{rStraussHard}} \tab simulate Strauss-hard core process (perfect simulation)\cr \code{\link[spatstat.core]{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link[spatstat.core]{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link[spatstat.core]{rPenttinen}} \tab simulate Penttinen process (perfect simulation)\cr \code{\link[spatstat.core]{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link[spatstat.core]{rPoissonCluster}} \tab simulate a general Poisson cluster process\cr \code{\link[spatstat.core]{rMatClust}} \tab simulate the \Matern Cluster process\cr \code{\link[spatstat.core]{rThomas}} \tab simulate the Thomas process \cr \code{\link[spatstat.core]{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link[spatstat.core]{rCauchy}} \tab simulate Neyman-Scott Cauchy cluster process \cr \code{\link[spatstat.core]{rVarGamma}} \tab simulate Neyman-Scott Variance Gamma cluster process \cr \code{\link[spatstat.core]{rthin}} \tab 
random thinning \cr \code{\link[spatstat.core]{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link[spatstat.core]{rmh}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link[spatstat.core]{simulate.ppm}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link[spatstat.core]{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link[spatstat.core]{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{To randomly change an existing point pattern:} \tabular{ll}{ \code{\link[spatstat.core]{rshift}} \tab random shifting of points \cr \code{\link[spatstat.core]{rthin}} \tab random thinning \cr \code{\link[spatstat.core]{rlabel}} \tab random (re)labelling of a multitype point pattern \cr \code{\link[spatstat.core]{quadratresample}} \tab block resampling } \bold{To interrogate a point pattern:} \tabular{ll}{ \code{\link[spatstat.core]{density.ppp}} \tab kernel estimation of point pattern intensity\cr \code{\link[spatstat.core]{densityHeat.ppp}} \tab diffusion kernel estimation of point pattern intensity\cr \code{\link[spatstat.core]{Smooth.ppp}} \tab kernel smoothing of marks of point pattern\cr \code{\link[spatstat.core]{sharpen.ppp}} \tab data sharpening\cr } \bold{Manipulation of pixel images:} An object of class \code{"im"} represents a pixel image. \tabular{ll}{ \code{\link[spatstat.core]{blur}} \tab apply Gaussian blur to image\cr \code{\link[spatstat.core]{Smooth.im}} \tab apply Gaussian blur to image\cr \code{\link[spatstat.core]{transect.im}} \tab line transect of image \cr \code{\link[spatstat.geom]{pixelcentres}} \tab extract centres of pixels \cr \code{\link[spatstat.core]{rnoise}} \tab random pixel noise } \bold{Line segment patterns} An object of class \code{"psp"} represents a pattern of straight line segments. \tabular{ll}{ \code{\link[spatstat.core]{density.psp}} \tab kernel smoothing of line segments\cr \code{\link[spatstat.core]{rpoisline}} \tab generate a realisation of the Poisson line process inside a window } \bold{Tessellations} An object of class \code{"tess"} represents a tessellation. \tabular{ll}{ \code{\link[spatstat.core]{rpoislinetess}} \tab generate tessellation using Poisson line process } \bold{Three-dimensional point patterns} An object of class \code{"pp3"} represents a three-dimensional point pattern in a rectangular box. The box is represented by an object of class \code{"box3"}. \tabular{ll}{ \code{\link[spatstat.core]{runifpoint3}} \tab generate uniform random points in 3-D \cr \code{\link[spatstat.core]{rpoispp3}} \tab generate Poisson random points in 3-D \cr \code{\link[spatstat.core]{envelope.pp3}} \tab generate simulation envelopes for 3-D pattern \cr } \bold{Multi-dimensional space-time point patterns} An object of class \code{"ppx"} represents a point pattern in multi-dimensional space and/or time. 
\tabular{ll}{ \code{\link[spatstat.core]{runifpointx}} \tab generate uniform random points \cr \code{\link[spatstat.core]{rpoisppx}} \tab generate Poisson random points } \bold{Classical exploratory tools:} \tabular{ll}{ \code{\link[spatstat.core]{clarkevans}} \tab Clark and Evans aggregation index \cr \code{\link[spatstat.core]{fryplot}} \tab Fry plot \cr \code{\link[spatstat.core]{miplot}} \tab Morisita Index plot } \bold{Smoothing:} \tabular{ll}{ \code{\link[spatstat.core]{density.ppp}} \tab kernel smoothed density/intensity\cr \code{\link[spatstat.core]{relrisk}} \tab kernel estimate of relative risk\cr \code{\link[spatstat.core]{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link[spatstat.core]{bw.diggle}} \tab cross-validated bandwidth selection for \code{\link[spatstat.core]{density.ppp}}\cr \code{\link[spatstat.core]{bw.ppl}} \tab likelihood cross-validated bandwidth selection for \code{\link[spatstat.core]{density.ppp}}\cr \code{\link[spatstat.core]{bw.CvL}} \tab Cronie-Van Lieshout bandwidth selection for density estimation\cr \code{\link[spatstat.core]{bw.scott}} \tab Scott's rule of thumb for density estimation\cr \code{\link[spatstat.core]{bw.abram}} \tab Abramson's rule for adaptive bandwidths\cr \code{\link[spatstat.core]{bw.relrisk}} \tab cross-validated bandwidth selection for \code{\link[spatstat.core]{relrisk}} \cr \code{\link[spatstat.core]{bw.smoothppp}} \tab cross-validated bandwidth selection for \code{\link[spatstat.core]{Smooth.ppp}} \cr \code{\link[spatstat.core]{bw.frac}} \tab bandwidth selection using window geometry\cr \code{\link[spatstat.core]{bw.stoyan}} \tab Stoyan's rule of thumb for bandwidth for \code{\link[spatstat.core]{pcf}} } \bold{Modern exploratory tools:} \tabular{ll}{ \code{\link[spatstat.core]{clusterset}} \tab Allard-Fraley feature detection \cr \code{\link[spatstat.core]{nnclean}} \tab Byers-Raftery feature detection \cr \code{\link[spatstat.core]{sharpen.ppp}} \tab Choi-Hall data sharpening \cr \code{\link[spatstat.core]{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link[spatstat.core]{rho2hat}} \tab Kernel estimate of effect of two covariates\cr \code{\link[spatstat.core]{spatialcdf}} \tab Spatial cumulative distribution function\cr \code{\link[spatstat.core]{roc}} \tab Receiver operating characteristic curve } \bold{Summary statistics for a point pattern:} \tabular{ll}{ \code{\link[spatstat.core]{Fest}} \tab empty space function \eqn{F} \cr \code{\link[spatstat.core]{Gest}} \tab nearest neighbour distribution function \eqn{G} \cr \code{\link[spatstat.core]{Jest}} \tab \eqn{J}-function \eqn{J = (1-G)/(1-F)} \cr \code{\link[spatstat.core]{Kest}} \tab Ripley's \eqn{K}-function\cr \code{\link[spatstat.core]{Lest}} \tab Besag \eqn{L}-function\cr \code{\link[spatstat.core]{Tstat}} \tab Third order \eqn{T}-function \cr \code{\link[spatstat.core]{allstats}} \tab all four functions \eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} \cr \code{\link[spatstat.core]{pcf}} \tab pair correlation function \cr \code{\link[spatstat.core]{Kinhom}} \tab \eqn{K} for inhomogeneous point patterns \cr \code{\link[spatstat.core]{Linhom}} \tab \eqn{L} for inhomogeneous point patterns \cr \code{\link[spatstat.core]{pcfinhom}} \tab pair correlation for inhomogeneous patterns\cr \code{\link[spatstat.core]{Finhom}} \tab \eqn{F} for inhomogeneous point patterns \cr \code{\link[spatstat.core]{Ginhom}} \tab \eqn{G} for inhomogeneous point patterns \cr \code{\link[spatstat.core]{Jinhom}} \tab \eqn{J} for inhomogeneous point patterns \cr 
\code{\link[spatstat.core]{localL}} \tab Getis-Franklin neighbourhood density function\cr \code{\link[spatstat.core]{localK}} \tab neighbourhood K-function\cr \code{\link[spatstat.core]{localpcf}} \tab local pair correlation function\cr \code{\link[spatstat.core]{localKinhom}} \tab local \eqn{K} for inhomogeneous point patterns \cr \code{\link[spatstat.core]{localLinhom}} \tab local \eqn{L} for inhomogeneous point patterns \cr \code{\link[spatstat.core]{localpcfinhom}} \tab local pair correlation for inhomogeneous patterns\cr \code{\link[spatstat.core]{Ksector}} \tab Directional \eqn{K}-function\cr \code{\link[spatstat.core]{Kscaled}} \tab locally scaled \eqn{K}-function \cr \code{\link[spatstat.core]{Kest.fft}} \tab fast \eqn{K}-function using FFT for large datasets \cr \code{\link[spatstat.core]{Kmeasure}} \tab reduced second moment measure \cr \code{\link[spatstat.core]{envelope}} \tab simulation envelopes for a summary function \cr \code{\link[spatstat.core]{varblock}} \tab variances and confidence intervals\cr \tab for a summary function \cr \code{\link[spatstat.core]{lohboot}} \tab bootstrap for a summary function } Related facilities: \tabular{ll}{ \code{\link[spatstat.core]{plot.fv}} \tab plot a summary function\cr \code{\link[spatstat.core]{eval.fv}} \tab evaluate any expression involving summary functions\cr \code{\link[spatstat.core]{harmonise.fv}} \tab make functions compatible \cr \code{\link[spatstat.core]{eval.fasp}} \tab evaluate any expression involving an array of functions\cr \code{\link[spatstat.core]{with.fv}} \tab evaluate an expression for a summary function\cr \code{\link[spatstat.core]{Smooth.fv}} \tab apply smoothing to a summary function\cr \code{\link[spatstat.core]{deriv.fv}} \tab calculate derivative of a summary function\cr \code{\link[spatstat.core]{pool.fv}} \tab pool several estimates of a summary function\cr \code{\link[spatstat.core]{density.ppp}} \tab kernel smoothed density\cr \code{\link[spatstat.core]{densityHeat.ppp}} \tab diffusion kernel smoothed density\cr \code{\link[spatstat.core]{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link[spatstat.core]{relrisk}} \tab kernel estimate of relative risk\cr \code{\link[spatstat.core]{sharpen.ppp}} \tab data sharpening \cr \code{\link[spatstat.core]{rknn}} \tab theoretical distribution of nearest neighbour distance } \bold{Summary statistics for a multitype point pattern:} A multitype point pattern is represented by an object \code{X} of class \code{"ppp"} such that \code{marks(X)} is a factor. 
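For example, one can verify that a pattern is multitype (a minimal sketch using the \code{amacrine} dataset from \pkg{spatstat.data}):
\preformatted{
  is.multitype(amacrine)      # TRUE
  is.factor(marks(amacrine))  # TRUE: the marks form a factor
  levels(marks(amacrine))     # the possible types
}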
\tabular{ll}{ \code{\link[spatstat.core]{relrisk}} \tab kernel estimation of relative risk \cr \code{\link[spatstat.core]{scan.test}} \tab spatial scan test of elevated risk \cr \code{\link[spatstat.core]{Gcross},\link[spatstat.core]{Gdot},\link[spatstat.core]{Gmulti}} \tab multitype nearest neighbour distributions \eqn{G_{ij}, G_{i\bullet}}{G[i,j], G[i.]} \cr \code{\link[spatstat.core]{Kcross},\link[spatstat.core]{Kdot}, \link[spatstat.core]{Kmulti}} \tab multitype \eqn{K}-functions \eqn{K_{ij}, K_{i\bullet}}{K[i,j], K[i.]} \cr \code{\link[spatstat.core]{Lcross},\link[spatstat.core]{Ldot}} \tab multitype \eqn{L}-functions \eqn{L_{ij}, L_{i\bullet}}{L[i,j], L[i.]} \cr \code{\link[spatstat.core]{Jcross},\link[spatstat.core]{Jdot},\link[spatstat.core]{Jmulti}} \tab multitype \eqn{J}-functions \eqn{J_{ij}, J_{i\bullet}}{J[i,j],J[i.]} \cr \code{\link[spatstat.core]{pcfcross}} \tab multitype pair correlation function \eqn{g_{ij}}{g[i,j]} \cr \code{\link[spatstat.core]{pcfdot}} \tab multitype pair correlation function \eqn{g_{i\bullet}}{g[i.]} \cr \code{\link[spatstat.core]{pcfmulti}} \tab general pair correlation function \cr \code{\link[spatstat.core]{markconnect}} \tab marked connection function \eqn{p_{ij}}{p[i,j]} \cr \code{\link[spatstat.core]{alltypes}} \tab estimates of the above for all \eqn{i,j} pairs \cr \code{\link[spatstat.core]{Iest}} \tab multitype \eqn{I}-function\cr \code{\link[spatstat.core]{Kcross.inhom},\link[spatstat.core]{Kdot.inhom}} \tab inhomogeneous counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link[spatstat.core]{Lcross.inhom},\link[spatstat.core]{Ldot.inhom}} \tab inhomogeneous counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link[spatstat.core]{pcfcross.inhom},\link[spatstat.core]{pcfdot.inhom}} \tab inhomogeneous counterparts of \code{pcfcross}, \code{pcfdot} \cr \code{\link[spatstat.core]{localKcross},\link[spatstat.core]{localKdot}} \tab local counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link[spatstat.core]{localLcross},\link[spatstat.core]{localLdot}} \tab local counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link[spatstat.core]{localKcross.inhom},\link[spatstat.core]{localLcross.inhom}} \tab local counterparts of \code{Kcross.inhom}, \code{Lcross.inhom} } \bold{Summary statistics for a marked point pattern:} A marked point pattern is represented by an object \code{X} of class \code{"ppp"} with a component \code{X$marks}. The entries in the vector \code{X$marks} may be numeric, complex, string or any other atomic type. 
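For illustration, numeric marks can be attached to a point pattern and analysed with the functions listed below (a minimal sketch using the \code{cells} dataset):
\preformatted{
  X <- cells
  marks(X) <- runif(npoints(X))   # attach artificial numeric marks
  plot(markcorr(X))               # mark correlation function
  plot(markvario(X))              # mark variogram
}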
For numeric marks, there are the following functions: \tabular{ll}{ \code{\link[spatstat.core]{markmean}} \tab smoothed local average of marks \cr \code{\link[spatstat.core]{markvar}} \tab smoothed local variance of marks \cr \code{\link[spatstat.core]{markcorr}} \tab mark correlation function \cr \code{\link[spatstat.core]{markcrosscorr}} \tab mark cross-correlation function \cr \code{\link[spatstat.core]{markvario}} \tab mark variogram \cr \code{\link[spatstat.core]{markmarkscatter}} \tab mark-mark scatterplot \cr \code{\link[spatstat.core]{Kmark}} \tab mark-weighted \eqn{K} function \cr \code{\link[spatstat.core]{Emark}} \tab mark independence diagnostic \eqn{E(r)} \cr \code{\link[spatstat.core]{Vmark}} \tab mark independence diagnostic \eqn{V(r)} \cr \code{\link[spatstat.core]{nnmean}} \tab nearest neighbour mean index \cr \code{\link[spatstat.core]{nnvario}} \tab nearest neighbour mark variance index } For marks of any type, there are the following: \tabular{ll}{ \code{\link[spatstat.core]{Gmulti}} \tab multitype nearest neighbour distribution \cr \code{\link[spatstat.core]{Kmulti}} \tab multitype \eqn{K}-function \cr \code{\link[spatstat.core]{Jmulti}} \tab multitype \eqn{J}-function } Alternatively use \code{\link[spatstat.geom]{cut.ppp}} to convert a marked point pattern to a multitype point pattern. \bold{Programming tools:} \tabular{ll}{ \code{\link[spatstat.core]{marktable}} \tab tabulate the marks of neighbours in a point pattern } \bold{Summary statistics for a three-dimensional point pattern:} These are for 3-dimensional point pattern objects (class \code{pp3}). \tabular{ll}{ \code{\link[spatstat.core]{F3est}} \tab empty space function \eqn{F} \cr \code{\link[spatstat.core]{G3est}} \tab nearest neighbour function \eqn{G} \cr \code{\link[spatstat.core]{K3est}} \tab \eqn{K}-function \cr \code{\link[spatstat.core]{pcf3est}} \tab pair correlation function } Related facilities: \tabular{ll}{ \code{\link[spatstat.core]{envelope.pp3}} \tab simulation envelopes } \bold{Summary statistics for random sets:} These work for point patterns (class \code{ppp}), line segment patterns (class \code{psp}) or windows (class \code{owin}). \tabular{ll}{ \code{\link[spatstat.core]{Hest}} \tab spherical contact distribution \eqn{H} \cr \code{\link[spatstat.core]{Gfox}} \tab Foxall \eqn{G}-function \cr \code{\link[spatstat.core]{Jfox}} \tab Foxall \eqn{J}-function } \bold{Model fitting (Cox and cluster models)} Cluster process models (with homogeneous or inhomogeneous intensity) and Cox processes can be fitted by the function \code{\link[spatstat.core]{kppm}}. Its result is an object of class \code{"kppm"}. The fitted model can be printed, plotted, predicted, simulated and updated. 
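For example, a minimal model-fitting workflow (a sketch using the \code{redwood} dataset from \pkg{spatstat.data}) might be as follows; the individual methods are tabulated below:
\preformatted{
  fit <- kppm(redwood ~ 1, "Thomas")   # fit a stationary Thomas cluster process
  fit                                  # print the fitted model
  plot(fit)                            # diagnostic plot of the fit
  Xsim <- simulate(fit, nsim=2)        # two simulated realisations
  kppm(redwood ~ x, "Thomas")          # inhomogeneous version, log-linear trend
}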
\tabular{ll}{ \code{\link[spatstat.core]{kppm}} \tab Fit model\cr \code{\link[spatstat.core]{plot.kppm}} \tab Plot the fitted model\cr \code{\link[spatstat.core]{summary.kppm}} \tab Summarise the fitted model\cr \code{\link[spatstat.core]{fitted.kppm}} \tab Compute fitted intensity \cr \code{\link[spatstat.core]{predict.kppm}} \tab Compute fitted intensity \cr \code{\link[spatstat.core]{update.kppm}} \tab Update the model \cr \code{\link[spatstat.core]{improve.kppm}} \tab Refine the estimate of trend \cr \code{\link[spatstat.core]{simulate.kppm}} \tab Generate simulated realisations \cr \code{\link[spatstat.core]{vcov.kppm}} \tab Variance-covariance matrix of coefficients \cr \code{\link[spatstat.core:methods.kppm]{coef.kppm}} \tab Extract trend coefficients \cr \code{\link[spatstat.core:methods.kppm]{formula.kppm}} \tab Extract trend formula \cr \code{\link[spatstat.core]{parameters}} \tab Extract all model parameters \cr \code{\link[spatstat.core]{clusterfield}} \tab Compute offspring density \cr \code{\link[spatstat.core]{clusterradius}} \tab Radius of support of offspring density \cr \code{\link[spatstat.core]{Kmodel.kppm}} \tab \eqn{K} function of fitted model \cr \code{\link[spatstat.core]{pcfmodel.kppm}} \tab Pair correlation of fitted model } For model selection, you can also use the generic functions \code{\link[stats]{step}}, \code{\link[stats]{drop1}} and \code{\link[stats]{AIC}} on fitted point process models. For variable selection, see \code{\link[spatstat.core]{sdr}}. The theoretical models can also be simulated, for any choice of parameter values, using \code{\link[spatstat.core]{rThomas}}, \code{\link[spatstat.core]{rMatClust}}, \code{\link[spatstat.core]{rCauchy}}, \code{\link[spatstat.core]{rVarGamma}}, and \code{\link[spatstat.core]{rLGCP}}. Lower-level fitting functions include: \tabular{ll}{ \code{\link[spatstat.core]{lgcp.estK}} \tab fit a log-Gaussian Cox process model\cr \code{\link[spatstat.core]{lgcp.estpcf}} \tab fit a log-Gaussian Cox process model\cr \code{\link[spatstat.core]{thomas.estK}} \tab fit the Thomas process model \cr \code{\link[spatstat.core]{thomas.estpcf}} \tab fit the Thomas process model \cr \code{\link[spatstat.core]{matclust.estK}} \tab fit the \Matern Cluster process model \cr \code{\link[spatstat.core]{matclust.estpcf}} \tab fit the \Matern Cluster process model \cr \code{\link[spatstat.core]{cauchy.estK}} \tab fit a Neyman-Scott Cauchy cluster process \cr \code{\link[spatstat.core]{cauchy.estpcf}} \tab fit a Neyman-Scott Cauchy cluster process\cr \code{\link[spatstat.core]{vargamma.estK}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link[spatstat.core]{vargamma.estpcf}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link[spatstat.core]{mincontrast}} \tab low-level algorithm for fitting models \cr \tab by the method of minimum contrast } \bold{Model fitting (Poisson and Gibbs models)} Poisson point processes are the simplest models for point patterns. A Poisson model assumes that the points are stochastically independent. It may allow the points to have a non-uniform spatial density. The special case of a Poisson process with a uniform spatial density is often called Complete Spatial Randomness. Poisson point processes are included in the more general class of Gibbs point process models. In a Gibbs model, there is \emph{interaction} or dependence between points. Many different types of interaction can be specified. 
For a detailed explanation of how to fit Poisson or Gibbs point process models to point pattern data using \pkg{spatstat}, see Baddeley and Turner (2005b) or Baddeley (2008). \bold{To fit a Poisson or Gibbs point process model:} Model fitting in \pkg{spatstat} is performed mainly by the function \code{\link[spatstat.core]{ppm}}. Its result is an object of class \code{"ppm"}. Here are some examples, where \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{ppm(X)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{ppm(X ~ 1, Strauss(0.1))} \tab Stationary Strauss process \cr \code{ppm(X ~ x, Strauss(0.1))} \tab Strauss process with \cr \tab conditional intensity loglinear in \eqn{x} } It is also possible to fit models that depend on other covariates. \bold{Manipulating the fitted model:} \tabular{ll}{ \code{\link[spatstat.core]{plot.ppm}} \tab Plot the fitted model\cr \code{\link[spatstat.core]{predict.ppm}} \tab Compute the spatial trend and conditional intensity\cr \tab of the fitted point process model \cr \code{\link[spatstat.core]{coef.ppm}} \tab Extract the fitted model coefficients\cr \code{\link[spatstat.core]{parameters}} \tab Extract all model parameters\cr \code{\link[spatstat.core]{formula.ppm}} \tab Extract the trend formula\cr \code{\link[spatstat.core]{intensity.ppm}} \tab Compute fitted intensity \cr \code{\link[spatstat.core]{Kmodel.ppm}} \tab \eqn{K} function of fitted model \cr \code{\link[spatstat.core]{pcfmodel.ppm}} \tab pair correlation of fitted model \cr \code{\link[spatstat.core]{fitted.ppm}} \tab Compute fitted conditional intensity at quadrature points \cr \code{\link[spatstat.core]{residuals.ppm}} \tab Compute point process residuals at quadrature points \cr \code{\link[spatstat.core]{update.ppm}} \tab Update the fit \cr \code{\link[spatstat.core]{vcov.ppm}} \tab Variance-covariance matrix of estimates\cr \code{\link[spatstat.core]{rmh.ppm}} \tab Simulate from fitted model \cr \code{\link[spatstat.core]{simulate.ppm}} \tab Simulate from fitted model \cr \code{\link[spatstat.core]{print.ppm}} \tab Print basic information about a fitted model\cr \code{\link[spatstat.core]{summary.ppm}} \tab Summarise a fitted model\cr \code{\link[spatstat.core]{effectfun}} \tab Compute the fitted effect of one covariate\cr \code{\link[spatstat.core]{logLik.ppm}} \tab log-likelihood or log-pseudolikelihood\cr \code{\link[spatstat.core]{anova.ppm}} \tab Analysis of deviance \cr \code{\link[spatstat.core]{model.frame.ppm}} \tab Extract data frame used to fit model \cr \code{\link[spatstat.core]{model.images}} \tab Extract spatial data used to fit model \cr \code{\link[spatstat.core]{model.depends}} \tab Identify variables in the model \cr \code{\link[spatstat.core]{as.interact}} \tab Interpoint interaction component of model \cr \code{\link[spatstat.core]{fitin}} \tab Extract fitted interpoint interaction \cr \code{\link[spatstat.core]{is.hybrid}} \tab Determine whether the model is a hybrid \cr \code{\link[spatstat.core]{valid.ppm}} \tab Check the model is a valid point process \cr \code{\link[spatstat.core]{project.ppm}} \tab Ensure the model is a valid point process } For model selection, you can also use the generic functions \code{\link[stats]{step}}, \code{\link[stats]{drop1}} and \code{\link[stats]{AIC}} on fitted point process models. 
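For example (a minimal sketch), stepwise selection by AIC can be applied directly to a fitted model:
\preformatted{
  fit <- ppm(swedishpines ~ polynom(x, y, 2))
  drop1(fit)    # effect of dropping each term
  step(fit)     # backward stepwise selection guided by AIC
}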
For variable selection, see \code{\link[spatstat.core]{sdr}}. See \code{\link[spatstat.geom]{spatstat.options}} to control plotting of the fitted model. \bold{To specify a point process model:} The first order ``trend'' of the model is determined by an \R language formula. The formula specifies the form of the \emph{logarithm} of the trend. \tabular{ll}{ \code{X ~ 1} \tab No trend (stationary) \cr \code{X ~ x} \tab Loglinear trend \eqn{\lambda(x,y) = \exp(\alpha + \beta x)}{lambda(x,y) = exp(alpha + beta * x)} \cr \tab where \eqn{x,y} are Cartesian coordinates \cr \code{X ~ polynom(x,y,3)} \tab Log-cubic polynomial trend \cr \code{X ~ harmonic(x,y,2)} \tab Log-harmonic polynomial trend \cr \code{X ~ Z} \tab Loglinear function of covariate \code{Z} \cr \tab \eqn{\lambda(x,y) = \exp(\alpha + \beta Z(x,y))}{lambda(x,y) = exp(alpha + beta * Z(x,y))} } The higher order (``interaction'') components are described by an object of class \code{"interact"}. Such objects are created by: \tabular{ll}{ \code{\link[spatstat.core]{Poisson}()} \tab the Poisson point process\cr \code{\link[spatstat.core]{AreaInter}()} \tab Area-interaction process\cr \code{\link[spatstat.core]{BadGey}()} \tab multiscale Geyer process\cr \code{\link[spatstat.core]{Concom}()} \tab connected component interaction\cr \code{\link[spatstat.core]{DiggleGratton}() } \tab Diggle-Gratton potential \cr \code{\link[spatstat.core]{DiggleGatesStibbard}() } \tab Diggle-Gates-Stibbard potential \cr \code{\link[spatstat.core]{Fiksel}()} \tab Fiksel pairwise interaction process\cr \code{\link[spatstat.core]{Geyer}()} \tab Geyer's saturation process\cr \code{\link[spatstat.core]{Hardcore}()} \tab Hard core process\cr \code{\link[spatstat.core]{HierHard}()} \tab Hierarchical multitype hard core process\cr \code{\link[spatstat.core]{HierStrauss}()} \tab Hierarchical multitype Strauss process\cr \code{\link[spatstat.core]{HierStraussHard}()} \tab Hierarchical multitype Strauss-hard core process\cr \code{\link[spatstat.core]{Hybrid}()} \tab Hybrid of several interactions\cr \code{\link[spatstat.core]{LennardJones}() } \tab Lennard-Jones potential \cr \code{\link[spatstat.core]{MultiHard}()} \tab multitype hard core process \cr \code{\link[spatstat.core]{MultiStrauss}()} \tab multitype Strauss process \cr \code{\link[spatstat.core]{MultiStraussHard}()} \tab multitype Strauss/hard core process \cr \code{\link[spatstat.core]{OrdThresh}()} \tab Ord process, threshold potential\cr \code{\link[spatstat.core]{Ord}()} \tab Ord model, user-supplied potential \cr \code{\link[spatstat.core]{PairPiece}()} \tab pairwise interaction, piecewise constant \cr \code{\link[spatstat.core]{Pairwise}()} \tab pairwise interaction, user-supplied potential\cr \code{\link[spatstat.core]{Penttinen}()} \tab Penttinen pairwise interaction\cr \code{\link[spatstat.core]{SatPiece}()} \tab Saturated pair model, piecewise constant potential\cr \code{\link[spatstat.core]{Saturated}()} \tab Saturated pair model, user-supplied potential\cr \code{\link[spatstat.core]{Softcore}()} \tab pairwise interaction, soft core potential\cr \code{\link[spatstat.core]{Strauss}()} \tab Strauss process \cr \code{\link[spatstat.core]{StraussHard}()} \tab Strauss/hard core point process \cr \code{\link[spatstat.core]{Triplets}()} \tab Geyer triplets process } Note that it is also possible to combine several such interactions using \code{\link[spatstat.core]{Hybrid}}.
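For example (a minimal sketch using the \code{swedishpines} dataset), an interaction object is passed as the second argument to \code{ppm}:
\preformatted{
  ppm(swedishpines ~ 1, Strauss(r=7))       # stationary Strauss process
  ppm(swedishpines ~ x, Geyer(r=7, sat=2))  # log-linear trend plus saturation interaction
  # hybrid of two interactions
  ppm(swedishpines ~ 1, Hybrid(S=Strauss(5), G=Geyer(10, 2)))
}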
\bold{Simulation and goodness-of-fit for fitted models:} \tabular{ll}{ \code{\link[spatstat.core]{rmh.ppm}} \tab simulate realisations of a fitted model \cr \code{\link[spatstat.core]{simulate.ppm}} \tab simulate realisations of a fitted model \cr \code{\link[spatstat.core]{envelope}} \tab compute simulation envelopes for a fitted model } \bold{Model fitting (determinantal point process models)} Code for fitting \emph{determinantal point process models} has recently been added to \pkg{spatstat}. For information, see the help file for \code{\link[spatstat.core]{dppm}}. \bold{Model fitting (spatial logistic regression)} Pixel-based spatial logistic regression is an alternative technique for analysing spatial point patterns that is widely used in Geographical Information Systems. It is approximately equivalent to fitting a Poisson point process model. In pixel-based logistic regression, the spatial domain is divided into small pixels, the presence or absence of a data point in each pixel is recorded, and logistic regression is used to model the presence/absence indicators as a function of any covariates. Facilities for performing spatial logistic regression are provided in \pkg{spatstat} for comparison purposes. \bold{Fitting a spatial logistic regression} Spatial logistic regression is performed by the function \code{\link[spatstat.core]{slrm}}. Its result is an object of class \code{"slrm"}. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{simulate}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. For example, if \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{slrm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{slrm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{slrm(X ~ Z)} \tab Poisson process with \cr \tab intensity loglinear in covariate \code{Z} } \bold{Manipulating a fitted spatial logistic regression} \tabular{ll}{ \code{\link[spatstat.core]{anova.slrm}} \tab Analysis of deviance \cr \code{\link[spatstat.core]{coef.slrm}} \tab Extract fitted coefficients \cr \code{\link[spatstat.core]{vcov.slrm}} \tab Variance-covariance matrix of fitted coefficients \cr \code{\link[spatstat.core]{fitted.slrm}} \tab Compute fitted probabilities or intensity \cr \code{\link[spatstat.core]{logLik.slrm}} \tab Evaluate loglikelihood of fitted model \cr \code{\link[spatstat.core]{plot.slrm}} \tab Plot fitted probabilities or intensity \cr \code{\link[spatstat.core]{predict.slrm}} \tab Compute predicted probabilities or intensity with new data \cr \code{\link[spatstat.core]{simulate.slrm}} \tab Simulate model } There are many other undocumented methods for this class, including methods for \code{print}, \code{update}, \code{formula} and \code{terms}. Stepwise model selection is possible using \code{step} or \code{stepAIC}. For variable selection, see \code{\link[spatstat.core]{sdr}}. \bold{Simulation} There are many ways to generate a random point pattern, line segment pattern, pixel image or tessellation in \pkg{spatstat}. 
\bold{Random point patterns:} \tabular{ll}{ \code{\link[spatstat.core]{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link[spatstat.core]{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link[spatstat.core]{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link[spatstat.core]{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link[spatstat.core]{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link[spatstat.core]{runifdisc}} \tab generate \eqn{n} independent uniform random points in disc\cr \code{\link[spatstat.core]{rstrat}} \tab stratified random sample of points \cr \code{\link[spatstat.core]{rMaternI}} \tab simulate the \Matern Model I inhibition process\cr \code{\link[spatstat.core]{rMaternII}} \tab simulate the \Matern Model II inhibition process\cr \code{\link[spatstat.core]{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link[spatstat.core]{rHardcore}} \tab simulate hard core process (perfect simulation)\cr \code{\link[spatstat.core]{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link[spatstat.core]{rStraussHard}} \tab simulate Strauss-hard core process (perfect simulation)\cr \code{\link[spatstat.core]{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link[spatstat.core]{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link[spatstat.core]{rPenttinen}} \tab simulate Penttinen process (perfect simulation)\cr \code{\link[spatstat.core]{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link[spatstat.core]{rMatClust}} \tab simulate the \Matern Cluster process\cr \code{\link[spatstat.core]{rThomas}} \tab simulate the Thomas process \cr \code{\link[spatstat.core]{rLGCP}} \tab simulate the log-Gaussian Cox process \cr \code{\link[spatstat.core]{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link[spatstat.core]{rCauchy}} \tab simulate Neyman-Scott process with Cauchy clusters \cr \code{\link[spatstat.core]{rVarGamma}} \tab simulate Neyman-Scott process with Variance Gamma clusters \cr \code{\link[spatstat.core]{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link[spatstat.core]{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link[spatstat.core]{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{Resampling a point pattern:} \tabular{ll}{ \code{\link[spatstat.core]{quadratresample}} \tab block resampling \cr \code{\link[spatstat.core]{rshift}} \tab random shifting of (subsets of) points\cr \code{\link[spatstat.core]{rthin}} \tab random thinning } See also \code{\link[spatstat.core]{varblock}} for estimating the variance of a summary statistic by block resampling, and \code{\link[spatstat.core]{lohboot}} for another bootstrap technique. \bold{Fitted point process models:} If you have fitted a point process model to a point pattern dataset, the fitted model can be simulated. Cluster process models are fitted by the function \code{\link[spatstat.core]{kppm}} yielding an object of class \code{"kppm"}. To generate one or more simulated realisations of this fitted model, use \code{\link[spatstat.core]{simulate.kppm}}. Gibbs point process models are fitted by the function \code{\link[spatstat.core]{ppm}} yielding an object of class \code{"ppm"}. 
To generate a simulated realisation of this fitted model, use \code{\link[spatstat.core]{rmh}}. To generate one or more simulated realisations of the fitted model, use \code{\link[spatstat.core]{simulate.ppm}}. \bold{Other random patterns:} \tabular{ll}{ \code{\link[spatstat.core]{rpoisline}} \tab simulate the Poisson line process within a window \cr \code{\link[spatstat.core]{rpoislinetess}} \tab generate random tessellation using Poisson line process \cr \code{\link[spatstat.core]{rMosaicSet}} \tab generate random set by selecting some tiles of a tessellation \cr \code{\link[spatstat.core]{rMosaicField}} \tab generate random pixel image by assigning random values in each tile of a tessellation } \bold{Simulation-based inference} \tabular{ll}{ \code{\link[spatstat.core]{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link[spatstat.core]{bits.envelope}} \tab critical envelope for balanced two-stage Monte Carlo test \cr \code{\link[spatstat.core]{qqplot.ppm}} \tab diagnostic plot for interpoint interaction \cr \code{\link[spatstat.core]{scan.test}} \tab spatial scan statistic/test \cr \code{\link[spatstat.core]{studpermu.test}} \tab studentised permutation test\cr \code{\link[spatstat.core]{segregation.test}} \tab test of segregation of types } \bold{Hypothesis tests:} \tabular{ll}{ \code{\link[spatstat.core]{quadrat.test}} \tab \eqn{\chi^2}{chi^2} goodness-of-fit test on quadrat counts \cr \code{\link[spatstat.core]{clarkevans.test}} \tab Clark and Evans test \cr \code{\link[spatstat.core]{cdf.test}} \tab Spatial distribution goodness-of-fit test\cr \code{\link[spatstat.core]{berman.test}} \tab Berman's goodness-of-fit tests\cr \code{\link[spatstat.core]{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link[spatstat.core]{scan.test}} \tab spatial scan statistic/test \cr \code{\link[spatstat.core]{dclf.test}} \tab Diggle-Cressie-Loosmore-Ford test \cr \code{\link[spatstat.core]{mad.test}} \tab Mean Absolute Deviation test \cr \code{\link[spatstat.core]{anova.ppm}} \tab Analysis of Deviance for point process models } \bold{More recently-developed tests:} \tabular{ll}{ \code{\link[spatstat.core]{dg.test}} \tab Dao-Genton test \cr \code{\link[spatstat.core]{bits.test}} \tab Balanced independent two-stage test \cr \code{\link[spatstat.core]{dclf.progress}} \tab Progress plot for DCLF test \cr \code{\link[spatstat.core]{mad.progress}} \tab Progress plot for MAD test \cr } \bold{Sensitivity diagnostics:} Classical measures of model sensitivity such as leverage and influence have been adapted to point process models. \tabular{ll}{ \code{\link[spatstat.core]{leverage.ppm}} \tab Leverage for point process model\cr \code{\link[spatstat.core]{influence.ppm}} \tab Influence for point process model\cr \code{\link[spatstat.core]{dfbetas.ppm}} \tab Parameter influence\cr \code{\link[spatstat.core]{dffit.ppm}} \tab Effect change diagnostic } \bold{Diagnostics for covariate effect:} Classical diagnostics for covariate effects have been adapted to point process models. 
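For example (a sketch using the \code{bei} data and the covariate images in \code{bei.extra}, both from \pkg{spatstat.data}); the available tools are tabulated below:
\preformatted{
  fit <- ppm(bei ~ grad, data=bei.extra)   # model with terrain slope only
  plot(parres(fit, "grad"))                # partial residual plot for 'grad'
  plot(addvar(fit, bei.extra$elev))        # added variable plot for elevation
}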
\tabular{ll}{ \code{\link[spatstat.core]{parres}} \tab Partial residual plot\cr \code{\link[spatstat.core]{addvar}} \tab Added variable plot \cr \code{\link[spatstat.core]{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link[spatstat.core]{rho2hat}} \tab Kernel estimate of covariate effect (bivariate) } \bold{Residual diagnostics:} Residuals for a fitted point process model, and diagnostic plots based on the residuals, were introduced in Baddeley et al (2005) and Baddeley, Rubak and \Moller (2011). Type \code{demo(diagnose)} for a demonstration of the diagnostics features. \tabular{ll}{ \code{\link[spatstat.core]{diagnose.ppm}} \tab diagnostic plots for spatial trend\cr \code{\link[spatstat.core]{qqplot.ppm}} \tab diagnostic Q-Q plot for interpoint interaction\cr \code{\link[spatstat.data]{residualspaper}} \tab examples from Baddeley et al (2005) \cr \code{\link[spatstat.core]{Kcom}} \tab model compensator of \eqn{K} function \cr \code{\link[spatstat.core]{Gcom}} \tab model compensator of \eqn{G} function \cr \code{\link[spatstat.core]{Kres}} \tab score residual of \eqn{K} function \cr \code{\link[spatstat.core]{Gres}} \tab score residual of \eqn{G} function \cr \code{\link[spatstat.core]{psst}} \tab pseudoscore residual of summary function \cr \code{\link[spatstat.core]{psstA}} \tab pseudoscore residual of empty space function \cr \code{\link[spatstat.core]{psstG}} \tab pseudoscore residual of \eqn{G} function \cr \code{\link[spatstat.core]{compareFit}} \tab compare compensators of several fitted models } \bold{Resampling and randomisation procedures} You can build your own tests based on randomisation and resampling using the following capabilities: \tabular{ll}{ \code{\link[spatstat.core]{quadratresample}} \tab block resampling \cr \code{\link[spatstat.core]{rshift}} \tab random shifting of (subsets of) points\cr \code{\link[spatstat.core]{rthin}} \tab random thinning } } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. } \author{ \spatstatAuthors. } \section{Acknowledgements}{ Kasper Klitgaard Berthelsen, Ottmar Cronie, Tilman Davies, Julian Gilbey, Yongtao Guan, Ute Hahn, Kassel Hingee, Abdollah Jalilian, Marie-Colette van Lieshout, Greg McSwiggan, Tuomas Rajala, Suman Rakshit, Dominic Schuhmacher, Rasmus Waagepetersen and Hangsheng Wang made substantial contributions of code. For comments, corrections, bug alerts and suggestions, we thank Monsuru Adepeju, Corey Anderson, Ang Qi Wei, Ryan Arellano, Jens \ifelse{latex}{\out{{\AA}str{\" o}m}}{Astrom}, Robert Aue, Marcel Austenfeld, Sandro Azaele, Malissa Baddeley, Guy Bayegnak, Colin Beale, Melanie Bell, Thomas Bendtsen, Ricardo Bernhardt, Andrew Bevan, Brad Biggerstaff, Anders Bilgrau, Leanne Bischof, Christophe Biscio, Roger Bivand, Jose M. Blanco Moreno, Florent Bonneu, Jordan Brown, Ian Buller, Julian Burgos, Simon Byers, Ya-Mei Chang, Jianbao Chen, Igor Chernayavsky, Y.C. 
Chin, Bjarke Christensen, \ifelse{latex}{\out{Luc\'{\i{}}a}}{Lucia} Cobo Sanchez, Jean-Francois Coeurjolly, Kim Colyvas, Hadrien Commenges, Rochelle Constantine, Robin Corria Ainslie, Richard Cotton, Marcelino de la Cruz, Peter Dalgaard, Mario D'Antuono, Sourav Das, Peter Diggle, Patrick Donnelly, Ian Dryden, Stephen Eglen, Ahmed El-Gabbas, Belarmain Fandohan, Olivier Flores, David Ford, Peter Forbes, Shane Frank, Janet Franklin, Funwi-Gabga Neba, Oscar Garcia, Agnes Gault, Jonas Geldmann, Marc Genton, Shaaban Ghalandarayeshi, Jason Goldstick, Pavel Grabarnik, C. Graf, Ute Hahn, Andrew Hardegen, Martin \Bogsted Hansen, Martin Hazelton, Juha Heikkinen, Mandy Hering, Markus Herrmann, Maximilian Hesselbarth, Paul Hewson, Hamidreza Heydarian, Kurt Hornik, Philipp Hunziker, Jack Hywood, Ross Ihaka, \ifelse{latex}{\out{\u{C}enk I\c{c}\"{o}s}}{Cenk Icos}, Aruna Jammalamadaka, Robert John-Chandran, Devin Johnson, Mahdieh Khanmohammadi, Bob Klaver, Lily Kozmian-Ledward, Peter Kovesi, Mike Kuhn, Jeff Laake, Robert Lamb, \ifelse{latex}{\out{Fr\'{e}d\'{e}ric}}{Frederic} Lavancier, Tom Lawrence, Tomas Lazauskas, Jonathan Lee, George Leser, Angela Li, Li Haitao, George Limitsios, Andrew Lister, Nestor Luambua, Ben Madin, Martin Maechler, Kiran Marchikanti, Jeff Marcus, Robert Mark, Peter McCullagh, Monia Mahling, Jorge Mateu Mahiques, Ulf Mehlig, Frederico Mestre, Sebastian Wastl Meyer, Mi Xiangcheng, Lore De Middeleer, Robin Milne, Enrique Miranda, Jesper \Moller, Annie \ifelse{latex}{\out{Molli{\'e}}}{Mollie}, Ines Moncada, Mehdi Moradi, Virginia Morera Pujol, Erika Mudrak, Gopalan Nair, Nader Najari, Nicoletta Nava, Linda Stougaard Nielsen, Felipe Nunes, Jens Randel Nyengaard, Jens \Oehlschlaegel, Thierry Onkelinx, Sean O'Riordan, Evgeni Parilov, Jeff Picka, Nicolas Picard, Tim Pollington, Mike Porter, Sergiy Protsiv, Adrian Raftery, Ben Ramage, Pablo Ramon, Xavier Raynaud, Nicholas Read, Matt Reiter, Ian Renner, Tom Richardson, Brian Ripley, Ted Rosenbaum, Barry Rowlingson, Jason Rudokas, Tyler Rudolph, John Rudge, Christopher Ryan, Farzaneh Safavimanesh, Aila \Sarkka, Cody Schank, Katja Schladitz, Sebastian Schutte, Bryan Scott, Olivia Semboli, \ifelse{latex}{\out{Fran\c{c}ois S\'{e}m\'{e}curbe}}{Francois Semecurbe}, Vadim Shcherbakov, Shen Guochun, Shi Peijian, Harold-Jeffrey Ship, Tammy L Silva, Ida-Maria Sintorn, Yong Song, Malte Spiess, Mark Stevenson, Kaspar Stucki, Jan Sulavik, Michael Sumner, P. Surovy, Ben Taylor, Thordis Linda Thorarinsdottir, Leigh Torres, Berwin Turlach, Torben Tvedebrink, Kevin Ummer, Medha Uppala, Andrew van Burgel, Tobias Verbeke, Mikko Vihtakari, Alexendre Villers, Fabrice Vinatier, Maximilian Vogtland, Sasha Voss, Sven Wagner, Hao Wang, H. Wendrock, Jan Wild, Carl G. Witthoft, Selene Wong, Maxime Woringer, Luke Yates, Mike Zamboni and Achim Zeileis. } \keyword{spatial} \keyword{package} spatstat.core/man/suffstat.Rd0000644000176200001440000001072314141452520015746 0ustar liggesusers\name{suffstat} \alias{suffstat} \title{Sufficient Statistic of Point Process Model} \description{ The canonical sufficient statistic of a point process model is evaluated for a given point pattern. } \usage{ suffstat(model, X=data.ppm(model)) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}). } \item{X}{ A point pattern (object of class \code{"ppp"}). } } \value{ A numeric vector of sufficient statistics. The entries correspond to the model coefficients \code{coef(model)}. 
} \details{ The canonical sufficient statistic of \code{model} is evaluated for the point pattern \code{X}. This computation is useful for various Monte Carlo methods. Here \code{model} should be a point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}), typically obtained from the model-fitting function \code{\link{ppm}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). Every point process model fitted by \code{\link{ppm}} has a probability density of the form \deqn{f(x) = Z(\theta) \exp(\theta^T S(x))}{f(x) = Z(theta) exp(theta * S(x))} where \eqn{x} denotes a typical realisation (i.e. a point pattern), \eqn{\theta}{theta} is the vector of model coefficients, \eqn{Z(\theta)}{Z(theta)} is a normalising constant, and \eqn{S(x)} is a function of the realisation \eqn{x}, called the ``canonical sufficient statistic'' of the model. For example, the stationary Poisson process has canonical sufficient statistic \eqn{S(x)=n(x)}, the number of points in \eqn{x}. The stationary Strauss process with interaction range \eqn{r} (and fitted with no edge correction) has canonical sufficient statistic \eqn{S(x)=(n(x),s(x))} where \eqn{s(x)} is the number of pairs of points in \eqn{x} which are closer than a distance \eqn{r} to each other. \code{suffstat(model, X)} returns the value of \eqn{S(x)}, where \eqn{S} is the canonical sufficient statistic associated with \code{model}, evaluated when \eqn{x} is the given point pattern \code{X}. The result is a numeric vector, with entries which correspond to the entries of the coefficient vector \code{coef(model)}. The sufficient statistic \eqn{S} does not depend on the fitted coefficients of the model. However it does depend on the irregular parameters which are fixed in the original call to \code{\link{ppm}}, for example, the interaction range \code{r} of the Strauss process. The sufficient statistic also depends on the edge correction that was used to fit the model. For example in a Strauss process, \itemize{ \item If the model is fitted with \code{correction="none"}, the sufficient statistic is \eqn{S(x) = (n(x), s(x))} where \eqn{n(x)} is the number of points and \eqn{s(x)} is the number of pairs of points which are closer than \eqn{r} units apart. \item If the model is fitted with \code{correction="periodic"}, the sufficient statistic is the same as above, except that distances are measured in the periodic sense. \item If the model is fitted with \code{correction="translate"}, then \eqn{n(x)} is unchanged but \eqn{s(x)} is replaced by a weighted sum (the sum of the translation correction weights for all pairs of points which are closer than \eqn{r} units apart). \item If the model is fitted with \code{correction="border"} (the default), then points lying less than \eqn{r} units from the boundary of the observation window are treated as fixed. Thus \eqn{n(x)} is replaced by the number \eqn{n_r(x)}{n[r](x)} of points lying at least \eqn{r} units from the boundary of the observation window, and \eqn{s(x)} is replaced by the number \eqn{s_r(x)}{s[r](x)} of pairs of points, which are closer than \eqn{r} units apart, and at least one of which lies more than \eqn{r} units from the boundary of the observation window. } Non-finite values of the sufficient statistic (\code{NA} or \code{-Inf}) may be returned if the point pattern \code{X} is not a possible realisation of the model (i.e. 
if \code{X} has zero probability of occurring under \code{model} for all values of the canonical coefficients \eqn{\theta}{theta}). } \seealso{ \code{\link{ppm}} } \examples{ fitS <- ppm(swedishpines~1, Strauss(7)) suffstat(fitS) X <- rpoispp(intensity(swedishpines), win=Window(swedishpines)) suffstat(fitS, X) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/logLik.slrm.Rd0000644000176200001440000000324014141452520016300 0ustar liggesusers\name{logLik.slrm} \Rdversion{1.1} \alias{logLik.slrm} \title{ Loglikelihood of Spatial Logistic Regression } \description{ Computes the (maximised) loglikelihood of a fitted Spatial Logistic Regression model. } \usage{ \method{logLik}{slrm}(object, ..., adjust = TRUE) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } \item{adjust}{ Logical value indicating whether to adjust the loglikelihood of the model to make it comparable with a point process likelihood. See Details. } } \details{ This is a method for \code{\link[stats]{logLik}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It computes the log-likelihood of a fitted spatial logistic regression model. If \code{adjust=FALSE}, the loglikelihood is computed using the standard formula for the loglikelihood of a logistic regression model for a finite set of (pixel) observations. If \code{adjust=TRUE} then the loglikelihood is adjusted so that it is approximately comparable with the likelihood of a point process in continuous space, by subtracting the value \eqn{n \log(a)}{n * log(a)} where \eqn{n} is the number of points in the original point pattern dataset, and \eqn{a} is the area of one pixel. } \value{ A numerical value. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) logLik(fit) logLik(fit, adjust=FALSE) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/dffit.ppm.Rd0000644000176200001440000000355614141452520016004 0ustar liggesusers\name{dffit.ppm} \alias{dffit} \alias{dffit.ppm} \title{ Case Deletion Effect Measure of Fitted Model } \description{ Computes the case deletion effect measure \code{DFFIT} for a fitted model. } \usage{ dffit(object, \dots) \method{dffit}{ppm}(object, \dots, collapse = FALSE, dfb = NULL) } \arguments{ \item{object}{ A fitted model, such as a point process model (object of class \code{"ppm"}). } \item{\dots}{ Additional arguments passed to \code{\link{dfbetas.ppm}}. } \item{collapse}{ Logical value specifying whether to collapse the vector-valued measure to a scalar-valued measure by adding all the components. } \item{dfb}{ Optional. The result of \code{dfbetas(object)}, if it has already been computed. } } \details{ The case deletion effect measure \code{DFFIT} is a model diagnostic traditionally used for regression models. In that context, \code{DFFIT[i,j]} is the negative change, in the value of the \code{j}th term in the linear predictor, that would occur if the \code{i}th data value was deleted. It is closely related to the diagnostic \code{DFBETA}. For a spatial point process model, \code{dffit} computes the analogous spatial case deletion diagnostic, described in Baddeley, Rubak and Turner (2019). } \value{ A measure (object of class \code{"msr"}). } \references{ Baddeley, A., Rubak, E. and Turner, R. 
(2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{dfbetas.ppm}} } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) \testonly{fit <- ppm(X ~x+y, nd=16)} plot(dffit(fit)) plot(dffit(fit, collapse=TRUE)) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat.core/man/plot.fv.Rd0000644000176200001440000002115114141452520015474 0ustar liggesusers\name{plot.fv} \alias{plot.fv} \title{Plot Function Values} \description{ Plot method for the class \code{"fv"}. } \usage{ \method{plot}{fv}(x, fmla, \dots, subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{fmla}{ an R language formula determining which variables or expressions are plotted. Either a formula object, or a string that can be parsed as a formula. See Details. } \item{subset}{ (optional) subset of rows of the data frame that will be plotted. } \item{lty}{ (optional) numeric vector of values of the graphical parameter \code{lty} controlling the line style of each plot. } \item{col}{ (optional) numeric vector of values of the graphical parameter \code{col} controlling the colour of each plot. } \item{lwd}{ (optional) numeric vector of values of the graphical parameter \code{lwd} controlling the line width of each plot. } \item{xlim}{ (optional) range of x axis } \item{ylim}{ (optional) range of y axis } \item{xlab}{ (optional) label for x axis } \item{ylab}{ (optional) label for y axis } \item{\dots}{ Extra arguments passed to \code{plot.default}. } \item{ylim.covers}{ Optional vector of \eqn{y} values that must be included in the \eqn{y} axis. For example \code{ylim.covers=0} will ensure that the \eqn{y} axis includes the origin. } \item{legend}{ Logical flag or \code{NULL}. If \code{legend=TRUE}, the algorithm plots a legend in the top left corner of the plot, explaining the meaning of the different line types and colours. } \item{legendpos}{ The position of the legend. Either a character string keyword (see \code{\link[graphics]{legend}} for keyword options) or a pair of coordinates in the format \code{list(x,y)}. Alternatively if \code{legendpos="float"}, a location will be selected inside the plot region, avoiding the graphics. } \item{legendavoid}{ Whether to avoid collisions between the legend and the graphics. Logical value. If \code{TRUE}, the code will check for collisions between the legend box and the graphics, and will override \code{legendpos} if a collision occurs. If \code{FALSE}, the value of \code{legendpos} is always respected. } \item{legendmath}{ Logical. If \code{TRUE}, the legend will display the mathematical notation for each curve. If \code{FALSE}, the legend text is the identifier (column name) for each curve. } \item{legendargs}{ Named list containing additional arguments to be passed to \code{\link{legend}} controlling the appearance of the legend. 
} \item{shade}{ A character vector giving the names of two columns of \code{x}, or another type of index that identifies two columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. The object \code{x} may or may not contain two columns which are designated as boundaries for shading; they are identified by \code{fvnames(x, ".s")}. The default is to shade between these two curves if they exist. To suppress this behaviour, set \code{shade=NULL}. } \item{shadecol}{ The colour to be used in the \code{shade} plot. A character string or an integer specifying a colour. } \item{add}{ Logical. Whether the plot should be added to an existing plot } \item{log}{ A character string which contains \code{"x"} if the x axis is to be logarithmic, \code{"y"} if the y axis is to be logarithmic and \code{"xy"} or \code{"yx"} if both axes are to be logarithmic. } \item{mathfont}{ Character string. The font to be used for mathematical expressions in the axis labels and the legend. } \item{limitsonly}{ Logical. If \code{FALSE}, plotting is performed normally. If \code{TRUE}, no plotting is performed at all; just the \eqn{x} and \eqn{y} limits of the plot are computed and returned. } } \value{ Invisible: either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"fv"}. The use of the argument \code{fmla} is like \code{plot.formula}, but offers some extra functionality. The left and right hand sides of \code{fmla} are evaluated, and the results are plotted against each other (the left side on the \eqn{y} axis against the right side on the \eqn{x} axis). The left and right hand sides of \code{fmla} may be the names of columns of the data frame \code{x}, or expressions involving these names. If a variable in \code{fmla} is not the name of a column of \code{x}, the algorithm will search for an object of this name in the environment where \code{plot.fv} was called, and then in the enclosing environment, and so on. Multiple curves may be specified by a single formula of the form \code{cbind(y1,y2,\dots,yn) ~ x}, where \code{x,y1,y2,\dots,yn} are expressions involving the variables in the data frame. Each of the variables \code{y1,y2,\dots,yn} in turn will be plotted against \code{x}. See the examples. Convenient abbreviations which can be used in the formula are \itemize{ \item the symbol \code{.} which represents all the columns in the data frame that will be plotted by default; \item the symbol \code{.x} which represents the function argument; \item the symbol \code{.y} which represents the recommended value of the function. } For further information, see \code{\link{fvnames}}. The value returned by this plot function indicates the meaning of the line types and colours in the plot. It can be used to make a suitable legend for the plot if you want to do this by hand. See the examples. The argument \code{shade} can be used to display critical bands or confidence intervals. If it is not \code{NULL}, then it should be a subset index for the columns of \code{x}, that identifies exactly 2 columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. See the Examples. The default values of \code{lty}, \code{col} and \code{lwd} can be changed using \code{\link{spatstat.options}("plot.fv")}. Use \code{type = "n"} to create the plot region and draw the axes without plotting any data. 
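For example, one way to build a plot curve by curve is to create empty axes with \code{type="n"} and then overlay individual estimates with \code{add=TRUE}, as in the minimal sketch below (using the standard \code{cells} dataset; any \code{"fv"} object could be substituted): \preformatted{
  K <- Kest(cells)
  ## draw the plot region and axes only, without curves
  plot(K, . ~ r, type="n", legend=FALSE)
  ## overlay selected estimates one at a time
  plot(K, iso ~ r, add=TRUE, col=2)
  plot(K, trans ~ r, add=TRUE, col=3)
}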
Use \code{limitsonly=TRUE} to suppress all plotting and just compute the \eqn{x} and \eqn{y} limits. This can be used to calculate common \eqn{x} and \eqn{y} scales for several plots. To change the kind of parenthesis enclosing the explanatory text about the unit of length, use \code{\link{spatstat.options}('units.paren')}. } \examples{ K <- Kest(cells) # K is an object of class "fv" plot(K, iso ~ r) # plots iso against r plot(K, sqrt(iso/pi) ~ r) # plots sqrt(iso/pi) against r plot(K, cbind(iso,theo) ~ r) # plots iso against r AND theo against r plot(K, . ~ r) # plots all available estimates of K against r plot(K, sqrt(./pi) ~ r) # plots all estimates of L-function # L(r) = sqrt(K(r)/pi) plot(K, cbind(iso,theo) ~ r, col=c(2,3)) # plots iso against r in colour 2 # and theo against r in colour 3 plot(K, iso ~ r, subset=quote(r < 0.2)) # plots iso against r for r < 0.2 # Can't remember the names of the columns? No problem... plot(K, sqrt(./pi) ~ .x) # making a legend by hand v <- plot(K, . ~ r, legend=FALSE) legend("topleft", legend=v$meaning, lty=v$lty, col=v$col) # significance bands KE <- envelope(cells, Kest, nsim=19) plot(KE, shade=c("hi", "lo")) # how to display two functions on a common scale Kr <- Kest(redwood) a <- plot(K, limitsonly=TRUE) b <- plot(Kr, limitsonly=TRUE) xlim <- range(a$xlim, b$xlim) ylim <- range(a$ylim, b$ylim) opa <- par(mfrow=c(1,2)) plot(K, xlim=xlim, ylim=ylim) plot(Kr, xlim=xlim, ylim=ylim) par(opa) } \seealso{ \code{\link{fv.object}}, \code{\link[spatstat.core]{Kest}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat.core/man/pcfmulti.Rd0000644000176200001440000001144214141452520015731 0ustar liggesusers\name{pcfmulti} \alias{pcfmulti} \title{ Marked Pair Correlation Function } \description{ Estimate the pair correlation function between two specified subsets of points in a point pattern, using kernel methods. } \usage{ pcfmulti(X, I, J, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), Iname = "points satisfying condition I", Jname = "points satisfying condition J") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the pair correlation function \eqn{g_{IJ}(r)}{g[I,J](r)} between the two specified subsets of points will be computed. It may be any point pattern (object of class \code{"ppp"}). } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. } \item{Iname,Jname}{ Optional. Character strings describing the members of the subsets \code{I} and \code{J}. } } \details{ This is a generalisation of \code{\link{pcfcross}} to arbitrary collections of points. The algorithm measures the distance from each data point in subset \code{I} to each data point in subset \code{J}, excluding identical pairs of points. The distances are kernel-smoothed and renormalised to form a pair correlation function.
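For example, a pair correlation function between large and small trees in the \code{longleaf} dataset could be estimated as sketched below (the size threshold of 30 units is arbitrary, chosen purely for illustration; compare the Examples section): \preformatted{
  adult <- marks(longleaf) >= 30    # arbitrary size threshold
  g <- pcfmulti(longleaf, adult, !adult)
  plot(g)
}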
\itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually reduces the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The arguments \code{I} and \code{J} specify two subsets of the point pattern \code{X}. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points in the subset \code{J}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the points in the subset \code{J}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pcfcross}}, \code{\link{pcfdot}}, \code{\link{pcf.ppp}}. } \examples{ adult <- (marks(longleaf) >= 30) juvenile <- !adult p <- pcfmulti(longleaf, adult, juvenile) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/lohboot.Rd0000644000176200001440000002035714141452520015561 0ustar liggesusers\name{lohboot} \alias{lohboot} \title{Bootstrap Confidence Bands for Summary Function} \description{ Computes a bootstrap confidence band for a summary function of a point process. } \usage{ lohboot(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom", "Kcross", "Lcross", "Kdot", "Ldot", "Kcross.inhom", "Lcross.inhom"), \dots, block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}).
} \item{fun}{ Name of the summary function for which confidence intervals are desired: one of the strings \code{"pcf"}, \code{"Kest"}, \code{"Lest"}, \code{"pcfinhom"}, \code{"Kinhom"}, \code{"Linhom"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Kdot"}, \code{"Ldot"}, \code{"Kcross.inhom"} or \code{"Lcross.inhom"}. Alternatively, the function itself; it must be one of the functions listed here. } \item{\dots}{ Arguments passed to the corresponding local version of the summary function (see Details). } \item{block}{ Logical value indicating whether to use Loh's block bootstrap as originally proposed. Default is \code{FALSE} for consistency with older code. See Details. } \item{global}{ Logical. If \code{FALSE} (the default), pointwise confidence intervals are constructed. If \code{TRUE}, a global (simultaneous) confidence band is constructed. } \item{basicboot}{ Logical value indicating whether to use the so-called basic bootstrap confidence interval. See Details. } \item{Vcorrection}{ Logical value indicating whether to use a variance correction when \code{fun="Kest"} or \code{fun="Kinhom"}. See Details. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } \item{nx,ny}{ Integers. If \code{block=TRUE}, divide the window into \code{nx*ny} rectangles. } \item{nsim}{ Number of bootstrap simulations. } \item{type}{ Integer. Type of quantiles. Argument passed to \code{\link[stats]{quantile.default}} controlling the way the quantiles are calculated. } } \value{ A function value table (object of class \code{"fv"}) containing columns giving the estimate of the summary function, the upper and lower limits of the bootstrap confidence interval, and the theoretical value of the summary function for a Poisson process. } \details{ This algorithm computes confidence bands for the true value of the summary function \code{fun} using the bootstrap method of Loh (2008) and a modification described in Baddeley, Rubak and Turner (2015). If \code{fun="pcf"}, for example, the algorithm computes a pointwise \code{(100 * confidence)}\% confidence interval for the true value of the pair correlation function for the point process, normally estimated by \code{\link{pcf}}. It starts by computing the array of \emph{local} pair correlation functions, \code{\link{localpcf}}, of the data pattern \code{X}. This array consists of the contributions to the estimate of the pair correlation function from each data point. If \code{block=FALSE}, these contributions are resampled \code{nsim} times with replacement as described in Baddeley, Rubak and Turner (2015); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. If \code{block=TRUE}, the calculation is performed as originally proposed by Loh (2008, 2010). The (bounding box of the) window is divided into \eqn{nx * ny} rectangles (blocks). The average contribution of a block is obtained by averaging the contribution of each point included in the block. Then, the average contributions on each block are resampled \code{nsim} times with replacement as described in Loh (2008) and Loh (2010); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. Notice that for non-rectangular windows any blocks not fully contained in the window are discarded before doing the resampling, so the effective number of blocks may be substantially smaller than \eqn{nx * ny} in this case.
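For instance, Loh's block bootstrap for the \eqn{K}-function, with the window divided into a 3 by 3 array of blocks, could be requested as in the minimal sketch below (using the standard \code{cells} dataset; the choice of \code{nx} and \code{ny} is illustrative only): \preformatted{
  ## block bootstrap confidence band for the K-function
  b <- lohboot(cells, Kest, block=TRUE, nx=3, ny=3)
  plot(b)
}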
The pointwise \code{alpha/2} and \code{1 - alpha/2} quantiles of these functions are computed, where \code{alpha = 1 - confidence}. The average of the local functions is also computed as an estimate of the pair correlation function. There are several ways to define a bootstrap confidence interval. If \code{basicboot=TRUE}, the so-called basic bootstrap confidence interval is used, as described in Loh (2008). Loh (2010) observed that, when the intensity of the point process is unknown, the bootstrap error estimate is larger than it should be. When the \eqn{K} function is used, an adjustment procedure proposed by Loh (2010) is applied if \code{Vcorrection=TRUE}. In this case, the basic bootstrap confidence interval is implicitly used. To control the estimation algorithm, use the arguments \code{\dots}, which are passed to the local version of the summary function, as shown below: \tabular{ll}{ \bold{fun} \tab \bold{local version} \cr \code{\link{pcf}} \tab \code{\link{localpcf}} \cr \code{\link{Kest}} \tab \code{\link{localK}} \cr \code{\link{Lest}} \tab \code{\link{localL}} \cr \code{\link{pcfinhom}} \tab \code{\link{localpcfinhom}} \cr \code{\link{Kinhom}} \tab \code{\link{localKinhom}} \cr \code{\link{Linhom}} \tab \code{\link{localLinhom}} \cr \code{\link{Kcross}} \tab \code{\link{localKcross}} \cr \code{\link{Lcross}} \tab \code{\link{localLcross}} \cr \code{\link{Kdot}} \tab \code{\link{localKdot}} \cr \code{\link{Ldot}} \tab \code{\link{localLdot}} \cr \code{\link{Kcross.inhom}} \tab \code{\link{localKcross.inhom}} \cr \code{\link{Lcross.inhom}} \tab \code{\link{localLcross.inhom}} } For \code{fun="Lest"}, the calculations are first performed as if \code{fun="Kest"}, and then the square-root transformation is applied to obtain the \eqn{L}-function. Similarly for \code{fun="Linhom", "Lcross", "Ldot", "Lcross.inhom"}. Note that the confidence bands computed by \code{lohboot(fun="pcf")} may not contain the estimate of the pair correlation function computed by \code{\link{pcf}}, because of differences between the algorithm parameters (such as the choice of edge correction) in \code{\link{localpcf}} and \code{\link{pcf}}. If you are using \code{lohboot}, the appropriate point estimate of the pair correlation itself is the pointwise mean of the local estimates, which is provided in the result of \code{lohboot} and is shown in the default plot. If the confidence bands seem unbelievably narrow, this may occur because the point pattern has a hard core (the true pair correlation function is zero for certain values of distance) or because of an optical illusion when the function is steeply sloping (remember that the width of the confidence bands should be measured \emph{vertically}). An alternative to \code{lohboot} is \code{\link{varblock}}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Loh, J.M. (2008) A valid and fast spatial bootstrap for correlation functions. \emph{The Astrophysical Journal}, \bold{681}, 726--734. Loh, J.M. (2010) Bootstrapping an inhomogeneous point process. \emph{Journal of Statistical Planning and Inference}, \bold{140}, 734--749.
} \seealso{ Summary functions \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{Kinhom}}, \code{\link{pcfinhom}}, \code{\link{localK}}, \code{\link{localpcf}}, \code{\link{localKinhom}}, \code{\link{localpcfinhom}}, \code{\link{localKcross}}, \code{\link{localKdot}}, \code{\link{localLcross}}, \code{\link{localLdot}}. \code{\link{localKcross.inhom}}, \code{\link{localLcross.inhom}}. See \code{\link{varblock}} for an alternative bootstrap technique. } \examples{ p <- lohboot(simdat, stoyan=0.5) g <- lohboot(simdat, stoyan=0.5, block=TRUE) g plot(g) } \author{ \spatstatAuthors and Christophe Biscio. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rags.Rd0000644000176200001440000000337314141452520015046 0ustar liggesusers\name{rags} \alias{rags} \title{ Alternating Gibbs Sampler for Multitype Point Processes } \description{ Simulate a realisation of a point process model using the alternating Gibbs sampler. } \usage{ rags(model, \dots, ncycles = 100) } \arguments{ \item{model}{ Data specifying some kind of point process model. } \item{\dots}{ Additional arguments passed to other code. } \item{ncycles}{ Number of cycles of the alternating Gibbs sampler that should be performed. } } \details{ The Alternating Gibbs Sampler for a multitype point process is an iterative simulation procedure. Each step of the sampler updates the pattern of points of a particular type \code{i}, by drawing a realisation from the conditional distribution of points of type \code{i} given the points of all other types. Successive steps of the sampler update the points of type 1, then type 2, type 3, and so on. This is an experimental implementation which currently works only for multitype hard core processes (see \code{\link{MultiHard}}) in which there is no interaction between points of the same type. The argument \code{model} should be an object describing a point process model. At the moment, the only permitted format for \code{model} is of the form \code{list(beta, hradii)} where \code{beta} gives the first order trend and \code{hradii} is the matrix of interaction radii. See \code{\link{ragsMultiHard}} for full details. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{ragsMultiHard}}, \code{\link{ragsAreaInter}} } \examples{ mo <- list(beta=c(30, 20), hradii = 0.05 * matrix(c(0,1,1,0), 2, 2)) rags(mo, ncycles=10) } \keyword{spatial} \keyword{datagen} spatstat.core/man/Extract.fv.Rd0000644000176200001440000000613614141452520016136 0ustar liggesusers\name{Extract.fv} \alias{[.fv} \alias{[<-.fv} \alias{$<-.fv} \title{Extract or Replace Subset of Function Values} \description{ Extract or replace a subset of an object of class \code{"fv"}. } \usage{ \method{[}{fv}(x, i, j, \dots, drop=FALSE) \method{[}{fv}(x, i, j) <- value \method{$}{fv}(x, name) <- value } \arguments{ \item{x}{ a function value object, of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame. } \item{i}{ any appropriate subset index. Selects a subset of the rows of the data frame, i.e. a subset of the domain of the function(s) represented by \code{x}. } \item{j}{ any appropriate subset index for the columns of the data frame. Selects some of the functions present in \code{x}. } \item{name}{ the name of a column of the data frame. } \item{\dots}{ Ignored. } \item{drop}{ Logical. If \code{TRUE}, the result is a data frame or vector containing the selected rows and columns of data. If \code{FALSE} (the default), the result is another object of class \code{"fv"}. 
} \item{value}{ Replacement value for the column or columns selected by \code{name} or \code{j}. } } \value{ The result of \code{[.fv} with \code{drop=TRUE} is a data frame or vector. Otherwise, the result is another object of class \code{"fv"}. } \details{ These functions extract a designated subset of an object of class \code{"fv"}, or replace the designated subset with other data, or delete the designated subset. The subset is specified by the row index \code{i} and column index \code{j}, or by the column name \code{name}. Either \code{i} or \code{j} may be missing, or both may be missing. The function \code{[.fv} is a method for the generic operator \code{\link{[}} for the class \code{"fv"}. It extracts the designated subset of \code{x}, and returns it as another object of class \code{"fv"} (if \code{drop=FALSE}) or as a data frame or vector (if \code{drop=TRUE}). The function \code{[<-.fv} is a method for the generic operator \code{\link{[<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated subset of \code{x} will be deleted from \code{x}. Otherwise, the designated subset of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. The function \code{$<-.fv} is a method for the generic operator \code{\link{$<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated column of \code{x} will be deleted from \code{x}. Otherwise, the designated column of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. } \seealso{ \code{\link{fv.object}} } \examples{ K <- Kest(cells) # discard the estimates of K(r) for r > 0.1 Ksub <- K[K$r <= 0.1, ] # extract the border method estimates bor <- K[ , "border", drop=TRUE] # or equivalently bor <- K$border # remove the border-method estimates K$border <- NULL K } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.core/man/rshift.splitppp.Rd0000644000176200001440000000501514141452520017256 0ustar liggesusers\name{rshift.splitppp} \alias{rshift.splitppp} \title{Randomly Shift a List of Point Patterns} \description{ Randomly shifts each point pattern in a list of point patterns. } \usage{ \method{rshift}{splitppp}(X, \dots, which=seq_along(X), nsim=1, drop=TRUE) } \arguments{ \item{X}{ An object of class \code{"splitppp"}. Basically a list of point patterns. } \item{\dots}{ Parameters controlling the generation of the random shift vector and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{which}{ Optional. Identifies which patterns will be shifted, while other patterns are not shifted. Any valid subset index for \code{X}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a split point pattern object, rather than a list containing the split point pattern. } } \value{ Another object of class \code{"splitppp"}, or a list of such objects. } \details{ This operation applies a random shift to each of the point patterns in the list \code{X}. The function \code{\link{rshift}} is generic. This function \code{rshift.splitppp} is the method for objects of class \code{"splitppp"}, which are essentially lists of point patterns, created by the function \code{\link{split.ppp}}. By default, every pattern in the list \code{X} will be shifted. 
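For example (a minimal sketch using the standard \code{amacrine} dataset; compare the Examples section): \preformatted{
  Y <- split(amacrine)    # list of "on" and "off" patterns
  Z <- rshift(Y)          # each component is shifted independently
}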
The argument \code{which} indicates that only some of the patterns should be shifted, while other groups should be left unchanged. \code{which} can be any valid subset index for \code{X}. Each point pattern in the list \code{X} (or each pattern in \code{X[which]}) is shifted by a random displacement vector. The shifting is performed by \code{\link{rshift.ppp}}. See the help page for \code{\link{rshift.ppp}} for details of the other arguments. If \code{nsim > 1}, then the simulation procedure is performed \code{nsim} times; the result is a list of split point patterns. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ data(amacrine) Y <- split(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(Y) # shift "on" points and leave "off" points fixed X <- rshift(Y, which="on") # maximum displacement distance 0.1 units X <- rshift(Y, radius=0.1) # shift with erosion X <- rshift(Y, radius=0.1, edge="erode") } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/pairorient.Rd0000644000176200001440000000777014141452520016273 0ustar liggesusers\name{pairorient} \alias{pairorient} \title{ Point Pair Orientation Distribution } \description{ Computes the distribution of the orientation of vectors joining pairs of points at a particular range of distances. } \usage{ pairorient(X, r1, r2, \dots, cumulative=FALSE, correction, ratio = FALSE, unit=c("degree", "radian"), domain=NULL) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{r1,r2}{ Minimum and maximum values of distance to be considered. } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"isotropic"}, \code{"translate"}, \code{"border"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. The default is to compute all edge corrections except \code{"none"}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers all pairs of points in the pattern \code{X} that lie more than \code{r1} and less than \code{r2} units apart. The \emph{direction} of the arrow joining the points is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the orientations is calculated using \code{\link{circdensity}}. If \code{cumulative=TRUE}, then the cumulative distribution function of these directions is calculated. This is the function \eqn{O_{r1,r2}(\phi)}{O[r1,r2](phi)} defined in Stoyan and Stoyan (1994), equation (14.53), page 271. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. 
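For example, the distance band \code{(r1, r2)} might be chosen to bracket the typical nearest-neighbour distances of the pattern, as in the sketch below (the quartiles of \code{\link{nndist}} are used here only as one plausible way to select a band): \preformatted{
  ## distance band spanning typical nearest-neighbour distances
  d <- nndist(redwood)
  band <- quantile(d, c(0.25, 0.75))
  rose(pairorient(redwood, band[1], band[2]))
}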
The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. See the help for \code{\link{Kest}} for details of edge corrections, and explanation of the options available. The choice \code{correction="none"} is not recommended; it is included for demonstration purposes only. The default is to compute all corrections except \code{"none"}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, Random Shapes and Point Fields: Methods of Geometrical Statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Ksector}}, \code{\link{nnorient}} } \examples{ rose(pairorient(redwood, 0.05, 0.15, sigma=8), col="grey") plot(CDF <- pairorient(redwood, 0.05, 0.15, cumulative=TRUE)) plot(f <- deriv(CDF, spar=0.6, Dperiodic=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Pairwise.Rd0000644000176200001440000000675114141452520015700 0ustar liggesusers\name{Pairwise} \alias{Pairwise} \title{Generic Pairwise Interaction model} \description{ Creates an instance of a pairwise interaction point process model which can then be fitted to point pattern data. } \usage{ Pairwise(pot, name, par, parnames, printfun) } \arguments{ \item{pot}{An R language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} \item{par}{List of numerical values for irregular parameters} \item{parnames}{Vector of names of irregular parameters} \item{printfun}{Do not specify this argument: for internal use only.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This code constructs a member of the pairwise interaction family \code{\link{pairwise.family}} with arbitrary pairwise interaction potential given by the user. Each pair of points in the point pattern contributes a factor \eqn{h(d)} to the probability density, where \eqn{d} is the distance between the two points. The factor term \eqn{h(d)} is \deqn{h(d) = \exp(-\theta \mbox{pot}(d))}{h(d) = exp(-theta * pot(d))} provided \eqn{\mbox{pot}(d)}{pot(d)} is finite, where \eqn{\theta}{theta} is the coefficient vector in the model. The function \code{pot} must take as its first argument a matrix of interpoint distances, and evaluate the potential for each of these distances. The result must be either a matrix with the same dimensions as its input, or an array with its first two dimensions the same as its input (the latter case corresponds to a vector-valued potential). If irregular parameters are present, then the second argument to \code{pot} should be a vector of the same type as \code{par} giving those parameter values. The values returned by \code{pot} may be finite numeric values, or \code{-Inf} indicating a hard core (that is, the corresponding interpoint distance is forbidden). 
We define \eqn{h(d) = 0} if \eqn{\mbox{pot}(d) = -\infty}{pot(d) = -Inf}. Thus, a potential value of minus infinity is \emph{always} interpreted as corresponding to \eqn{h(d) = 0}, regardless of the sign and magnitude of \eqn{\theta}{theta}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ #This is the same as StraussHard(r=0.7,h=0.05) strpot <- function(d,par) { r <- par$r h <- par$h value <- (d <= r) value[d < h] <- -Inf value } mySH <- Pairwise(strpot, "StraussHard process", list(r=0.7,h=0.05), c("interaction distance r", "hard core distance h")) data(cells) ppm(cells, ~ 1, mySH, correction="isotropic") # Fiksel (1984) double exponential interaction # see Stoyan, Kendall, Mecke 1987 p 161 fikspot <- function(d, par) { r <- par$r h <- par$h zeta <- par$zeta value <- exp(-zeta * d) value[d < h] <- -Inf value[d > r] <- 0 value } Fiksel <- Pairwise(fikspot, "Fiksel double exponential process", list(r=3.5, h=1, zeta=1), c("interaction distance r", "hard core distance h", "exponential coefficient zeta")) data(spruces) fit <- ppm(unmark(spruces), ~1, Fiksel, rbord=3.5) fit plot(fitin(fit), xlim=c(0,4)) coef(fit) # corresponding values obtained by Fiksel (1984) were -1.9 and -6.0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/valid.ppm.Rd0000644000176200001440000000532514141452520016003 0ustar liggesusers\name{valid.ppm} \alias{valid.ppm} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ \method{valid}{ppm}(object, warn=TRUE, \dots) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{warn}{ Logical value indicating whether to issue a warning if the validity of the model cannot be checked (due to unavailability of the required code). } \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{valid}} for Poisson and Gibbs point process models (class \code{"ppm"}). The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, \code{\link{ppm}} does not check whether the fitted model actually exists as a point process. This checking is done by \code{valid.ppm}. Unlike a regression model, which is well-defined for any values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{valid.ppm} checks whether the fitted model \code{object} specifies a well-defined point process. It returns \code{TRUE} if the model is well-defined. Another possible reason for invalid models is that the data may not be adequate for estimation of the model parameters. 
In this case, some of the fitted coefficients could be \code{NA} or infinite values. If this happens then \code{valid.ppm} returns \code{FALSE}. Use the function \code{\link{project.ppm}} to force the fitted model to be valid. } \value{ A logical value, or \code{NA}. } \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{project.ppm}} } \examples{ fit1 <- ppm(cells, ~1, Strauss(0.1)) valid(fit1) fit2 <- ppm(redwood, ~1, Strauss(0.1)) valid(fit2) } \keyword{spatial} \keyword{models} spatstat.core/man/Lest.Rd0000644000176200001440000000551114141452520015015 0ustar liggesusers\name{Lest} \alias{Lest} \title{L-function} \description{ Calculates an estimate of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Lest(X, ..., correction) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kest}} to control the estimation procedure. } } \details{ This command computes an estimate of the \eqn{L}-function for the spatial point pattern \code{X}. The \eqn{L}-function is a transformation of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the \eqn{K}-function. See \code{\link{Kest}} for information about Ripley's \eqn{K}-function. The transformation to \eqn{L} was proposed by Besag (1977). The command \code{Lest} first calls \code{\link{Kest}} to compute the estimate of the \eqn{K}-function, and then applies the square root transformation. For a completely random (uniform Poisson) point pattern, the theoretical value of the \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L(r)} is more appropriate for use in simulation envelopes and hypothesis tests. See \code{\link{Kest}} for the list of arguments. } \section{Variance approximations}{ If the argument \code{var.approx=TRUE} is given, the return value includes columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat L(r)}{Lest(r)} under CSR. These are obtained by the delta method from the variance approximations described in \code{\link{Kest}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(cells) L <- Lest(cells) plot(L, main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/unitname.Rd0000644000176200001440000000540614141452520015731 0ustar liggesusers\name{unitname} \alias{unitname.dppm} \alias{unitname.kppm} \alias{unitname.minconfit} \alias{unitname.ppm} \alias{unitname.slrm} \alias{unitname<-.dppm} \alias{unitname<-.kppm} \alias{unitname<-.minconfit} \alias{unitname<-.ppm} \alias{unitname<-.slrm} \title{Name for Unit of Length} \description{ Inspect or change the name of the unit of length in a spatial dataset. } \usage{ \method{unitname}{dppm}(x) \method{unitname}{kppm}(x) \method{unitname}{minconfit}(x) \method{unitname}{ppm}(x) \method{unitname}{slrm}(x) \method{unitname}{dppm}(x) <- value \method{unitname}{kppm}(x) <- value \method{unitname}{minconfit}(x) <- value \method{unitname}{ppm}(x) <- value \method{unitname}{slrm}(x) <- value } \arguments{ \item{x}{A spatial dataset. Either a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}), a tessellation (object of class \code{"tess"}), a quadrature scheme (object of class \code{"quad"}), or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"dppm"} or \code{"minconfit"}). } \item{value}{ Name of the unit of length. See Details. } } \details{ Spatial datasets in the \pkg{spatstat} package may include the name of the unit of length. This name is used when printing or plotting the dataset, and in some other applications. \code{unitname(x)} extracts this name, and \code{unitname(x) <- value} sets the name to \code{value}. A valid name is either \itemize{ \item a single character string \item a vector of two character strings giving the singular and plural forms of the unit name \item a list of length 3, containing two character strings giving the singular and plural forms of the basic unit, and a number specifying the multiple of this unit. } Note that re-setting the name of the unit of length \emph{does not} affect the numerical values in \code{x}. It changes only the string containing the name of the unit of length. To rescale the numerical values, use \code{\link{rescale}}. } \value{ The return value of \code{unitname} is an object of class \code{"unitname"} containing the name of the unit of length in \code{x}. There are methods for \code{print}, \code{summary}, \code{as.character}, \code{\link{rescale}} and \code{\link{compatible}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rescale}}, \code{\link{owin}}, \code{\link{ppp}} } \examples{ X <- runifrect(20) # if the unit of length is 1 metre: unitname(X) <- c("metre", "metres") # if the unit of length is 6 inches: unitname(X) <- list("inch", "inches", 6) } \keyword{spatial} \keyword{manip} spatstat.core/man/rotmean.Rd0000644000176200001440000000526114141452520015555 0ustar liggesusers\name{rotmean} \alias{rotmean} \title{ Rotational Average of a Pixel Image } \description{ Compute the average pixel value over all rotations of the image about the origin, as a function of distance from the origin. } \usage{ rotmean(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im"), adjust=1) } \arguments{ \item{X}{ A pixel image. } \item{\dots}{ Ignored. } \item{origin}{ Optional. Origin about which the rotations should be performed. 
Either a numeric vector or a character string as described in the help for \code{\link{shift.owin}}. } \item{padzero}{ Logical. If \code{TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. If \code{FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. } \item{Xname}{ Optional name for \code{X} to be used in the function labels. } \item{result}{ Character string specifying the kind of result required: either a function object or a pixel image. } \item{adjust}{ Adjustment factor for bandwidth used in kernel smoothing. } } \details{ This command computes, for each possible distance \eqn{r}, the average pixel value of the pixels lying at distance \eqn{r} from the origin. Kernel smoothing is used to obtain a smooth function of \eqn{r}. If \code{result="fv"} (the default) the result is a function object of class \code{"fv"} giving the mean pixel value of \code{X} as a function of distance from the origin. If \code{result="im"} the result is a pixel image, with the same dimensions as \code{X}, giving the mean value of \code{X} over all pixels lying at the same distance from the origin as the current pixel. If \code{padzero=TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. The rotational mean at a given distance \eqn{r} is the average value of the image \code{X} over the \emph{entire} circle of radius \eqn{r}, including zero values outside the window if the circle lies partly outside the window. If \code{padzero=FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. The rotational mean is the average of the \code{X} values over the \emph{subset} of the circle of radius \eqn{r} that lies entirely inside the window. } \value{ An object of class \code{"fv"} or \code{"im"}, with the same coordinate units as \code{X}. } \author{ \spatstatAuthors. } \examples{ if(interactive()) { Z <- setcov(square(1)) plot(rotmean(Z)) plot(rotmean(Z, result="im")) } else { Z <- setcov(square(1), dimyx=32) f <- rotmean(Z) } } \keyword{spatial} \keyword{math} spatstat.core/man/fvnames.Rd0000644000176200001440000000517114141452520015547 0ustar liggesusers\name{fvnames} \alias{fvnames} \alias{fvnames<-} \title{ Abbreviations for Groups of Columns in Function Value Table } \description{ Groups of columns in a function value table (object of class \code{"fv"}) identified by standard abbreviations. } \usage{ fvnames(X, a = ".") fvnames(X, a = ".") <- value } \arguments{ \item{X}{ Function value table (object of class \code{"fv"}). See \code{\link{fv.object}}. } \item{a}{ One of the standard abbreviations listed below. } \item{value}{ Character vector containing names of columns of \code{X}. } } \details{ An object of class \code{"fv"} represents a table of values of a function, usually a summary function for spatial data such as the \eqn{K}-function, for which several different statistical estimators may be available. The different estimates are stored as columns of the table. Auxiliary information carried in the object \code{X} specifies some columns or groups of columns of this table that should be used for particular purposes. For convenience these groups can be referred to by standard abbreviations which are recognised by various functions in the \pkg{spatstat} package, such as \code{\link{plot.fv}}. 
These abbreviations are: \tabular{ll}{ \code{".x"} \tab the function argument \cr \code{".y"} \tab the recommended value of the function \cr \code{"."} \tab all function values to be plotted by default \cr \tab (in order of plotting) \cr \code{".s"} \tab the upper and lower limits of shading \cr \tab (for envelopes and confidence intervals)\cr \code{".a"} \tab all function values (in column order) } The command \code{fvnames(X, a)} expands the abbreviation \code{a} and returns a character vector containing the names of the columns. The assignment \code{fvnames(X, a) <- value} changes the definition of the abbreviation \code{a} to the character string \code{value} (which should be the name of another column of \code{X}). The column names of \code{X} are not changed. Note that \code{fvnames(x, ".")} lists the columns of values that will be plotted by default, in the order that they would be plotted, not in order of the column position. The order in which curves are plotted affects the colours and line styles associated with the curves. } \value{ For \code{fvnames}, a character vector. For \code{fvnames<-}, the updated object. } \author{\adrian and \rolf } \seealso{ \code{\link{fv.object}}, \code{\link{plot.fv}} } \examples{ K <- Kest(cells) fvnames(K, ".y") fvnames(K, ".y") <- "trans" } \keyword{spatial} \keyword{manip} spatstat.core/man/compatible.fasp.Rd0000644000176200001440000000214214141452520017152 0ustar liggesusers\name{compatible.fasp} \alias{compatible.fasp} \title{Test Whether Function Arrays Are Compatible} \description{ Tests whether two or more function arrays (class \code{"fasp"}) are compatible. } \usage{ \method{compatible}{fasp}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function arrays (object of class \code{"fasp"}).} } \details{ An object of class \code{"fasp"} can be regarded as an array of functions. Such objects are returned by the command \code{\link[spatstat.core]{alltypes}}. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. The function arrays are compatible if the arrays have the same dimensions, and the corresponding elements in each cell of the array are compatible as defined by \code{\link{compatible.fv}}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fasp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.core/man/harmonise.fv.Rd0000644000176200001440000000561114141452520016506 0ustar liggesusers\name{harmonise.fv} \alias{harmonise.fv} \alias{harmonize.fv} \title{Make Function Tables Compatible} \description{ Convert several objects of class \code{"fv"} to the same values of the function argument. } \usage{ \method{harmonise}{fv}(\dots, strict=FALSE) \method{harmonize}{fv}(\dots, strict=FALSE) } \arguments{ \item{\dots}{ Any number of function tables (objects of class \code{"fv"}). } \item{strict}{ Logical. If \code{TRUE}, a column of data will be deleted if columns of the same name do not appear in every object. } } \details{ A function value table (object of class \code{"fv"}) is essentially a data frame giving the values of a function \eqn{f(x)} (or several alternative estimates of this value) at equally-spaced values of the function argument \eqn{x}. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"fv"}. 
This command makes any number of \code{"fv"} objects compatible, in the loose sense that they have the same sequence of values of \eqn{x}. They can then be combined by \code{\link{cbind.fv}}, but not necessarily by \code{\link{eval.fv}}. All arguments \code{\dots} must be function value tables (objects of class \code{"fv"}). The result will be a list, of length equal to the number of arguments \code{\dots}, containing new versions of each of these functions, converted to a common sequence of \eqn{x} values. If the arguments were named (\code{name=value}) then the return value also carries these names. The range of \eqn{x} values in the resulting functions will be the intersection of the ranges of \eqn{x} values in the original functions. The spacing of \eqn{x} values in the resulting functions will be the finest (narrowest) of the spacings of the \eqn{x} values in the original functions. Function values are interpolated using \code{\link[stats]{approxfun}}. If \code{strict=TRUE}, each column of data will be retained only if a column of the same name appears in all of the arguments \code{\dots}. This ensures that the resulting objects are strictly compatible in the sense of \code{\link{compatible.fv}}, and can be combined using \code{\link{eval.fv}} or \code{\link{collapse.fv}}. If \code{strict=FALSE} (the default), this does not occur, and then the resulting objects are \bold{not} guaranteed to be compatible in the sense of \code{\link{compatible.fv}}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are objects of class \code{"fv"}. If the arguments were named (\code{name=value}) then the return value also carries these names. } \author{\adrian , \rolf and \ege. } \examples{ H <- harmonise(K=Kest(cells), G=Gest(cells)) H } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}}, \code{\link{eval.fv}}, \code{\link{compatible.fv}} } \keyword{spatial} \keyword{manip} spatstat.core/man/gauss.hermite.Rd0000644000176200001440000000322214141452520016661 0ustar liggesusers\name{gauss.hermite} \alias{gauss.hermite} \title{ Gauss-Hermite Quadrature Approximation to Expectation for Normal Distribution } \description{ Calculates an approximation to the expected value of any function of a normally-distributed random variable, using Gauss-Hermite quadrature. } \usage{ gauss.hermite(f, mu = 0, sd = 1, ..., order = 5) } \arguments{ \item{f}{ The function whose moment should be approximated. } \item{mu}{ Mean of the normal distribution. } \item{sd}{ Standard deviation of the normal distribution. } \item{\dots}{ Additional arguments passed to \code{f}. } \item{order}{ Number of quadrature points in the Gauss-Hermite quadrature approximation. A small positive integer. } } \details{ This algorithm calculates the approximate expected value of \code{f(Z)} when \code{Z} is a normally-distributed random variable with mean \code{mu} and standard deviation \code{sd}. The expected value is an integral with respect to the Gaussian density; this integral is approximated using Gauss-Hermite quadrature. The argument \code{f} should be a function in the \R language whose first argument is the variable \code{Z}. Additional arguments may be passed through \code{\dots}. The value returned by \code{f} may be a single numeric value, a vector, or a matrix. The values returned by \code{f} for different values of \code{Z} must have compatible dimensions. The result is a weighted average of several values of \code{f}. } \value{ Numeric value, vector or matrix. 
} \author{\adrian , \rolf and \ege. } \examples{ gauss.hermite(function(x) x^2, 3, 1) } \keyword{math} spatstat.core/man/unstack.msr.Rd0000644000176200001440000000223614141452520016357 0ustar liggesusers\name{unstack.msr} \alias{unstack.msr} \title{ Separate a Vector Measure into its Scalar Components } \description{ Converts a vector-valued measure into a list of scalar-valued measures. } \usage{ \method{unstack}{msr}(x, \dots) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic \code{\link[utils]{unstack}} for the class \code{"msr"} of measures. If \code{x} is a vector-valued measure, then \code{y <- unstack(x)} is a list of scalar-valued measures defined by the components of \code{x}. The \code{j}th entry of the list, \code{y[[j]]}, is equivalent to the \code{j}th component of the vector measure \code{x}. If \code{x} is a scalar-valued measure, then the result is a list consisting of one entry, which is \code{x}. } \value{ A list of measures, of class \code{"solist"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.ppp}} \code{\link{split.msr}}. } \examples{ fit <- ppm(cells ~ x) m <- residuals(fit, type="score") m unstack(m) } \keyword{spatial} \keyword{manip} spatstat.core/man/transect.im.Rd0000644000176200001440000000547314141452520016344 0ustar liggesusers\name{transect.im} \alias{transect.im} \title{ Pixel Values Along a Transect } \description{ Extract the pixel values of a pixel image at each point along a linear transect. } \usage{ transect.im(X, ..., from="bottomleft", to="topright", nsample=512, click=FALSE, add=FALSE, curve=NULL) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{\dots}{ Ignored. } \item{from,to}{ Optional. Start point and end point of the transect. Pairs of \eqn{(x,y)} coordinates in a format acceptable to \code{\link{xy.coords}}, or keywords \code{"bottom"}, \code{"left"}, \code{"top"}, \code{"right"}, \code{"bottomleft"} etc. } \item{nsample}{ Integer. Number of sample locations along the transect. } \item{click}{ Optional. Logical value. If \code{TRUE}, the linear transect is determined interactively by the user, who clicks two points on the current plot. } \item{add}{ Logical. If \code{click=TRUE}, this argument determines whether to perform interactive tasks on the current plot (\code{add=TRUE}) or to start by plotting \code{X} (\code{add=FALSE}). } \item{curve}{ Optional. A specification of a curved transect. See the section on Curved Transect. } } \details{ The pixel values of the image \code{X} along a line segment will be extracted. The result is a function table (\code{"fv"} object) which can be plotted directly. If \code{click=TRUE}, then the user is prompted to click two points on the plot of \code{X}. These endpoints define the transect. Otherwise, the transect is defined by the endpoints \code{from} and \code{to}. The default is a diagonal transect from bottom left to top right of the frame. } \section{Curved Transect}{ If \code{curve} is given, then the transect will be a curve. 
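For example, a transect along a semicircular arc could be specified as follows
(a minimal sketch; the curve geometry is arbitrary, and the components of the
\code{curve} list are described below):
\preformatted{
  Z <- bei.extra$elev
  ## semicircular transect across the window,
  ## parametrised by the angle t
  halfcircle <- list(f = function(t) {
                       cbind(500 + 300 * cos(t),
                             250 + 200 * sin(t))
                     },
                     tlim = c(0, pi))
  plot(transect.im(Z, curve = halfcircle))
}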
The argument \code{curve} should be a list with the following components: \describe{ \item{f}{A function in the \R language with one argument \code{t}.} \item{tlim}{A numeric vector of length 2 giving the range of values of the argument \code{t}.} \item{tname}{(Optional) a character string giving the symbolic name of the function argument \code{t}; defaults to \code{"t"}.} \item{tdescrip}{(Optional) a character string giving a short description of the function argument \code{t}; defaults to \code{"curve parameter"}.} } The function \code{f} must return a 2-column matrix or data frame specifying the spatial coordinates \code{(x,y)} of locations along the curve, determined by the values of the input argument \code{t}. } \value{ An object of class \code{"fv"} which can be plotted. } \author{ \adrian and \rolf } \seealso{ \code{\link{im}} } \examples{ Z <- bei.extra$elev plot(transect.im(Z)) } \keyword{spatial} \keyword{manip} \keyword{iplot} spatstat.core/man/Extract.msr.Rd0000644000176200001440000000230014141452520016313 0ustar liggesusers\name{Extract.msr} \alias{[.msr} \title{Extract Subset of Signed or Vector Measure} \description{ Extract a subset of a signed measure or vector-valued measure. } \usage{ \method{[}{msr}(x, i, j, \dots) } \arguments{ \item{x}{ A signed or vector measure. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or any type of index that applies to a matrix. } \item{j}{ Subset index selecting the vector coordinates to be extracted, if \code{x} is a vector-valued measure. } \item{\dots}{Ignored.} } \value{ An object of class \code{"msr"}. } \details{ This operator extracts a subset of the data which determines the signed measure or vector-valued measure \code{x}. The result is another measure. } \seealso{ \code{\link{msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") rp[square(0.5)] rs[ , 2:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/dg.envelope.Rd0000644000176200001440000001023014141452520016310 0ustar liggesusers\name{dg.envelope} \alias{dg.envelope} \title{ Global Envelopes for Dao-Genton Test } \description{ Computes the global envelopes corresponding to the Dao-Genton test of goodness-of-fit. } \usage{ dg.envelope(X, \dots, nsim = 19, nsimsub=nsim-1, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. } \item{nsim}{ Number of simulated patterns to be generated in the primary experiment. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations.
} \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. } } \details{ Computes global simulation envelopes corresponding to the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test. The envelopes are described in Baddeley et al (2015). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.envelope}} in this case. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian, \rolf and \ege. } \seealso{ \code{\link{dg.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- dg.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- dg.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- dg.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/mincontrast.Rd0000644000176200001440000001730014142420412016442 0ustar liggesusers\name{mincontrast} \alias{mincontrast} \title{Method of Minimum Contrast} \description{ A general low-level algorithm for fitting theoretical point process models to point pattern data by the Method of Minimum Contrast. } \usage{ mincontrast(observed, theoretical, startpar, \dots, ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), action.bad.values=c("warn", "stop", "silent"), control=list(), stabilize=TRUE, pspace=NULL) } \arguments{ \item{observed}{ Summary statistic, computed for the data. An object of class \code{"fv"}. } \item{theoretical}{ An R language function that calculates the theoretical expected value of the summary statistic, given the model parameters.
See Details. } \item{startpar}{ Vector of initial values of the parameters of the point process model (passed to \code{theoretical}). } \item{\dots}{ Additional arguments passed to the function \code{theoretical} and to the optimisation algorithm \code{\link[stats]{optim}}. } \item{ctrl}{ Optional. List of arguments controlling the optimisation. See Details. } \item{fvlab}{ Optional. List containing some labels for the return value. See Details. } \item{explain}{ Optional. List containing strings that give a human-readable description of the model, the data and the summary statistic. } \item{action.bad.values}{ String (partially matched) specifying what to do if values of the summary statistic are \code{NA}, \code{NaN} or infinite. See Details. } \item{control}{ Optional. Argument passed to \code{\link[stats]{optim}}. A list of parameters which control the behaviour of the optimization algorithm. } \item{stabilize}{ Logical value specifying whether to numerically stabilize the optimization algorithm, by specifying suitable default values of \code{control$fnscale} and \code{control$parscale}. } \item{pspace}{ For internal use by the package only. } } \details{ This function is a general algorithm for fitting point process models by the Method of Minimum Contrast. If you want to fit the Thomas process, see \code{\link{thomas.estK}}. If you want to fit a log-Gaussian Cox process, see \code{\link{lgcp.estK}}. If you want to fit the \Matern cluster process, see \code{\link{matclust.estK}}. The Method of Minimum Contrast (Pfanzagl, 1969; Diggle and Gratton, 1984) is a general technique for fitting a point process model to point pattern data. First a summary function (typically the \eqn{K} function) is computed from the data point pattern. Second, the theoretical expected value of this summary statistic under the point process model is derived (if possible, as an algebraic expression involving the parameters of the model) or estimated from simulations of the model. Then the model is fitted by finding the optimal parameter values for the model to give the closest match between the theoretical and empirical curves. The argument \code{observed} should be an object of class \code{"fv"} (see \code{\link{fv.object}}) containing the values of a summary statistic computed from the data point pattern. Usually this is the function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives. The argument \code{theoretical} should be a user-supplied function that computes the theoretical expected value of the summary statistic. It must have an argument named \code{par} that will be the vector of parameter values for the model (the length and format of this vector are determined by the starting values in \code{startpar}). The function \code{theoretical} should also expect a second argument (the first argument other than \code{par}) containing values of the distance \eqn{r} for which the theoretical value of the summary statistic \eqn{K(r)} should be computed. The value returned by \code{theoretical} should be a vector of the same length as the given vector of \eqn{r} values. The argument \code{ctrl} determines the contrast criterion (the objective function that will be minimised). 
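For illustration, here is a minimal sketch of a direct call, fitting the Thomas process to the \code{redwood} data by hand (in practice one would use \code{\link{thomas.estK}}; the function name \code{Ktheo}, the parametrisation and the starting values are purely illustrative):
\preformatted{
  K <- Kest(redwood)
  ## theoretical K-function of the Thomas process,
  ## with par = c(kappa, sigma2)
  Ktheo <- function(par, rvals, ...) {
    pi * rvals^2 + (1 - exp(-rvals^2/(4 * par[2])))/par[1]
  }
  fit <- mincontrast(K, Ktheo,
                     startpar = c(kappa = 10, sigma2 = 0.01))
  fit$par
}
The contrast criterion determined by \code{ctrl} is defined as follows.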
The algorithm minimises the criterion \deqn{ D(\theta)= \int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}} |\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r }{ D(theta) = integral from rmin to rmax of abs(Fhat(r)^q - F(theta,r)^q)^p } where \eqn{\theta}{theta} is the vector of parameters of the model, \eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the theoretical expected value of the summary statistic, and \eqn{p,q} are two exponents. The default is \code{q = 1/4}, \code{p=2} so that the contrast criterion is the integrated squared difference between the fourth roots of the two functions (Waagepetersen, 2007). The argument \code{action.bad.values} specifies what to do if some of the values of the summary statistic are \code{NA}, \code{NaN} or infinite. If \code{action.bad.values="stop"}, or if all of the values are bad, then a fatal error occurs. Otherwise, the domain of the summary function is shortened to avoid the bad values. The shortened domain is the longest interval on which the function values are finite (provided this interval is at least half the length of the original domain). A warning is issued if \code{action.bad.values="warn"} (the default) and no warning is issued if \code{action.bad.values="silent"}. The other arguments just make things print nicely. The argument \code{fvlab} contains labels for the component \code{fit} of the return value. The argument \code{explain} contains human-readable strings describing the data, the model and the summary statistic. The \code{"..."} argument of \code{mincontrast} can be used to pass extra arguments to the function \code{theoretical} and/or to the optimisation function \code{\link[stats]{optim}}. In this case, the function \code{theoretical} should also have a \code{"..."} argument and should ignore it (so that it ignores arguments intended for \code{\link[stats]{optim}}). } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } \item{opt }{The object returned from the optimizer \code{\link{optim}}.} \item{crtl }{List of parameters determining the contrast objective.} \item{info }{List of explanatory strings.} } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Pfanzagl, J. (1969). On the measurability and consistency of minimum contrast estimates. \emph{Metrika} \bold{14}, 249--276. Waagepetersen, R. (2007). An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Rasmus Waagepetersen \email{rw@math.auc.dk}, adapted for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{thomas.estK}} } \keyword{spatial} \keyword{models} spatstat.core/man/dppkernel.Rd0000644000176200001440000000121514141452520016067 0ustar liggesusers\name{dppkernel} \alias{dppkernel} \title{Extract Kernel from Determinantal Point Process Model Object} \description{ Returns the kernel of a determinantal point process model as a function of one argument \code{x}. } \usage{dppkernel(model, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Arguments passed to \code{\link{dppapproxkernel}} if the exact kernel is unknown.} } \value{A function} \author{ \adrian, \rolf and \ege } \examples{ kernelMatern <- dppkernel(dppMatern(lambda = 100, alpha=.01, nu=1, d=2)) plot(kernelMatern, xlim = c(0,0.1)) } \keyword{spatial} \keyword{models} spatstat.core/man/anova.mppm.Rd0000644000176200001440000001217014141452520016161 0ustar liggesusers\name{anova.mppm} \alias{anova.mppm} \title{ANOVA for Fitted Point Process Models for Replicated Patterns} \description{ Performs analysis of deviance for one or more point process models fitted to replicated point pattern data. } \usage{ \method{anova}{mppm}(object, \dots, test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) } \arguments{ \item{object}{ Object of class \code{"mppm"} representing a point process model that was fitted to replicated point patterns. } \item{\dots}{ Optional. Additional objects of class \code{"mppm"}. } \item{test}{ Type of hypothesis test to perform. A character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. } \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. } \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of the variance of the fitted coefficients of each model. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for comparing several fitted point process models of class \code{"mppm"}, usually generated by the model-fitting function \code{\link{mppm}}. If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley, Turner and Rubak (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment.
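As an illustration, here is a minimal sketch comparing two Gibbs models for the \code{waterstriders} data (mirroring the Examples below), with the adjustment applied by default:
\preformatted{
  H <- hyperframe(X = waterstriders)
  fit0 <- mppm(X ~ 1, data = H, Strauss(2))
  fit1 <- mppm(X ~ x, data = H, Strauss(2))
  anova(fit0, fit1, test = "Chi")  ## adjusted pseudolikelihood ratio test
}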
The argument \code{test} determines which hypothesis test, if any, will be performed to compare the models. The argument \code{test} should be a character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, or \code{NULL}. The first option \code{"Chisq"} gives the likelihood ratio test based on the asymptotic chi-squared distribution of the deviance difference. The meaning of the other options is explained in \code{\link{anova.glm}}. } \section{Random effects models are currently not supported}{ For models with random effects (i.e. where the call to \code{\link{mppm}} included the argument \code{random}), analysis of deviance is currently not supported, due to changes in the \pkg{nlme} package. We will try to find a solution. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) #' test for loglinear trend in x coordinate mod0 <- mppm(X~1, data=H, Poisson()) modx <- mppm(X~x, data=H, Poisson()) anova(mod0, modx, test="Chi") # not significant anova(modx, test="Chi") # not significant #' test for inhibition mod0S <- mppm(X~1, data=H, Strauss(2)) anova(mod0, mod0S, test="Chi") # significant! #' test for trend after accounting for inhibition modxS <- mppm(X~x, data=H, Strauss(2)) anova(mod0S, modxS, test="Chi") # not significant } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/kernel.moment.Rd0000644000176200001440000000347514141452520016673 0ustar liggesusers\name{kernel.moment} \alias{kernel.moment} \title{Moment of Smoothing Kernel} \description{ Computes the complete or incomplete \eqn{m}th moment of a smoothing kernel. } \usage{ kernel.moment(m, r, kernel = "gaussian") } \arguments{ \item{m}{ Exponent (order of moment). An integer. } \item{r}{ Upper limit of integration for the incomplete moment. A numeric value or numeric vector. Set \code{r=Inf} to obtain the complete moment. } \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. For more information about these kernels, see \code{\link[stats]{density.default}}.
The function \code{kernel.moment} computes the partial integral \deqn{ \int_{-\infty}^r t^m k(t) dt }{ integral[-Inf][r] t^m k(t) dt } where \eqn{k(t)} is the selected kernel, \eqn{r} is the upper limit of integration, and \eqn{m} is the exponent or order. Here \eqn{k(t)} is the \bold{standard form} of the kernel, which has support \eqn{[-1,1]} and standard deviation \eqn{sigma = 1/c} where \code{c = kernel.factor(kernel)}. } \value{ A single number, or a numeric vector of the same length as \code{r}. } \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.factor}}, } \examples{ kernel.moment(1, 0.1, "epa") curve(kernel.moment(2, x, "epa"), from=-1, to=1) } \author{ \adrian and Martin Hazelton. } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat.core/man/Ginhom.Rd0000644000176200001440000001534314141452520015333 0ustar liggesusers\name{Ginhom} \alias{Ginhom} \title{ Inhomogeneous Nearest Neighbour Function } \description{ Estimates the inhomogeneous nearest neighbour function \eqn{G} of a non-stationary point pattern. } \usage{ Ginhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{G} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity. } \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result. 
} } \details{ This command computes estimates of the inhomogeneous \eqn{G}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the nearest-neighbour distance distribution function \eqn{G} for homogeneous point patterns computed by \code{\link{Gest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{G} function is computed using the border correction, equation (7) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201.
} \seealso{ \code{\link{Finhom}}, \code{\link{Jinhom}}, \code{\link{Gest}} } \examples{ plot(Ginhom(swedishpines, sigma=10)) # plot(Ginhom(swedishpines, sigma=bw.diggle, adjust=2)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/PairPiece.Rd0000644000176200001440000000744014141452520015752 0ustar liggesusers\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{vector of jump points for the potential function} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. } \details{ A pairwise interaction point process in a bounded region is a stochastic point process with probability density of the form \deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x_1,\ldots,x_n) = alpha . product { b(x[i]) } product { h(x_i, x_j) } } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern. The first product on the right hand side is over all points of the pattern; the second product is over all unordered pairs of points of the pattern. Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density. The pairwise interaction term \eqn{h(u, v)} is called \emph{piecewise constant} if it depends only on the distance between \eqn{u} and \eqn{v}, say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant function (a function which is constant except for jumps at a finite number of places). The use of piecewise constant interaction terms was first suggested by Takacs (1986). The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant pairwise interaction is yielded by the function \code{PairPiece()}. See the examples below. The entries of \code{r} must be strictly increasing, positive numbers. They are interpreted as the points of discontinuity of \eqn{H}. It is assumed that \eqn{H(s) =1} for all \eqn{s > r_{max}}{s > rmax} where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the model has as many regular parameters (see \code{\link{ppm}}) as there are entries in \code{r}. The \eqn{i}-th regular parameter \eqn{\theta_i}{theta[i]} is the logarithm of the value of the interaction function \eqn{H} on the interval \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. If \code{r} is a single number, this model is similar to the Strauss process, see \code{\link{Strauss}}. The difference is that in \code{PairPiece} the interaction function is continuous on the right, while in \code{\link{Strauss}} it is continuous on the left. The analogue of this model for multitype point processes has not yet been implemented. 
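For example, the values of the fitted interaction function can be recovered by exponentiating the fitted coefficients (a minimal sketch; note that the first coefficient is the first-order trend term, which is not part of \eqn{H}):
\preformatted{
  fit <- ppm(cells ~ 1, PairPiece(r = c(0.05, 0.1, 0.2)))
  exp(coef(fit))  ## values of H on each successive interval,
                  ## after the leading intensity term
}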
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}} \code{\link{rmh.ppm}} } \examples{ PairPiece(c(0.1,0.2)) # prints a sensible description of itself data(cells) ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) # fit a stationary piecewise constant pairwise interaction process # ppm(cells ~polynom(x,y,3), PairPiece(c(0.05, 0.1))) # nonstationary process with log-cubic polynomial trend } \references{ Takacs, R. (1986) Estimator for the pair potential of a Gibbsian point process. \emph{Statistics} \bold{17}, 429--433. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/Jinhom.Rd0000644000176200001440000001521414141452520015333 0ustar liggesusers\name{Jinhom} \alias{Jinhom} \title{ Inhomogeneous J-function } \description{ Estimates the inhomogeneous \eqn{J} function of a non-stationary point pattern. } \usage{ Jinhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{J} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity. } \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result. } } \details{ This command computes estimates of the inhomogeneous \eqn{J}-function (Van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the \eqn{J} function for homogeneous point patterns computed by \code{\link{Jest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). 
The inhomogeneous \eqn{J} function is computed as \eqn{Jinhom(r) = (1 - Ginhom(r))/(1-Finhom(r))} where \eqn{Ginhom, Finhom} are the inhomogeneous \eqn{G} and \eqn{F} functions computed using the border correction (equations (7) and (6) respectively in Van Lieshout, 2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Finhom}}, \code{\link{Jest}} } \examples{ # plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2)) plot(Jinhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege.
} \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pairsat.family.Rd0000644000176200001440000000446114141452520017034 0ustar liggesusers\name{pairsat.family} \alias{pairsat.family} \title{Saturated Pairwise Interaction Point Process Family} \description{ An object describing the Saturated Pairwise Interaction family of point process models } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the ``saturated pairwise interaction'' family of point process models. If you need to create a specific interaction model for use in spatial pattern analysis, use the function \code{\link{Saturated}()} or the two existing implementations of models in this family, \code{\link{Geyer}()} and \code{\link{SatPiece}()}. Geyer (1999) introduced the ``saturation process'', a modification of the Strauss process in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{c}. This model is implemented in the function \code{\link{Geyer}()}. The present class \code{pairsat.family} is the extension of this saturation idea to all pairwise interactions. Note that the resulting models are no longer pairwise interaction processes - they have interactions of infinite order. \code{pairsat.family} is an object of class \code{"isf"} containing a function \code{pairwise$eval} for evaluating the sufficient statistics of any saturated pairwise interaction point process model in which the original pair potentials take an exponential family form. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{Geyer}} to create the Geyer saturation process. \code{\link{SatPiece}} to create a saturated process with piecewise constant pair potential. \code{\link{Saturated}} to create a more general saturation model. Other families: \code{\link{inforder.family}}, \code{\link{ord.family}}, \code{\link{pairwise.family}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/Iest.Rd0000644000176200001440000001234714141452520015017 0ustar liggesusers\name{Iest} \alias{Iest} \title{Estimate the I-function} \description{ Estimates the summary function \eqn{I(r)} for a multitype point pattern. } \usage{ Iest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{I(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector of values for the argument \eqn{r} at which \eqn{I(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Vector of character strings specifying the edge correction(s) to be used by \code{\link{Jest}}. 
} } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{I} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{I(r)} computed from the border-corrected estimates of \eqn{J} functions} \item{km}{the spatial Kaplan-Meier estimator of \eqn{I(r)} computed from the Kaplan-Meier estimates of \eqn{J} functions} \item{han}{the Hanisch-style estimator of \eqn{I(r)} computed from the Hanisch-style estimates of \eqn{J} functions} \item{un}{the uncorrected estimate of \eqn{I(r)} computed from the uncorrected estimates of \eqn{J} } \item{theo}{the theoretical value of \eqn{I(r)} for a stationary Poisson process: identically equal to \eqn{0} } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{I} function summarises the dependence between types in a multitype point process (Van Lieshout and Baddeley, 1999). It is based on the concept of the \eqn{J} function for an unmarked point process (Van Lieshout and Baddeley, 1996). See \code{\link{Jest}} for information about the \eqn{J} function. The \eqn{I} function is defined as \deqn{ I(r) = \sum_{i=1}^m p_i J_{ii}(r) - J_{\bullet\bullet}(r) }{ I(r) = (sum p[i] Jii(r)) - J(r) } where \eqn{J_{\bullet\bullet}}{J} is the \eqn{J} function for the entire point process ignoring the marks, while \eqn{J_{ii}}{Jii} is the \eqn{J} function for the process consisting of points of type \eqn{i} only, and \eqn{p_i}{p[i]} is the proportion of points which are of type \eqn{i}. The \eqn{I} function is designed to measure dependence between points of different types, even if the points are not Poisson. Let \eqn{X} be a stationary multitype point process, and write \eqn{X_i}{X[i]} for the process of points of type \eqn{i}. If the processes \eqn{X_i}{X[i]} are independent of each other, then the \eqn{I}-function is identically equal to \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} typically indicate negative and positive association, respectively, between types. See Van Lieshout and Baddeley (1999) for further information. An estimate of \eqn{I} derived from a multitype spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{I(r)} is compared against the constant function \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} may suggest negative and positive association, respectively. This algorithm estimates the \eqn{I}-function from the multitype point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial marked point process in the plane, observed through a bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. It must be a multitype point pattern (it must have a \code{marks} vector which is a \code{factor}). The function \code{\link{Jest}} is called to compute estimates of the \eqn{J} functions in the formula above. In fact three different estimates are computed using different edge corrections. See \code{\link{Jest}} for information. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns.
\emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jest}} } \examples{ data(amacrine) Ic <- Iest(amacrine) plot(Ic, main="Amacrine Cells data") # values are below I = 0, suggesting negative association # between 'on' and 'off' cells. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/predict.rppm.Rd0000644000176200001440000000511414141452520016514 0ustar liggesusers\name{predict.rppm} \alias{fitted.rppm} \alias{predict.rppm} \title{ Make Predictions From a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, compute the predicted intensity of the model. } \usage{ \method{predict}{rppm}(object, \dots) \method{fitted}{rppm}(object, \dots) } \arguments{ \item{object}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Optional arguments passed to \code{\link{predict.ppm}} to specify the locations where prediction is required. (Ignored by \code{fitted.rppm}) } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. They compute the fitted intensity of a point process model. The argument \code{object} should be a fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. The \code{fitted} method computes the fitted intensity at the original data points, yielding a numeric vector with one entry for each data point. The \code{predict} method computes the fitted intensity at any locations. By default, predictions are calculated at a regular grid of spatial locations, and the result is a pixel image giving the predicted intensity values at these locations. Alternatively, predictions can be performed at other locations, or a finer grid of locations, or only at certain specified locations, using additional arguments \code{\dots} which will be interpreted by \code{\link{predict.ppm}}. Common arguments are \code{ngrid} to increase the grid resolution, \code{window} to specify the prediction region, and \code{locations} to specify the exact locations of predictions. See \code{\link{predict.ppm}} for details of these arguments. Predictions are computed by evaluating the explanatory covariates at each desired location, and applying the recursive partitioning rule to each set of covariate values. } \value{ The result of \code{fitted.rppm} is a numeric vector. The result of \code{predict.rppm} is a pixel image, a list of pixel images, or a numeric vector. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}} } \examples{ fit <- rppm(unmark(gorillas) ~ vegetation, data=gorillas.extra) plot(predict(fit)) lambdaX <- fitted(fit) lambdaX[1:5] # Mondriaan pictures plot(predict(rppm(redwoodfull ~ x + y))) points(redwoodfull) } \keyword{spatial} \keyword{models} spatstat.core/man/rthinclumps.Rd0000644000176200001440000000401714141452520016456 0ustar liggesusers\name{rthinclumps} \alias{rthinclumps} \title{Random Thinning of Clumps} \description{ Finds the topologically-connected clumps of a spatial region and randomly deletes some of the clumps. } \usage{ rthinclumps(W, p, \dots) } \arguments{ \item{W}{ Window (object of class \code{"owin"}) or pixel image (object of class \code{"im"}).
} \item{p}{ Probability of \emph{retaining} each clump. A single number between 0 and 1. } \item{\dots}{ Additional arguments passed to \code{\link{connected.im}} or \code{\link{connected.owin}} to determine the connected clumps. } } \details{ The argument \code{W} specifies a region of space, typically consisting of several clumps that are not connected to each other. The algorithm randomly deletes or retains each clump. The fate of each clump is independent of other clumps. If \code{W} is a spatial window (class \code{"owin"}) then it will be divided into clumps using \code{\link{connected.owin}}. Each clump will either be retained (with probability \code{p}) or deleted in its entirety (with probability \code{1-p}). If \code{W} is a pixel image (class \code{"im"}) then its domain will be divided into clumps using \code{\link{connected.im}}. The default behaviour depends on the type of pixel values. If the pixel values are logical, then the spatial region will be taken to consist of all pixels whose value is \code{TRUE}. Otherwise, the spatial region is taken to consist of all pixels whose value is defined (i.e. not equal to \code{NA}). This behaviour can be changed using the argument \code{background} passed to \code{\link{connected.im}}. The result is a window comprising all the clumps that were retained. } \value{ Window (object of class \code{"owin"}). } \author{ \adrian. } \seealso{ \code{\link{rthin}} for thinning other kinds of objects. } \examples{ A <- (distmap(cells) < 0.06) opa <- par(mfrow=c(1,2)) plot(A) plot(rthinclumps(A, 0.5)) par(opa) } \keyword{spatial} \keyword{datagen} \keyword{manip} spatstat.core/man/update.detpointprocfamily.Rd0000644000176200001440000000113514141452520021301 0ustar liggesusers\name{update.detpointprocfamily} \alias{update.detpointprocfamily} \title{Set Parameter Values in a Determinantal Point Process Model} \description{ Set parameter values in a determinantal point process model object. } \usage{ \method{update}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{object of class \code{"detpointprocfamily"}.} \item{\dots}{ arguments of the form \code{tag=value} specifying the parameters values to set. } } \value{ Another object of class \code{"detpointprocfamily"}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/rPenttinen.Rd0000644000176200001440000001013614141452520016233 0ustar liggesusers\name{rPenttinen} \alias{rPenttinen} \title{Perfect Simulation of the Penttinen Process} \description{ Generate a random pattern of points, a simulated realisation of the Penttinen process, using a perfect simulation algorithm. } \usage{ rPenttinen(beta, gamma=1, R, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ Interaction strength parameter (a number between 0 and 1). } \item{R}{ disc radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Penttinen point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance \eqn{d}. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{R} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \Jyvaskyla Studies in Computer Science, Economics and Statistics \bold{7}, University of \Jyvaskyla, Finland. } \author{ \adrian, based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rPenttinen(50, 0.5, 0.02) Z <- rPenttinen(50, 0.5, 0.01, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{Penttinen}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}. } \keyword{spatial} \keyword{datagen} spatstat.core/man/plot.influence.ppm.Rd0000644000176200001440000000474114141452520017632 0ustar liggesusers\name{plot.influence.ppm} \alias{plot.influence.ppm} \title{ Plot Influence Measure } \description{ Plots an influence measure that has been computed by \code{\link{influence.ppm}}. } \usage{ \method{plot}{influence.ppm}(x, ..., multiplot=TRUE) } \arguments{ \item{x}{ Influence measure (object of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to plot more than one panel.
This happens if the original point process model is multitype. } } \details{ This is the plot method for objects of class \code{"influence.ppm"}. These objects are computed by the command \code{\link{influence.ppm}}. For a point process model fitted by maximum likelihood or maximum pseudolikelihood (the default), influence values are associated with the data points. The display shows circles centred at the data points with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one such display for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data points regardless of type. For a model fitted by logistic composite likelihood (\code{method="logi"} in \code{\link{ppm}}) influence values are associated with the data points and also with the dummy points used to fit the model. The display consists of two panels, for the data points and dummy points respectively, showing circles with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one pair of panels for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data and dummy points regardless of type. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. } \value{ None. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat.core/man/methods.influence.ppm.Rd0000644000176200001440000000411314141452520020310 0ustar liggesusers\name{methods.influence.ppm} \alias{methods.influence.ppm} %DoNotExport \alias{as.ppp.influence.ppm} \alias{as.owin.influence.ppm} \alias{domain.influence.ppm} \alias{integral.influence.ppm} \alias{Smooth.influence.ppm} \alias{Window.influence.ppm} \title{Methods for Influence Objects} \description{ Methods for the class \code{"influence.ppm"}. } \usage{ \method{as.ppp}{influence.ppm}(X, \dots) \method{as.owin}{influence.ppm}(W, \dots, fatal=TRUE) \method{domain}{influence.ppm}(X, \dots) \method{Smooth}{influence.ppm}(X, \dots) \method{Window}{influence.ppm}(X, \dots) \method{integral}{influence.ppm}(f, domain, \dots) } \arguments{ \item{X,W,f}{An object of class \code{"influence.ppm"}.} \item{domain}{ Optional. Domain of integration: a window (class \code{"owin"}) or a tessellation (class \code{"tess"}). } \item{\dots}{ Additional arguments. See Details. } \item{fatal}{ Logical value indicating what to do if the data cannot be converted to a window. If \code{fatal=TRUE} (the default) an error occurs. If \code{fatal=FALSE} a value of \code{NULL} is returned. } } \value{ A window (object of class \code{"owin"}) for \code{as.owin}, \code{domain} and \code{Window}. A point pattern (object of class \code{"ppp"}) for \code{as.ppp}. A numeric value or numeric vector for \code{integral}. A pixel image, or list of pixel images, for \code{Smooth}. } \details{ These functions are methods for the class \code{"influence.ppm"}. An object of this class represents the influence measure of a fitted point process model (see \code{\link{influence.ppm}}).
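For example (a minimal sketch, assuming a model fitted to the \code{cells} dataset as in the Examples below), an influence object can be passed directly to these methods: \preformatted{
  fit <- ppm(cells ~ x)
  a <- influence(fit)
  as.ppp(a)     # influence values attached to the data points
  integral(a)   # total influence
  Smooth(a)     # spatially smoothed influence (a pixel image)
}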
For \code{as.ppp}, \code{domain}, \code{integral} and \code{Window}, additional arguments (\code{\dots}) are ignored. For \code{as.owin} and \code{Smooth}, additional arguments are passed to the method for class \code{"ppp"}. } \seealso{ \code{\link{influence.ppm}}, \code{\link{plot.influence.ppm}}, \code{\link{[.influence.ppm}} } \examples{ fit <- ppm(cells ~ x) a <- influence(fit) Window(a) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/K3est.Rd0000644000176200001440000000725014141452520015101 0ustar liggesusers\name{K3est} \Rdversion{1.1} \alias{K3est} \title{ K-function of a Three-Dimensional Point Pattern } \description{ Estimates the \eqn{K}-function from a three-dimensional point pattern. } \usage{ K3est(X, \dots, rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), ratio=FALSE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the three-dimensional \eqn{K} function is \deqn{ K_3(r) = \frac 1 \lambda E(N(\Phi, x, r) \mid x \in \Phi) }{ K3(r) = (1/lambda) E(N(Phi,x,r) | x in Phi) } where \eqn{\lambda}{lambda} is the intensity of the process (the expected number of points per unit volume) and \eqn{N(\Phi,x,r)}{N(Phi,x,r)} is the number of points of \eqn{\Phi}{Phi}, other than \eqn{x} itself, which fall within a distance \eqn{r} of \eqn{x}. This is the three-dimensional generalisation of Ripley's \eqn{K} function for two-dimensional point processes (Ripley, 1977). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is renormalised to give the estimate of \eqn{K_3(r)}{K3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. 
} \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{pcf3est}}, \code{\link{F3est}}, \code{\link{G3est}} for other summary functions of a three-dimensional point pattern. \code{\link{Kest}} to estimate the \eqn{K}-function of point patterns in two dimensions or other spaces. } \examples{ X <- rpoispp3(42) Z <- K3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Hybrid.Rd0000644000176200001440000000632514144333466015341 0ustar liggesusers\name{Hybrid} \alias{Hybrid} \title{ Hybrid Interaction Point Process Model } \description{ Creates an instance of a hybrid point process model which can then be fitted to point pattern data. } \usage{ Hybrid(...) } \arguments{ \item{\dots}{ Two or more interactions (objects of class \code{"interact"}) or objects which can be converted to interactions. See Details. } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The \emph{hybrid} of two point processes, with probability densities \eqn{f(x)} and \eqn{g(x)} respectively, is the point process with probability density \deqn{h(x) = c \, f(x) \, g(x)}{h(x) = c * f(x) * g(x)} where \eqn{c} is a normalising constant. Equivalently, the hybrid of two point processes with conditional intensities \eqn{\lambda(u,x)}{lambda(u,x)} and \eqn{\kappa(u,x)}{kappa(u,x)} is the point process with conditional intensity \deqn{ \phi(u,x) = \lambda(u,x) \, \kappa(u,x). }{ phi(u,x) = lambda(u,x) * kappa(u,x). } The hybrid of \eqn{m > 2} point processes is defined in a similar way. The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of a hybrid interaction is yielded by the function \code{Hybrid()}. The arguments \code{\dots} will be interpreted as interpoint interactions (objects of class \code{"interact"}) and the result will be the hybrid of these interactions. Each argument must either be an interpoint interaction (object of class \code{"interact"}), or a point process model (object of class \code{"ppm"}) from which the interpoint interaction will be extracted. The arguments \code{\dots} may also be given in the form \code{name=value}. This is purely cosmetic: it can be used to attach simple mnemonic names to the component interactions, and makes the printed output from \code{\link{print.ppm}} neater. } \value{ An object of class \code{"interact"} describing an interpoint interaction structure. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
\code{DOI: 10.18637/jss.v055.i11} } \seealso{ \code{\link{ppm}} } \examples{ Hybrid(Strauss(0.1), Geyer(0.2, 3)) Hybrid(Ha=Hardcore(0.05), St=Strauss(0.1), Ge=Geyer(0.2, 3)) fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) fit ctr <- rmhcontrol(nrep=5e4, expand=1) plot(simulate(fit, control=ctr)) # hybrid components can be models (including hybrid models) Hybrid(fit, S=Softcore(0.5)) # plot.fii only works if every component is a pairwise interaction data(swedishpines) fit2 <- ppm(swedishpines, ~1, Hybrid(DG=DiggleGratton(2,10), S=Strauss(5))) plot(fitin(fit2)) plot(fitin(fit2), separate=TRUE, mar.panel=rep(4,4)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/eem.Rd0000644000176200001440000000544414141452520014661 0ustar liggesusers\name{eem} \alias{eem} \alias{eem.ppm} \alias{eem.slrm} \title{ Exponential Energy Marks } \description{ Given a point process model fitted to a point pattern, compute the Stoyan-Grabarnik diagnostic ``exponential energy marks'' for the data points. } \usage{ eem(fit, \dots) \method{eem}{ppm}(fit, check=TRUE, \dots) \method{eem}{slrm}(fit, check=TRUE, \dots) } \arguments{ \item{fit}{ The fitted point process model. An object of class \code{"ppm"} or \code{"slrm"}. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{\dots}{Ignored.} } \value{ A vector containing the values of the exponential energy mark for each point in the pattern. } \details{ Stoyan and Grabarnik (1991) proposed a diagnostic tool for point process models fitted to spatial point pattern data. Each point \eqn{x_i}{x[i]} of the data pattern \eqn{X} is given a `mark' or `weight' \deqn{m_i = \frac 1 {\hat\lambda(x_i,X)}}{m[i] = 1/lambda(x[i],X)} where \eqn{\hat\lambda(x_i,X)}{lambda(x[i],X)} is the conditional intensity of the fitted model. If the fitted model is correct, then the sum of these marks for all points in a region \eqn{B} has expected value equal to the area of \eqn{B}. The argument \code{fit} must be a fitted point process model (object of class \code{"ppm"} or \code{"slrm"}). Such objects are produced by the fitting algorithms \code{\link{ppm}} and \code{\link{slrm}}. This fitted model object contains complete information about the original data pattern and the model that was fitted to it. The value returned by \code{eem} is the vector of weights \eqn{m_i}{m[i]} associated with the points \eqn{x_i}{x[i]} of the original data pattern. The original data pattern (in corresponding order) can be extracted from \code{fit} using \code{\link{response}}. The function \code{\link{diagnose.ppm}} produces a set of sensible diagnostic plots based on these weights. } \references{ Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100.
} \seealso{ \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{data.ppm}}, \code{\link{residuals.ppm}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells ~x, Strauss(r=0.15)) ee <- eem(fit) sum(ee)/area(Window(cells)) # should be about 1 if model is correct Y <- setmarks(cells, ee) plot(Y, main="Cells data\n Exponential energy marks") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/sdrPredict.Rd0000644000176200001440000000236414141452520016214 0ustar liggesusers\name{sdrPredict} \alias{sdrPredict} \title{ Compute Predictors from Sufficient Dimension Reduction } \description{ Given the result of a Sufficient Dimension Reduction method, compute the new predictors. } \usage{ sdrPredict(covariates, B) } \arguments{ \item{covariates}{ A list of pixel images (objects of class \code{"im"}). } \item{B}{ Either a matrix of coefficients for the covariates, or the result of a call to \code{\link{sdr}}. } } \details{ This function assumes that \code{\link{sdr}} has already been used to find a minimal set of predictors based on the \code{covariates}. The argument \code{B} should be either the result of \code{\link{sdr}} or the coefficient matrix returned as one of the results of \code{\link{sdr}}. The columns of this matrix define linear combinations of the \code{covariates}. This function evaluates those linear combinations, and returns a list of pixel images containing the new predictors. } \value{ A list of pixel images (objects of class \code{"im"}) with one entry for each column of \code{B}. } \author{ \adrian } \seealso{ \code{\link{sdr}} } \examples{ A <- sdr(bei, bei.extra) Y <- sdrPredict(bei.extra, A) Y } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pool.rat.Rd0000644000176200001440000000655714141452520015657 0ustar liggesusers\name{pool.rat} \alias{pool.rat} \title{ Pool Data from Several Ratio Objects } \description{ Pool the data from several ratio objects (objects of class \code{"rat"}) and compute a pooled estimate. } \usage{ \method{pool}{rat}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"rat"}. } \item{weights}{ Numeric vector of weights. } \item{relabel}{ Logical value indicating whether the result should be relabelled to show that it was obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"rat"} of ratio objects. It is used to combine several estimates of the same quantity when each estimate is a ratio. Each of the arguments \code{\dots} must be an object of class \code{"rat"} representing a ratio object (basically a numerator and a denominator; see \code{\link{rat}}). We assume that these ratios are all estimates of the same quantity. If the objects are called \eqn{R_1, \ldots, R_n}{R[1], \dots, R[n]} and if \eqn{R_i}{R[i]} has numerator \eqn{Y_i}{Y[i]} and denominator \eqn{X_i}{X[i]}, so that notionally \eqn{R_i = Y_i/X_i}{R[i] = Y[i]/X[i]}, then the pooled estimate is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i Y_i}{\sum_i X_i}. }{ R = (Y[1]+\dots+Y[n])/(X[1]+\dots+X[n]). } The standard error of \eqn{R} is computed using the delta method as described in Baddeley \emph{et al.} (1993) or Cochran (1977, pp 154, 161). If the argument \code{weights} is given, it should be a numeric vector of length equal to the number of objects to be pooled. 
The pooled estimator is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i w_i Y_i}{\sum_i w_i X_i} }{ R = (w[1] * Y[1]+\dots+ w[n] * Y[n])/(w[1] * X[1]+\dots+w[n] * X[n]) } where \eqn{w_i}{w[i]} is the \eqn{i}th weight. This calculation is currently implemented only for objects which also belong to the class \code{"fv"} (function value tables), for which the necessary arithmetic can be performed. For example, if \code{\link{Kest}} is called with argument \code{ratio=TRUE}, the result is a suitable object (belonging to the classes \code{"rat"} and \code{"fv"}). Warnings or errors will be issued if the ratio objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of the same class as the input. } \seealso{ \code{\link{rat}}, \code{\link{pool}}, \code{\link{pool.fv}}, \code{\link{Kest}} } \examples{ K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K3 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K <- pool(K1, K2, K3) plot(K, pooliso ~ r, shade=c("hiiso", "loiso")) } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Cochran, W.G. (1977) \emph{Sampling techniques}, 3rd edition. New York: John Wiley and Sons. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Jcross.Rd0000644000176200001440000001604614141452520015356 0ustar liggesusers\name{Jcross} \alias{Jcross} \title{ Multitype J Function (i-to-j) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and of type \eqn{j}. } \usage{ Jcross(X, i, j, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{ij}(r)}{Jij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}).
Essentially a data frame containing seven numeric columns \item{J}{the recommended estimator of \eqn{J_{ij}(r)}{Jij(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{ij}(r)}{Jij(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{ij}(r)}{1 - Gij(r)} and \eqn{1 - F_{j}(r)}{1 - Fj(r)}, see \code{\link{Gcross}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{ij}(r)}{Jij(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gcross}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jcross} and its companions \code{\link{Jdot}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``type \eqn{i} to type \eqn{j}'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{ij}(r) = \frac{1 - G_{ij}(r)}{1 - F_{j}(r)}}{Jij(r) = (1 - Gij(r))/(1-Fj(r))} where \eqn{G_{ij}(r)}{Gij(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest point of type \eqn{j}, and \eqn{F_{j}(r)}{Fj(r)} is the distribution function of the distance from a fixed point in space to the nearest point of type \eqn{j} in the pattern. An estimate of \eqn{J_{ij}(r)}{Jij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points is independent of the subprocess of points of type \eqn{j}, then \eqn{J_{ij}(r) \equiv 1}{Jij(r) = 1}. Hence deviations of the empirical estimate of \eqn{J_{ij}}{Jij} from the value 1 may suggest dependence between types. This algorithm estimates \eqn{J_{ij}(r)}{Jij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}.
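For example (a minimal sketch using the \code{lansing} data, as in the Examples below), the component estimates mentioned under Value can be extracted from the attributes: \preformatted{
  J <- Jcross(lansing, "hickory", "maple")
  G <- attr(J, "G")   # the Gcross estimate (numerator ingredient)
  F <- attr(J, "F")   # the Fest estimate (denominator ingredient)
  plot(J)
}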
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jdot}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30)] } Jhm <- Jcross(woods, "hickory", "maple") # diagnostic plot for independence between hickories and maples plot(Jhm) # synthetic example with two types "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jcross(pp) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/plot.studpermutest.Rd0000644000176200001440000000716514141452520020022 0ustar liggesusers\name{plot.studpermutest} \alias{plot.studpermutest} \title{ Plot a Studentised Permutation Test } \description{ Plot the result of the studentised permutation test. } \usage{ \method{plot}{studpermutest}(x, fmla, \dots, lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if (meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if (meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox = FALSE, add = FALSE) } \arguments{ \item{x}{ An object of class \code{"studpermutest"} generated by \code{\link{studpermu.test}} and representing the result of a studentised permutation test for spatial point pattern data. } \item{fmla}{ Plot formula used in \code{\link{plot.fv}}. } \item{\dots}{ Additional graphical arguments passed to \code{\link{plot.fv}}. } \item{lty,col,lwd}{ Line type, colour, and line width of the curves plotting the summary function for each point pattern in the original data. Either a single value or a vector of length equal to the number of point patterns. } \item{lty.theo,col.theo,lwd.theo}{ Line type, colour, and line width of the curve representing the theoretical value of the summary function. } \item{lty.mean,col.mean,lwd.mean}{ Line type, colour, and line width (as a multiple of \code{lwd}) of the curve representing the group mean of the summary function. } \item{separately}{ Logical value indicating whether to plot each group of data in a separate panel. } \item{meanonly}{ Logical value indicating whether to plot only the group means of the summary function. } \item{main}{ Character string giving a main title for the plot. } \item{xlim,ylim}{ Numeric vectors of length 2 giving the limits for the \eqn{x} and \eqn{y} coordinates of the plot or plots. } \item{ylab}{ Character string or expression to be used for the label on the \eqn{y} axis. 
} \item{legend}{ Logical value indicating whether to plot a legend explaining the meaning of each curve. } \item{legendpos}{ Position of legend. See \code{\link{plot.fv}}. } \item{lbox}{ Logical value indicating whether to plot a box around the plot. } \item{add}{ Logical value indicating whether the plot should be added to the existing plot (\code{add=TRUE}) or whether a new frame should be created (\code{add=FALSE}, the default). } } \details{ This is the \code{plot} method for objects of class \code{"studpermutest"} which represent the result of a studentised permutation test applied to several point patterns. The test is performed by \code{\link{studpermu.test}}. The plot shows the summary functions for each point pattern, coloured according to group. Optionally it can show the different groups in separate plot panels, or show only the group means in a single panel. } \value{ Null. } \author{ Ute Hahn. Modified for \code{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{studpermu.test}} } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) plot(testpyramidal) plot(testpyramidal, meanonly=TRUE) plot(testpyramidal, col.theo=8, lwd.theo=4, lty.theo=1) plot(testpyramidal, . ~ pi * r^2) op <- par(mfrow=c(1,3)) plot(testpyramidal, separately=TRUE) plot(testpyramidal, separately=TRUE, col=2, lty=1, lwd.mean=2, col.mean=4) par(op) } \keyword{hplot} \keyword{htest} spatstat.core/man/vargamma.estpcf.Rd0000644000176200001440000001521614141452520017167 0ustar liggesusers\name{vargamma.estpcf} \alias{vargamma.estpcf} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ vargamma.estpcf(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. 
The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical pair correlation function of the model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of \pkg{spatstat}, instead of specifying \code{nu} (then called \code{nu.ker}), the user could specify \code{nu.pcf}, which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported, but is no longer recommended, for consistency across the package. If it is used, exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{scale} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object.
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ u <- vargamma.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat.core/man/DiggleGratton.Rd0000644000176200001440000000544514141452520016646 0ustar liggesusers\name{DiggleGratton} \alias{DiggleGratton} \title{Diggle-Gratton model} \description{ Creates an instance of the Diggle-Gratton pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ DiggleGratton(delta=NA, rho) } \arguments{ \item{delta}{lower threshold \eqn{\delta}{delta}} \item{rho}{upper threshold \eqn{\rho}{rho}} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Diggle and Gratton (1984, pages 208--210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - delta)/(rho - delta))^kappa, for delta <= t <= rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}. Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa} are parameters. Note that we use the symbol \eqn{\kappa}{kappa} where Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987) use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{delta <= rho}. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. The strength of inhibition increases with \eqn{\kappa}{kappa}. For \eqn{\kappa=0}{kappa=0} the model is a hard core process with hard core radius \eqn{\delta}{delta}. For \eqn{\kappa=\infty}{kappa=Inf} the model is a hard core process with hard core radius \eqn{\rho}{rho}. The irregular parameters \eqn{\delta, \rho}{delta, rho} must be given in the call to \code{DiggleGratton}, while the regular parameter \eqn{\kappa}{kappa} will be estimated. If the lower threshold \code{delta} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{delta} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points.
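For example (a minimal sketch using the \code{cells} dataset), supplying only \code{rho} leaves \code{delta} to be estimated from the data as just described: \preformatted{
  # delta defaults to NA and is estimated when ppm() is called
  fit <- ppm(cells ~ 1, DiggleGratton(rho=0.1))
  fit
}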
} \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}} } \examples{ ppm(cells ~1, DiggleGratton(0.05, 0.1)) } \references{ Diggle, P.J., Gates, D.J. and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/Gest.Rd0000644000176200001440000002135614141452520015015 0ustar liggesusers\name{Gest} \alias{Gest} \alias{nearest.neighbour} \title{ Nearest Neighbour Distance Function G } \description{ Estimates the nearest neighbour distance distribution function \eqn{G(r)} from a point pattern in a window of arbitrary shape. } \usage{ Gest(X, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han"), domain=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{G(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{G(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{G(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing some or all of the following columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{G(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G(r)}, i.e. the empirical distribution of the distances from each point in the pattern \code{X} to the nearest other point of the pattern } \item{han}{the Hanisch correction estimator of \eqn{G(r)} } \item{theo}{the theoretical value of \eqn{G(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ The nearest neighbour distance distribution function (also called the ``\emph{event-to-event}'' or ``\emph{inter-event}'' distribution) of a point process \eqn{X} is the cumulative distribution function \eqn{G} of the distance from a typical random point of \eqn{X} to the nearest other point of \eqn{X}. An estimate of \eqn{G} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{G} is a useful statistic summarising one aspect of the ``clustering'' of points. 
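For example (a minimal sketch using the \code{swedishpines} dataset; the column names follow the Value section above), one quantity that can be read off the estimate is the distance by which half of the points have found their nearest neighbour: \preformatted{
  G <- Gest(swedishpines)
  # smallest r at which the Kaplan-Meier estimate reaches 0.5
  with(G, min(r[km >= 0.5]))
}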
For inferential purposes, the estimate of \eqn{G} is usually compared to the true value of \eqn{G} for a completely random (Poisson) point process, which is \deqn{G(r) = 1 - e^{ - \lambda \pi r^2} }{% G(r) = 1 - exp( - lambda * pi * r^2)} where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{G} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the nearest neighbour distance distribution function \eqn{G} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{G} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or ``\emph{reduced sample}'' estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Hanisch estimator (Hanisch, 1984). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G(r)}. The hazard rate is defined as the derivative \deqn{\lambda(r) = - \frac{d}{dr} \log (1 - G(r))}{% lambda(r) = - (d/dr) log(1 - G(r))} This estimate should be used with caution as \eqn{G} is not necessarily differentiable. If the argument \code{domain} is given, the estimate of \eqn{G(r)} will be based only on the nearest neighbour distances measured from points falling inside \code{domain} (although their nearest neighbours may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G}. However it is sometimes useful. It can be returned by the algorithm, by selecting \code{correction="none"}. Care should be taken not to use the uncorrected empirical \eqn{G} as if it were an unbiased estimator of \eqn{G}. To simply compute the nearest neighbour distance for each point in the pattern, use \code{\link{nndist}}. To determine which point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. } \references{ Baddeley, A.J. 
Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest-neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The function \eqn{G} does not necessarily have a density. Any valid c.d.f. may appear as the nearest neighbour distance distribution function of a stationary point process. The reduced sample estimator of \eqn{G} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{nndist}}, \code{\link{nnwhich}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) G <- Gest(cells) plot(G) # P-P style plot plot(G, cbind(km,theo) ~ theo) # the empirical G is below the Poisson G, # indicating an inhibited pattern if(interactive()) { plot(G, . ~ r) plot(G, . ~ theo) plot(G, asin(sqrt(.)) ~ asin(sqrt(theo))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Kmodel.kppm.Rd0000644000176200001440000000365514141452520016276 0ustar liggesusers\name{Kmodel.kppm} \alias{Kmodel.kppm} \alias{pcfmodel.kppm} \title{K Function or Pair Correlation Function of Cluster Model or Cox model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a cluster point process model or Cox point process model. } \usage{ \method{Kmodel}{kppm}(model, \dots) \method{pcfmodel}{kppm}(model, \dots) } \arguments{ \item{model}{ A fitted cluster point process model (object of class \code{"kppm"}) typically obtained from the model-fitting algorithm \code{\link{kppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. In particular this is possible for a fitted cluster point process model (object of class \code{"kppm"} obtained from \code{\link{kppm}}). The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"kppm"}. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. 
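For example (a minimal sketch, fitting a Thomas model to the \code{redwood} data), \code{pcfmodel} is used in exactly the same way: \preformatted{
  fit <- kppm(redwood, ~1, "Thomas")
  g <- pcfmodel(fit)
  g(c(0.05, 0.1))   # theoretical pair correlation at two distances
}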
} \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{kppm}} to fit cluster models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. } \examples{ data(redwood) fit <- kppm(redwood, ~x, "MatClust") K <- Kmodel(fit) K(c(0.1, 0.2)) curve(K(x), from=0, to=0.25) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/rmhmodel.Rd0000644000176200001440000000573214141452520015722 0ustar liggesusers\name{rmhmodel} \alias{rmhmodel} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ rmhmodel(...) } \arguments{ \item{\dots}{Arguments specifying the point process model in some format. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. The algorithm requires the model to be specified in a particular format: an object of class \code{"rmhmodel"}. The function \code{\link{rmhmodel}} takes a description of a point process model in some other format, and converts it into an object of class \code{"rmhmodel"}. It also checks that the parameters of the model are valid. The function \code{\link{rmhmodel}} is generic, with methods for \describe{ \item{fitted point process models:}{ an object of class \code{"ppm"}, obtained by a call to the model-fitting function \code{\link{ppm}}. See \code{\link{rmhmodel.ppm}}. } \item{lists:}{ a list of parameter values in a certain format. See \code{\link{rmhmodel.list}}. } \item{default:}{ parameter values specified as separate arguments to \code{\dots}. See \code{\link{rmhmodel.default}}. } } } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Geyer, C.J. and \Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140.
} \seealso{ \code{\link{rmhmodel.ppm}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.list}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{Triplets}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} \code{\link{Penttinen}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/idw.Rd0000644000176200001440000001157014141452520014673 0ustar liggesusers\name{idw} \alias{idw} \title{Inverse-distance weighted smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations using inverse-distance weighting. } \usage{ idw(X, power=2, at=c("pixels", "points"), ..., se=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{power}{Numeric. Power of distance used in the weighting.} \item{at}{ Character string specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). String is partially matched. } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution of the result.} \item{se}{ Logical value specifying whether to calculate a standard error. } } \details{ This function performs spatial smoothing of numeric values observed at a set of irregular locations. Smoothing is performed by inverse distance weighting. If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is \deqn{ g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i} }{ g(u) = (sum of w[i] * v[i])/(sum of w[i]) } where the weights are the inverse \eqn{p}-th powers of distance, \deqn{ w_i = \frac 1 {d(u,x_i)^p} }{ w[i] = 1/d(u,x[i])^p } where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])} is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame. Then the smoothing procedure is applied to each column of marks. If \code{at="pixels"} (the default), the smoothed mark value is calculated at a grid of pixels, and the result is a pixel image. The arguments \code{\dots} control the pixel resolution. See \code{\link{as.mask}}. If \code{at="points"}, the smoothed mark values are calculated at the data points only, using a leave-one-out rule (the mark value at a data point is excluded when calculating the smoothed value for that point). An estimate of standard error is also calculated, if \code{se=TRUE}. The calculation assumes that the data point locations are fixed, that is, the standard error only takes into account the variability in the mark values, and not the variability due to randomness of the data point locations. An alternative to inverse-distance weighting is kernel smoothing, which is performed by \code{\link{Smooth.ppp}}. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). 
Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } If \code{se=TRUE}, then the result is a list with two entries named \code{estimate} and \code{SE}, which each have the format described above. } \seealso{ \code{\link{density.ppp}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{Smooth.ppp}} for kernel smoothing and \code{\link{nnmark}} for nearest-neighbour interpolation. To perform other kinds of interpolation, see also the \code{akima} package. } \examples{ # data frame of marks: trees marked by diameter and height plot(idw(finpines)) idw(finpines, at="points")[1:5,] plot(idw(finpines, se=TRUE)$SE) idw(finpines, at="points", se=TRUE)$SE[1:5, ] } \references{ Shepard, D. (1968) A two-dimensional interpolation function for irregularly-spaced data. \emph{Proceedings of the 1968 ACM National Conference}, 1968, pages 517--524. DOI: 10.1145/800186.810616 } \author{ \spatstatAuthors. Variance calculation by Andrew P Wheeler with modifications by Adrian Baddeley. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/zclustermodel.Rd0000644000176200001440000000176414141452520017010 0ustar liggesusers\name{zclustermodel} \alias{zclustermodel} \title{ Cluster Point Process Model } \description{ Experimental code. Creates an object representing a cluster point process model. Typically used for theoretical calculations about such a model. } \usage{ zclustermodel(name = "Thomas", \dots, mu, kappa, scale) } \arguments{ \item{name}{ Name of the cluster process. One of \code{"Thomas"}, \code{"MatClust"}, \code{"VarGamma"} or \code{"Cauchy"}. } \item{\dots}{ Other arguments needed for the model. } \item{mu}{ Mean cluster size. A single number, or a pixel image. } \item{kappa}{ Parent intensity. A single number. } \item{scale}{ Cluster scale parameter of the model. } } \details{ Experimental. } \value{ Object of the experimental class \code{"zclustermodel"}. } \author{ \adrian } \seealso{ \code{\link{methods.zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) } \keyword{spatial} \keyword{models} spatstat.core/man/Kmulti.Rd0000644000176200001440000001720714141452520015360 0ustar liggesusers\name{Kmulti} \alias{Kmulti} \title{ Marked K-Function } \description{ For a marked point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}. } \usage{ Kmulti(X, I, J, r=NULL, breaks=NULL, correction, \dots, rmax=NULL, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. 
See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. If necessary, specify \code{rmax}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The function \code{Kmulti} generalises \code{\link{Kest}} (for unmarked point patterns) and \code{\link{Kdot}} and \code{\link{Kcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. The multitype \eqn{K} function is defined so that \eqn{\lambda_J K_{IJ}(r)}{lambda[J] KIJ(r)} equals the expected number of additional random points of \eqn{X_J}{X[J]} within a distance \eqn{r} of a typical point of \eqn{X_I}{X[I]}. Here \eqn{\lambda_J}{lambda[J]} is the intensity of \eqn{X_J}{X[J]} i.e. the expected number of points of \eqn{X_J}{X[J]} per unit area. The function \eqn{K_{IJ}}{KIJ} is determined by the second order moment properties of \eqn{X}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. 
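For example, here is a minimal sketch of equivalent ways to specify the subsets, using the \code{amacrine} data purely for illustration:

I <- marks(amacrine) == "on"    # logical subset index
J <- marks(amacrine) == "off"
K1 <- Kmulti(amacrine, I, J)
# the same subsets specified as functions, as needed for envelope()
fI <- function(X) { marks(X) == "on" }
fJ <- function(X) { marks(X) == "off" }
K2 <- Kmulti(amacrine, fI, fJ)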
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users are strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular and polygonal windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P.J. (1986). Displaced amacrine cells in the retina of a rabbit: analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D. and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303. Lotwick, H.W. and Silverman, B.W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D., Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{K_{IJ}}{KIJ} is not necessarily differentiable. The border correction (reduced sample) estimator of \eqn{K_{IJ}}{KIJ} used here is pointwise approximately unbiased, but need not be a nondecreasing function of \eqn{r}, while the true \eqn{K_{IJ}}{KIJ} must be nondecreasing. } \seealso{ \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # Longleaf Pine data: marks represent diameter trees <- longleaf \testonly{ trees <- trees[seq(1,npoints(trees), by=50), ] } K <- Kmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(K) # functions determining subsets f1 <- function(X) { marks(X) <= 15 } f2 <- function(X) { marks(X) >= 15 } K <- Kmulti(trees, f1, f2) \testonly{ rm(trees) } } \author{ \spatstatAuthors.
} \keyword{spatial} \keyword{nonparametric} spatstat.core/man/dg.progress.Rd0000644000176200001440000001472514141452520016352 0ustar liggesusers\name{dg.progress} \alias{dg.progress} \title{ Progress Plot of Dao-Genton Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Dao-Genton test for a spatial point pattern. } \usage{ dg.progress(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, nrank = 1, alpha, leaveout=1, interpolate = FALSE, rmin=0, savefuns = FALSE, savepatterns = FALSE, verbose=TRUE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{alternative} to specify one-sided or two-sided envelopes. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{alpha}{ Optional. The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{savefuns}{ Logical value indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical value indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. 
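As a rough sketch of how the result can be inspected (the column layout follows the usual \code{"fv"} conventions and is an assumption here):

ns <- 5   # deliberately small, for speed only
prog <- dg.progress(cells, Lest, nsim=ns)
head(as.data.frame(prog))   # test statistic and critical value against R
plot(prog)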
The command \code{dg.progress} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the test acceptance region (grey shading). If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian, \rolf and \ege } \seealso{ \code{\link{dg.test}}, \code{\link{dclf.progress}} } \examples{ ns <- if(interactive()) 19 else 5 plot(dg.progress(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat.core/man/BadGey.Rd0000644000176200001440000001136614144333466015250 0ustar liggesusers\name{BadGey} \alias{BadGey} \title{Hybrid Geyer Point Process Model} \description{ Creates an instance of the Baddeley-Geyer point process model, defined as a hybrid of several Geyer interactions. The model can then be fitted to point pattern data. } \usage{ BadGey(r, sat) } \arguments{ \item{r}{vector of interaction radii} \item{sat}{ vector of saturation parameters, or a single common value of saturation parameter } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process.
} \details{ This is Baddeley's generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to a process with multiple interaction distances. The BadGey point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], \ldots, r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],\ldots,s[k]}, intensity parameter \eqn{\beta}{beta} and interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], \ldots, gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x[i], X) \ldots gamma[k]^v(k, x[i], X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x[i], X) = min(s[j], t(j, x[i], X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie within a distance \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. \code{BadGey} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{BadGey()}. See the examples below. The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters that are applied to the point counts \eqn{t_j(x_i, X)}{t(j,x[i],X)}. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} is applied to the count of points within a distance \code{r[1]}, and \code{sat[2]} to the count of points within a distance \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every count. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x[i], X) = t(j, x[i], X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{BadGey}} have a complicated relationship to the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{SatPiece}}, \code{\link{Hybrid}} } \section{Hybrids}{ A \sQuote{hybrid} interaction is one which is built by combining several different interactions (Baddeley et al, 2013). The \code{BadGey} interaction can be described as a hybrid of several \code{\link{Geyer}} interactions. The \code{\link{Hybrid}} command can be used to build hybrids of any interactions. If the \code{\link{Hybrid}} operator is applied to several \code{\link{Geyer}} models, the result is equivalent to a \code{BadGey} model. This can be useful for incremental model selection. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A.
(2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \code{DOI: 10.18637/jss.v055.i11} } \examples{ BadGey(c(0.1,0.2), c(1,1)) # prints a sensible description of itself BadGey(c(0.1,0.2), 1) # fit a stationary Baddeley-Geyer model ppm(cells ~1, BadGey(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend # ppm(cells ~polynom(x,y,3), BadGey(c(0.07, 0.1, 0.13), 2)) } \author{ \adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat.core/man/miplot.Rd0000644000176200001440000000405514141452520015414 0ustar liggesusers\name{miplot} \alias{miplot} \title{Morisita Index Plot} \description{ Displays the Morisita Index Plot of a spatial point pattern. } \usage{ miplot(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} } \details{ Morisita (1959) defined an index of spatial aggregation for a spatial point pattern based on quadrat counts. The spatial domain of the point pattern is first divided into \eqn{Q} subsets (quadrats) of equal size and shape. The numbers of points falling in each quadrat are counted. Then the Morisita Index is computed as \deqn{ \mbox{MI} = Q \frac{\sum_{i=1}^Q n_i (n_i - 1)}{N(N-1)} }{ MI = Q * sum(n[i] (n[i]-1))/(N(N-1)) } where \eqn{n_i}{n[i]} is the number of points falling in the \eqn{i}-th quadrat, and \eqn{N} is the total number of points. If the pattern is completely random, \code{MI} should be approximately equal to 1. Values of \code{MI} greater than 1 suggest clustering. The \emph{Morisita Index plot} is a plot of the Morisita Index \code{MI} against the linear dimension of the quadrats. The point pattern dataset is divided into \eqn{2 \times 2}{2 * 2} quadrats, then \eqn{3 \times 3}{3 * 3} quadrats, etc, and the Morisita Index is computed each time. This plot is an attempt to discern different scales of dependence in the point pattern data. } \value{ None. } \references{ M. Morisita (1959) Measuring of the dispersion of individuals and analysis of the distributional patterns. Memoir of the Faculty of Science, Kyushu University, Series E: Biology. \bold{2}: 215--235. } \seealso{ \code{\link{quadratcount}} } \examples{ data(longleaf) miplot(longleaf) opa <- par(mfrow=c(2,3)) data(cells) data(japanesepines) data(redwood) plot(cells) plot(japanesepines) plot(redwood) miplot(cells) miplot(japanesepines) miplot(redwood) par(opa) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/model.matrix.slrm.Rd0000644000176200001440000000325114141452520017464 0ustar liggesusers\name{model.matrix.slrm} \alias{model.matrix.slrm} \title{Extract Design Matrix from Spatial Logistic Regression Model} \description{ This function extracts the design matrix of a spatial logistic regression model. } \usage{ \method{model.matrix}{slrm}(object, ..., keepNA=TRUE) } \arguments{ \item{object}{ A fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{keepNA}{ Logical. Determines whether rows containing \code{NA} values will be deleted or retained. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a spatial logistic regression. 
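For instance, a minimal sketch of the effect of \code{keepNA} (the fitted model is chosen arbitrarily for illustration):

fit <- slrm(japanesepines ~ x + y)
m.all <- model.matrix(fit)                # one row per pixel; NA rows retained
m.ok  <- model.matrix(fit, keepNA=FALSE)  # rows containing NA (if any) are dropped
c(nrow(m.all), nrow(m.ok))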
The \code{object} must be a fitted spatial logistic regression (object of class \code{"slrm"}). Such objects are produced by the model-fitting function \code{\link{slrm}}. Usually the result is a matrix with one column for every constructed covariate in the model, and one row for every pixel in the grid used to fit the model. If \code{object} was fitted using split pixels (by calling \code{\link{slrm}} using the argument \code{splitby}) then the matrix has one row for every pixel or half-pixel. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. } \author{\adrian and \rolf } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{slrm}}. } \examples{ fit <- slrm(japanesepines ~x) head(model.matrix(fit)) # matrix with two columns: '(Intercept)' and 'x' } \keyword{spatial} \keyword{models} spatstat.core/man/rPSNCP.Rd0000644000176200001440000001414714141452520015160 0ustar liggesusers\name{rPSNCP} \alias{rPSNCP} \title{Simulate Product Shot-noise Cox Process} \description{ Generate a random multitype point pattern, a realisation of the product shot-noise Cox process. } \usage{ rPSNCP(lambda=rep(100, 4), kappa=rep(25, 4), omega=rep(0.03, 4), alpha=matrix(runif(16, -1, 3), nrow=4, ncol=4), kernels=NULL, nu.ker=NULL, win=owin(), nsim=1, drop=TRUE, \dots, cnames=NULL, epsth=0.001) % , mc.cores=1L } \arguments{ \item{lambda}{ List of intensities of component processes. Either a numeric vector determining the constant (homogeneous) intensities or a list of pixel images (objects of class \code{"im"}) determining the (inhomogeneous) intensity functions of component processes. The length of \code{lambda} determines the number of component processes. } \item{kappa}{ Numeric vector of intensities of the Poisson process of cluster centres for component processes. Must have the same length as \code{lambda}. } \item{omega}{ Numeric vector of bandwidths of cluster dispersal kernels for component processes. Must have the same length as \code{lambda} and \code{kappa}. } \item{alpha}{ Matrix of interaction parameters. Square numeric matrix with the same number of rows and columns as the length of \code{lambda}, \code{kappa} and \code{omega}. All entries of \code{alpha} must be greater than -1. } \item{kernels}{ Vector of character strings determining the cluster dispersal kernels of component processes. Implemented kernels are the Gaussian kernel (\code{"Thomas"}) with bandwidth \code{omega}, the Variance-Gamma (Bessel) kernel (\code{"VarGamma"}) with bandwidth \code{omega} and shape parameter \code{nu.ker}, and the Cauchy kernel (\code{"Cauchy"}) with bandwidth \code{omega}. Must have the same length as \code{lambda}, \code{kappa} and \code{omega}. } \item{nu.ker}{ Numeric vector of shape parameters for the Variance-Gamma kernels. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{cnames}{ Optional vector of character strings giving the names of the component processes. } \item{\dots}{ Optional arguments passed to \code{\link[spatstat.geom]{as.mask}} to determine the pixel array geometry. See \code{\link[spatstat.geom]{as.mask}}. } \item{epsth}{ Numerical threshold to determine the maximum interaction range for cluster kernels. % See Details. % NO DETAILS ARE GIVEN! } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern.
} % \item{mc.cores}{ % Integer value indicating the number of cores for parallel computing using % \code{"mclapply"} function in the \pkg{parallel} package. % } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is multitype (it carries a vector of marks which is a factor). } \details{ This function generates a realisation of a product shot-noise Cox process (PSNCP). This is a multitype (multivariate) Cox point process in which each element of the multivariate random intensity \eqn{\Lambda(u)} of the process is obtained by \deqn{ \Lambda_i(u) = \lambda_i(u) S_i(u) \prod_{j \neq i} E_{ji}(u) }{ Lambda[i](u) = lambda[i](u) S[i](u) prod[j != i] E[ji](u) } where \eqn{\lambda_i(u)}{\lambda[i](u)} is the intensity of component \eqn{i} of the process, \deqn{ S_i(u) = \frac{1}{\kappa_{i}} \sum_{v \in \Phi_i} k_{i}(u - v) }{ S[i](u) = 1 / (kappa[i]) sum[v in Phi[i]] k[i](u - v) } is the shot-noise random field for component \eqn{i}, and \deqn{ E_{ji}(u) = \exp(-\kappa_{j} \alpha_{ji} / k_{j}(0)) \prod_{v \in \Phi_{j}} {1 + \alpha_{ji} \frac{k_j(u-v)}{k_j(0)}} }{ E[ji](u) = exp(-kappa[j] alpha[ji] / k[j](0)) prod[v in Phi[j]] (1 + alpha[ji] k[j](u-v) / k[j](0)) } is a product field controlling impulses from the parent Poisson process \eqn{\Phi_j}{\Phi[j]} with constant intensity \eqn{\kappa_j}{\kappa[j]} of component process \eqn{j} on \eqn{\Lambda_i(u)}{\Lambda[i](u)}. Here \eqn{k_i(u)}{k[i](u)} is an isotropic kernel (probability density) function on \eqn{R^2} with bandwidth \eqn{\omega_i}{\omega[i]} and shape parameter \eqn{\nu_i}{\nu[i]}, and \eqn{\alpha_{ji}>-1}{alpha[ji] > -1} is the interaction parameter. } \seealso{ \code{\link{rmpoispp}}, \code{\link{rThomas}}, \code{\link{rVarGamma}}, \code{\link{rCauchy}}, \code{\link{rNeymanScott}} } \references{ Jalilian, A., Guan, Y., Mateu, J. and Waagepetersen, R. (2015) Multivariate product-shot-noise Cox point process models. \emph{Biometrics} \bold{71}(4), 1022--1033. } \examples{ online <- interactive() # Example 1: homogeneous components lambda <- c(250, 300, 180, 400) kappa <- c(30, 25, 20, 25) omega <- c(0.02, 0.025, 0.03, 0.02) alpha <- matrix(runif(16, -1, 1), nrow=4, ncol=4) if(!online) lambda <- lambda/10 X <- rPSNCP(lambda, kappa, omega, alpha) if(online) { plot(X) plot(split(X)) } # Example 2: inhomogeneous components z1 <- scaletointerval.im(bei.extra$elev, from=0, to=1) z2 <- scaletointerval.im(bei.extra$grad, from=0, to=1) if(!online) { ## reduce resolution to reduce check time z1 <- as.im(z1, dimyx=c(40,80)) z2 <- as.im(z2, dimyx=c(40,80)) } lambda <- list( exp(-8 + 1.5 * z1 + 0.5 * z2), exp(-7.25 + 1 * z1 - 1.5 * z2), exp(-6 - 1.5 * z1 + 0.5 * z2), exp(-7.5 + 2 * z1 - 3 * z2)) kappa <- c(35, 30, 20, 25) / (1000 * 500) omega <- c(15, 35, 40, 25) alpha <- matrix(runif(16, -1, 1), nrow=4, ncol=4) if(!online) lambda <- lapply(lambda, "/", e2=10) sapply(lambda, integral) X <- rPSNCP(lambda, kappa, omega, alpha, win = bei$window, dimyx=dim(z1)) if(online) { plot(X) plot(split(X), cex=0.5) } } \author{Abdollah Jalilian. Modified by \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat.core/man/rshift.ppp.Rd0000644000176200001440000001672014141452520016207 0ustar liggesusers\name{rshift.ppp} \alias{rshift.ppp} \title{Randomly Shift a Point Pattern} \description{ Randomly shifts the points of a point pattern.
} \usage{ \method{rshift}{ppp}(X, \dots, which=NULL, group, nsim=1, drop=TRUE) } \arguments{ \item{X}{Point pattern to be subjected to a random shift. An object of class \code{"ppp"} } \item{\dots}{ Arguments that determine the random shift. See Details. } \item{group}{ Optional. Factor specifying a grouping of the points of \code{X}, or \code{NULL} indicating that all points belong to the same group. Each group will be shifted together, and separately from other groups. By default, points in a marked point pattern are grouped according to their mark values, while points in an unmarked point pattern are treated as a single group. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (object of class \code{"ppp"}) or a list of point patterns. } \details{ This operation randomly shifts the locations of the points in a point pattern. The function \code{rshift} is generic. This function \code{rshift.ppp} is the method for point patterns. The most common use of this function is to shift the points in a multitype point pattern. By default, points of the same type are shifted in parallel (i.e. points of a common type are shifted by a common displacement vector), and independently of other types. This is useful for testing the hypothesis of independence of types (the null hypothesis that the sub-patterns of points of each type are independent point processes). In general the points of \code{X} are divided into groups, then the points within a group are shifted by a common random displacement vector. Different groups of points are shifted independently. The grouping is determined as follows: \itemize{ \item If the argument \code{group} is present, then this determines the grouping. \item Otherwise, if \code{X} is a multitype point pattern, the marks determine the grouping. \item Otherwise, all points belong to a single group. } The argument \code{group} should be a factor, of length equal to the number of points in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all points of \code{X} belong to a single group. By default, every group of points will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} (for example, a vector of types in a multitype pattern) indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data points are shifted, is generated at random. Parameters that control the randomisation and the handling of edge effects are passed through the \code{\dots} argument. They are \describe{ \item{radius,width,height}{ Parameters of the random shift vector. } \item{edge}{ String indicating how to deal with edges of the pattern. Options are \code{"torus"}, \code{"erode"} and \code{"none"}. } \item{clip}{ Optional. Window to which the final point pattern should be clipped. } } If the window is a rectangle, the \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. 
This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted point lies outside the window of \code{X}. Options are: \describe{ \item{"none":}{ Points shifted outside the window of \code{X} simply disappear. } \item{"torus":}{ Toroidal or periodic boundary. Treat opposite edges of the window as identical, so that a point which disappears off the right-hand edge will re-appear at the left-hand edge. This is called a ``toroidal shift'' because it makes the rectangle topologically equivalent to the surface of a torus (doughnut). The window must be a rectangle. Toroidal shifts are undefined if the window is non-rectangular. } \item{"erode":}{ Clip the point pattern to a smaller window. If the random displacements are generated by a radial mechanism (see above), then the window of \code{X} is eroded by a distance equal to the value of the argument \code{radius}, using \code{\link{erosion}}. If the random displacements are generated by a rectangular mechanism, then the window of \code{X} is (if it is not rectangular) eroded by a distance \code{max(height,width)} using \code{\link{erosion}}; or (if it is rectangular) trimmed by a margin of width \code{width} at the left and right sides and trimmed by a margin of height \code{height} at the top and bottom. The rationale for this is that the clipping window is the largest window for which edge effects can be ignored. } } The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. If \code{nsim > 1}, then the simulation procedure is performed \code{nsim} times; the result is a list of \code{nsim} point patterns. 
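To summarise, a brief sketch of the two displacement mechanisms and the edge options described above (the parameter values are arbitrary):

# rectangular mechanism: x displacement at most 0.25, y at most 0.05
X1 <- rshift(amacrine, width=0.5, height=0.1, edge="torus")
# radial mechanism: displacement distance at most 0.1, clipped by erosion
X2 <- rshift(amacrine, radius=0.1, edge="erode")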
} \seealso{ \code{\link{rshift}}, \code{\link{rshift.psp}} } \examples{ # random toroidal shift # shift "on" and "off" points separately X <- rshift(amacrine) # shift "on" points and leave "off" points fixed X <- rshift(amacrine, which="on") # shift all points simultaneously X <- rshift(amacrine, group=NULL) # maximum displacement distance 0.1 units X <- rshift(amacrine, radius=0.1, nsim=2) # shift with erosion X <- rshift(amacrine, radius=0.1, edge="erode") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/OrdThresh.Rd0000644000176200001440000000336314141452520016013 0ustar liggesusers\name{OrdThresh} \alias{OrdThresh} \title{Ord's Interaction model} \description{ Creates an instance of Ord's point process model which can then be fitted to point pattern data. } \usage{ OrdThresh(r) } \arguments{ \item{r}{Positive number giving the threshold value for Ord's model.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. The function \eqn{g} is simply \eqn{g(a) = 1} if \eqn{a \ge r}{a >= r} and \eqn{g(a) = \gamma < 1}{g(a) = gamma < 1} if \eqn{a < r}{a < r}, where \eqn{r} is called the threshold value. This function creates an instance of Ord's model with a given value of \eqn{r}. It can then be fitted to point process data using \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/bw.CvLHeat.Rd0000644000176200001440000000440614141452520016005 0ustar liggesusers\name{bw.CvLHeat} \alias{bw.CvLHeat} \title{ Bandwidth Selection for Diffusion Smoother by Cronie-van Lieshout Rule } \description{ Selects an optimal bandwidth for diffusion smoothing using the Cronie-van Lieshout rule. } \usage{ bw.CvLHeat(X, \dots, srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose = TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{densityHeat.ppp}}. } \item{srange}{ Numeric vector of length 2 specifying a range of bandwidths to be considered. } \item{ns}{ Integer. Number of candidate bandwidths to be considered. } \item{sigma}{ Maximum smoothing bandwidth. A numeric value, or a pixel image, or a \code{function(x,y)}. Alternatively a numeric vector containing a sequence of candidate bandwidths. } \item{leaveoneout}{ Logical value specifying whether intensity values at data points should be estimated using the leave-one-out rule. } \item{verbose}{ Logical value specifying whether to print progress reports. 
} } \details{ This algorithm selects the optimal global bandwidth for kernel estimation of intensity for the dataset \code{X} using diffusion smoothing \code{\link{densityHeat.ppp}}. If \code{sigma} is a numeric value, the algorithm finds the optimal bandwidth \code{tau <= sigma}. If \code{sigma} is a pixel image or function, the algorithm finds the optimal fraction \code{0 < f <= 1} such that smoothing with \code{f * sigma} would be optimal. } \value{ A numerical value giving the selected bandwidth (if \code{sigma} was a numeric value) or the selected fraction of the maximum bandwidth (if \code{sigma} was a pixel image or function). The result also belongs to the class \code{"bw.optim"} which can be plotted. } \author{ Adrian Baddeley. } \seealso{ \code{\link{bw.pplHeat}} for an alternative method. \code{\link{densityHeat.ppp}} } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32) f <- function(x,y) { dnorm(x, 2.3, 0.1) * dnorm(y, 2.0, 0.2) } X <- rpoint(15, f, win=letterR) plot(X) b <- bw.CvLHeat(X, sigma=0.25) b plot(b) if(!online) spatstat.options(op) } \keyword{spatial} \keyword{smooth} spatstat.core/man/stienen.Rd0000644000176200001440000000407514141452520015557 0ustar liggesusers\name{stienen} \alias{stienen} \alias{stienenSet} \title{ Stienen Diagram } \description{ Draw the Stienen diagram of a point pattern, or compute the region covered by the Stienen diagram. } \usage{ stienen(X, \dots, bg = "grey", border = list(bg = NULL)) stienenSet(X, edge=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plot. } \item{bg}{ Fill colour for circles. } \item{border}{ Either a list of arguments passed to \code{\link{plot.ppp}} to control the display of circles at the border of the diagram, or the value \code{FALSE} indicating that the border circles should not be plotted. } \item{edge}{ Logical value indicating whether to include the circles at the border of the diagram. } } \details{ The Stienen diagram of a point pattern (Stienen, 1982) is formed by drawing a circle around each point of the pattern, with diameter equal to the nearest-neighbour distance for that point. These circles do not overlap. If two points are nearest neighbours of each other, then the corresponding circles touch. \code{stienenSet(X)} computes the union of these circles and returns it as a window (object of class \code{"owin"}). \code{stienen(X)} generates a plot of the Stienen diagram of the point pattern \code{X}. By default, circles are shaded in grey if they lie inside the window of \code{X}, and are not shaded otherwise. } \value{ The plotting function \code{stienen} returns \code{NULL}. The return value of \code{stienenSet} is a window (object of class \code{"owin"}). } \references{ Stienen, H. (1982) \emph{Die Vergroeberung von Karbiden in reinen Eisen-Kohlenstoff Staehlen}. Dissertation, RWTH Aachen. } \seealso{ \code{\link{nndist}}, \code{\link{plot.ppp}} } \examples{ Y <- stienenSet(cells) stienen(redwood) stienen(redwood, border=list(bg=NULL, lwd=2, cols="red")) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} \keyword{manip} spatstat.core/man/as.function.rhohat.Rd0000644000176200001440000000676314141452520017633 0ustar liggesusers\name{as.function.rhohat} \alias{as.function.rhohat} \title{ Convert Function Table to Function } \description{ Converts an object of class \code{"rhohat"} to an \R language function. 
} \usage{ \method{as.function}{rhohat}(x, ..., value=".y", extrapolate=TRUE) } \arguments{ \item{x}{ Object of class \code{"rhohat"}, produced by the function \code{\link{rhohat}}. } \item{\dots}{ Ignored. } \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. } } \details{ An object of class \code{"rhohat"} is essentially a data frame of estimated values of the function \eqn{\rho(x)}{rho(x)} as described in the help file for \code{\link{rhohat}}. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.rhohat}. It converts an object \code{x} of class \code{"rhohat"} to an \R function \code{f}. The command \code{as.function.rhohat} is a method for the generic command \code{\link{as.function}} for the class \code{"rhohat"}. If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). To apply different rules to the left and right extremes, use \code{extrapolate=c(TRUE,FALSE)} and so on. Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the original argument of the function table). If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}. } \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table. } \author{ \adrian and \rolf } \seealso{ \code{\link{rhohat}}, \code{\link{methods.rhohat}}, \code{\link{as.function.fv}}. } \examples{ g <- rhohat(cells, "x") f <- as.function(g) f f(0.1) } \keyword{spatial} \keyword{methods} spatstat.core/man/DiggleGatesStibbard.Rd0000644000176200001440000000502414141452520017737 0ustar liggesusers\name{DiggleGatesStibbard} \alias{DiggleGatesStibbard} \title{Diggle-Gates-Stibbard Point Process Model} \description{ Creates an instance of the Diggle-Gates-Stibbard point process model which can then be fitted to point pattern data.
} \usage{ DiggleGatesStibbard(rho) } \arguments{ \item{rho}{Interaction range} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Diggle-Gates-Stibbard process with interaction range \code{rho}. } \details{ Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((pi * d)/(2 * rho)) } for \eqn{d < \rho}{d < rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d >= rho}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Diggle-Gates-Stibbard pairwise interaction is yielded by the function \code{DiggleGatesStibbard()}. See the examples below. Note that this model does not have any regular parameters (as explained in the section on Interaction Parameters in the help file for \code{\link{ppm}}). The parameter \eqn{\rho} is not estimated by \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{DiggleGratton}}, \code{\link{rDGS}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763--770. } \examples{ DiggleGatesStibbard(0.02) # prints a sensible description of itself ppm(cells ~1, DiggleGatesStibbard(0.05)) # fit the stationary D-G-S process to `cells' # ppm(cells ~ polynom(x,y,3), DiggleGatesStibbard(0.05)) # fit a nonstationary D-G-S process # with log-cubic polynomial trend } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/valid.slrm.Rd0000644000176200001440000000303614141452520016161 0ustar liggesusers\name{valid.slrm} \alias{valid.slrm} \title{ Check Whether Spatial Logistic Regression Model is Valid } \description{ Determines whether a fitted spatial logistic regression model is a well-defined model. } \usage{ \method{valid}{slrm}(object, warn=TRUE, \dots) } \arguments{ \item{object}{ Fitted spatial logistic regression model (object of class \code{"slrm"}). } \item{warn}{ Logical value indicating whether to issue a warning if the validity of the model cannot be checked (due to unavailability of the required code). } \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{valid}} for spatial logistic regression models (class \code{"slrm"}). In a model fitted by \code{\link{slrm}}, some of the fitted coefficients may be \code{NA} or infinite. This can occur if the data are not adequate for estimation of the model parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. The function \code{valid.slrm} checks whether the fitted coefficients of \code{object} specify a well-defined model. It returns \code{TRUE} if the model is well-defined, and \code{FALSE} otherwise. Use the function \code{\link{emend.slrm}} to force the fitted model to be valid.
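A sketch of the intended workflow (the confounded formula is the same as in the examples below; the behaviour of \code{emend.slrm} is documented in its own help file):

fit <- slrm(cells ~ x + I(x))   # deliberately confounded model
valid(fit)                      # expected to be FALSE
fit0 <- emend.slrm(fit)         # remove redundant terms
valid(fit0)                     # expected to be TRUE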
} \value{ A logical value, or \code{NA}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{slrm}}, \code{\link{emend.slrm}} } \examples{ fit1 <- slrm(cells ~ x) valid(fit1) fit2 <- slrm(cells ~ x + I(x)) valid(fit2) } \keyword{spatial} \keyword{models} spatstat.core/man/Extract.influence.ppm.Rd0000644000176200001440000000340614141452520020263 0ustar liggesusers\name{Extract.influence.ppm} \alias{[.influence.ppm} \title{Extract Subset of Influence Object} \description{ Extract a subset of an influence object, or extract the influence values at specified locations. } \usage{ \method{[}{influence.ppm}(x, i, ...) } \arguments{ \item{x}{ An influence object (of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.ppp}}). Either a spatial window (object of class \code{"owin"}) or an integer index. } \item{\dots}{ Ignored. } } \value{ Another object of class \code{"influence.ppm"}. } \details{ An object of class \code{"influence.ppm"} contains the values of the likelihood influence for a point process model, computed by \code{\link{influence.ppm}}. This is effectively a marked point pattern obtained by marking each of the original data points with its likelihood influence. This function extracts a designated subset of the influence values, either as another influence object, or as a vector of numeric values. The function \code{[.influence.ppm} is a method for \code{\link{[}} for the class \code{"influence.ppm"}. The argument \code{i} should be an index applicable to a point pattern. It may be either a spatial window (object of class \code{"owin"}) or a sequence index. The result will be another influence object (of class \code{influence.ppm}). To extract the influence values as a numeric vector, use \code{marks(as.ppp(x))}. } \seealso{ \code{\link{influence.ppm}}. } \examples{ fit <- ppm(cells, ~x) infl <- influence(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) infl[b] infl[1:5] marks(as.ppp(infl))[1:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/Ksector.Rd0000644000176200001440000000556014141452520015524 0ustar liggesusers\name{Ksector} \alias{Ksector} \title{Sector K-function} \description{ A directional counterpart of Ripley's \eqn{K} function, in which pairs of points are counted only when the vector joining the pair happens to lie in a particular range of angles. } \usage{ Ksector(X, begin = 0, end = 360, \dots, units = c("degrees", "radians"), r = NULL, breaks = NULL, correction = c("border", "isotropic", "Ripley", "translate"), domain=NULL, ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{begin,end}{ Numeric values giving the range of angles inside which points will be counted. Angles are measured in degrees (if \code{units="degrees"}, the default) or radians (if \code{units="radians"}) anti-clockwise from the positive \eqn{x}-axis. } \item{\dots}{Ignored.} \item{units}{ Units in which the angles \code{begin} and \code{end} are expressed. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional.
A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical value indicating whether to print progress reports and warnings. } } \details{ This is a directional counterpart of Ripley's \eqn{K} function (see \code{\link{Kest}}) in which, instead of counting all pairs of points within a specified distance \eqn{r}, we count only the pairs \eqn{(x_i, x_j)}{x[i], x[j]} for which the vector \eqn{x_j - x_i}{x[j] - x[i]} falls in a particular range of angles. This can be used to evaluate evidence for anisotropy in the point pattern \code{X}. } \value{ An object of class \code{"fv"} containing the estimated function. } \seealso{ \code{\link{Kest}} } \examples{ K <- Ksector(swedishpines, 0, 90) plot(K) } \author{\adrian, \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rCauchy.Rd0000644000176200001440000001354314141452520015510 0ustar liggesusers\name{rCauchy} \alias{rCauchy} \title{Simulate Neyman-Scott Point Process with Cauchy cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Cauchy cluster kernel. } \usage{ rCauchy(kappa, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, \dots, poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number, in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{thresh}{ Threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be treated as zero for simulation purposes. Will be overridden by argument \code{expand} if that is given. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. By default determined by calling \code{\link{clusterradius}} with the numeric threshold value given in \code{thresh}. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing or \code{NULL}.
} \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Cauchy cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently and uniformly according to a Cauchy kernel. In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, maximum composite likelihood or Palm likelihood using \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2006). } When the parents are homogeneous (\code{kappa} is a single number) and the offspring are inhomogeneous (\code{mu} is a function or pixel image), the model can be fitted to data using \code{\link{kppm}}. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \examples{ # homogeneous X <- rCauchy(30, 0.01, 5) # inhomogeneous ff <- function(x,y){ exp(2 - 3 * abs(x)) } Z <- as.im(ff, W= owin()) Y <- rCauchy(50, 0.01, Z) YY <- rCauchy(ff, 0.01, 5) } \references{ Ghorbani, M. (2013) Cauchy cluster process. \emph{Metrika} \bold{76}, 697-706. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. 
Adapted for \pkg{spatstat} by \adrian } \keyword{spatial} \keyword{datagen} spatstat.core/man/bw.ppl.Rd0000644000176200001440000000712514141452520015313 0ustar liggesusers\name{bw.ppl} \alias{bw.ppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.ppl(X, \dots, srange=NULL, ns=16, sigma=NULL, weights=NULL, shortcut=FALSE, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{weights}{ Optional. Numeric vector of weights for the points of \code{X}. Argument passed to \code{\link{density.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}}. } \item{shortcut}{ Logical value indicating whether to speed up the calculation by omitting the integral term in the cross-validation criterion. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the maximum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{sigma} is chosen to maximise the point process likelihood cross-validation criterion \deqn{ \mbox{LCV}(\sigma) = \sum_i \log\hat\lambda_{-i}(x_i) - \int_W \hat\lambda(u) \, {\rm d}u }{ LCV(sigma) = sum[i] log(lambda[-i](x[i])) - integral[W] lambda(u) du } where the sum is taken over all the data points \eqn{x_i}{x[i]}, where \eqn{\hat\lambda_{-i}(x_i)}{lambda[-i](x[i])} is the leave-one-out kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{sigma}, and \eqn{\hat\lambda(u)}{lambda(u)} is the kernel-smoothing estimate of the intensity at a spatial location \eqn{u} with smoothing bandwidth \eqn{\sigma}{sigma}. See Loader (1999, Section 5.3). The value of \eqn{\mbox{LCV}(\sigma)}{LCV(sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the cross-validation criterion as a function of \code{sigma}. If \code{shortcut=TRUE}, the computation is accelerated by omitting the integral term in the equation above. This is valid because the integral term is approximately constant. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ if(interactive()) { b <- bw.ppl(redwood) plot(b, main="Likelihood cross validation for redwoods") plot(density(redwood, b)) } \testonly{ b1 <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2) b2 <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2, shortcut=TRUE) } } \references{ Loader, C. (1999) \emph{Local Regression and Likelihood}. Springer, New York. } \author{ \spatstatAuthors.
} \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/detpointprocfamilyfun.Rd0000644000176200001440000001527714141452520020545 0ustar liggesusers\name{detpointprocfamilyfun} \alias{detpointprocfamilyfun} \title{Construct a New Determinantal Point Process Model Family Function} \description{ Function to ease the implementation of a new determinantal point process model family. } \usage{detpointprocfamilyfun(kernel = NULL, specden = NULL, basis = "fourierbasis", convkernel = NULL, Kfun = NULL, valid = NULL, intensity = NULL, dim = 2, name = "User-defined", isotropic = TRUE, range = NULL, parbounds = NULL, specdenrange = NULL, startpar = NULL, \dots) } \arguments{ \item{kernel}{ function specifying the kernel. May be set to \code{NULL}. See Details. } \item{specden}{ function specifying the spectral density. May be set to \code{NULL}. See Details. } \item{basis}{ character string giving the name of the basis. Defaults to the Fourier basis. See Details. } \item{convkernel}{ function specifying the k-fold auto-convolution of the kernel. May be set to \code{NULL}. See Details. } \item{Kfun}{ function specifying the K-function. May be set to \code{NULL}. See Details. } \item{valid}{ function determining whether a given set of parameter values yields a valid model. May be set to \code{NULL}. See Examples. } \item{intensity}{ character string specifying which parameter is the intensity in the model family. Should be \code{NULL} if the model family has no intensity parameter. } \item{dim}{ character string specifying which parameter is the dimension of the state space in this model family (if any). Alternatively a positive integer specifying the dimension. } \item{name}{ character string giving the name of the model family used for printing. } \item{isotropic}{ logical value indicating whether or not the model is isotropic. } \item{range}{ function determining the interaction range of the model. May be set to \code{NULL}. See Examples. } \item{parbounds}{ function determining the bounds for each model parameter when all other parameters are fixed. May be set to \code{NULL}. See Examples. } \item{specdenrange}{ function specifying the range of the spectral density if it is finite (only the case for very few models). May be set to \code{NULL}. } \item{startpar}{ function determining starting values for parameters in any estimation algorithm. May be set to \code{NULL}. See Examples. } \item{\dots}{ Additional arguments for inclusion in the returned model object. These are not checked in any way. } } \details{ A determinantal point process family is specified either in terms of a kernel (a positive semi-definite function, i.e. a covariance function) or a spectral density, or preferably both. One of these can be \code{NULL} if it is unknown, but not both. When both are supplied they must have the same arguments. The first argument gives the values at which the function should be evaluated. In general the function should accept an \eqn{n} by \eqn{d} matrix or \code{data.frame} specifying \eqn{n (>=0)} points in dimension \eqn{d}. If the model is isotropic it only needs to accept a non-negative valued numeric of length \eqn{n}. (In fact there is currently almost no support for non-isotropic models, so it is recommended not to specify such a model.) The name of this argument could be chosen freely, but \code{x} is recommended. The remaining arguments are the parameters of the model. If one of these is an intensity parameter the name should be mentioned in the argument \code{intensity}.
If one of these specifies the dimension of the model it should be mentioned in the argument \code{dim}. The kernel and spectral density are defined with respect to a specific set of basis functions, which is typically the Fourier basis. However this can be changed to any user-supplied basis in the argument \code{basis}. If such an alternative is supplied it must be the name of a function expecting the same arguments as \code{\link{fourierbasis}} and returning the results in the same form as \code{\link{fourierbasis}}. If supplied, the arguments of \code{convkernel} must obey the following: the first argument should be like the first argument of \code{kernel} and/or \code{specden} (see above). The second argument (preferably called \code{k}) should be the positive integer specifying how many times the auto-convolution is done (i.e. the \eqn{k} in \eqn{k}-fold auto-convolution). The remaining arguments must agree with the arguments of \code{kernel} and/or \code{specden} (see above). If supplied, the arguments of \code{Kfun} should be like the arguments of \code{kernel} and \code{specden} (see above). } \value{ A function in the \R language, belonging to the class \code{"detpointprocfamilyfun"}. The function has formal arguments \code{\dots} and returns a determinantal point process family (object of class \code{"detpointprocfamily"}). } \author{ \spatstatAuthors. } \examples{ ## Example of how to define the Gauss family exGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ lambda*exp(-(x/alpha)^2) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" \%in\% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" \%in\% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2] rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) exGauss m <- exGauss(lambda=100, alpha=.05, d=2) m } \keyword{spatial} \keyword{models} spatstat.core/man/kaplan.meier.Rd0000644000176200001440000000604514141452520016457 0ustar liggesusers\name{kaplan.meier} \alias{kaplan.meier} \title{Kaplan-Meier Estimator using Histogram Data} \description{ Compute the Kaplan-Meier estimator of a survival time distribution function, from histogram data } \usage{ kaplan.meier(obs, nco, breaks, upperobs=0) } \arguments{ \item{obs}{vector of \eqn{n} integers giving the histogram of all observations (censored or uncensored survival times) } \item{nco}{vector of \eqn{n} integers giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{breaks}{Vector of \eqn{n+1} breakpoints which were used to form both histograms.
} \item{upperobs}{ Number of observations beyond the rightmost breakpoint, if any. } } \value{ A list with two elements: \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{lambda}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } These are numeric vectors of length \eqn{n}. } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{obs} of all observed times \eqn{\tilde T_i}{T*[i]}. That is, \code{obs[k]} counts the number of values \eqn{\tilde T_i}{T*[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}. These two histograms are the arguments passed to \code{kaplan.meier}. The vectors \code{km} and \code{lambda} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. The histogram breaks must include \eqn{0}. If the histogram breaks do not span the range of the observations, it is important to count how many survival times \eqn{\tilde T_i}{T*[i]} exceed the rightmost breakpoint, and give this as the value \code{upperobs}. } \seealso{ \code{\link{reduced.sample}}, \code{\link{km.rs}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/predict.dppm.Rd0000644000176200001440000000305014141452520016473 0ustar liggesusers\name{predict.dppm} \alias{predict.dppm} \alias{fitted.dppm} \title{Prediction from a Fitted Determinantal Point Process Model} \description{ Given a fitted determinantal point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{dppm}(object, ...) \method{predict}{dppm}(object, ...) } \arguments{ \item{object}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link{fitted}} and \code{\link{predict}}. The argument \code{object} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.dppm} is a numeric vector giving the fitted values at the quadrature points. 
The value of \code{predict.dppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ if(interactive()) { fit <- dppm(swedishpines ~ x + y, dppGauss()) } else { fit <- dppm(redwood ~ x, dppGauss()) } predict(fit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/rMatClust.Rd0000644000176200001440000001541614141452520016031 0ustar liggesusers\name{rMatClust} \alias{rMatClust} \title{Simulate Matern Cluster Process} \description{ Generate a random point pattern, a simulated realisation of the \Matern Cluster Process. } \usage{ rMatClust(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Radius parameter of the clusters. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{Numeric. Size of window expansion for generation of parent points. Defaults to \code{scale} which is the cluster radius. } \item{\dots}{Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of \Matern's cluster process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being independently and uniformly distributed inside a disc of radius \code{scale} centred on the parent point. The resulting point pattern is a realisation of the classical \dQuote{stationary \Matern cluster process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}.
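For example, a minimal sketch of this simplest homogeneous case (the parameter values below are purely illustrative, matching the Examples section): \preformatted{
  # parents: Poisson process with intensity kappa = 10;
  # each cluster: Poisson(mu = 4) points, uniform in a disc of radius scale = 0.05
  X <- rMatClust(kappa=10, scale=0.05, mu=4)
  # the expected intensity of X is kappa * mu = 40 points per unit area
}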
The algorithm can also generate spatially inhomogeneous versions of the \Matern cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu/(pi * scale^2)} inside the disc of radius \code{scale} centred on the parent point, and zero intensity outside this disc. Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) placed independently and uniformly in the disc of radius \code{scale} centred on the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{expand} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the \Matern cluster process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{scale}. The \Matern cluster process model with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}}. Currently it is not possible to fit the \Matern cluster process model with inhomogeneous parents. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \examples{ # homogeneous X <- rMatClust(10, 0.05, 4) # inhomogeneous ff <- function(x,y){ 4 * exp(2 * abs(x) - 1) } Z <- as.im(ff, owin()) Y <- rMatClust(10, 0.05, Z) YY <- rMatClust(ff, 0.05, 3) } \references{ \Matern, B. (1960) \emph{Spatial Variation}. Meddelanden \ifelse{latex}{\out{fr\r{a}n}}{fraan} Statens Skogsforskningsinstitut, volume 59, number 5. Statens Skogsforskningsinstitut, Sweden. \Matern, B. (1986) \emph{Spatial Variation}. Lecture Notes in Statistics 36, Springer-Verlag, New York. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/fryplot.Rd0000644000176200001440000001315314141452520015606 0ustar liggesusers\name{fryplot} \alias{fryplot} \alias{frypoints} \title{Fry Plot of Point Pattern} \description{ Displays the Fry plot (Patterson plot) of a spatial point pattern. } \usage{ fryplot(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) frypoints(X, from=NULL, to=NULL, dmax=Inf) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} \item{width}{Optional parameter indicating the width of a box for a zoomed-in view of the Fry plot near the origin.} \item{from,to}{ Optional. Subset indices specifying which points of \code{X} will be considered when forming the vectors (drawn from each point of \code{from}, to each point of \code{to}.) } \item{axes}{ Logical value indicating whether to draw axes, crossing at the origin. } \item{dmax}{ Maximum distance between points. Pairs at greater distances do not contribute to the result. The default means there is no maximum distance. } } \details{ The function \code{fryplot} generates a Fry plot (or Patterson plot); \code{frypoints} returns the points of the Fry plot as a point pattern dataset. Fry (1979) and Hanna and Fry (1979) introduced a manual graphical method for investigating features of a spatial point pattern of mineral deposits. A transparent sheet, marked with an origin or centre point, is placed over the point pattern. The transparent sheet is shifted so that the origin lies over one of the data points, and the positions of all the \emph{other} data points are copied onto the transparent sheet. This procedure is repeated for each data point in turn. The resulting plot (the Fry plot) is a pattern of \eqn{n(n-1)} points, where \eqn{n} is the original number of data points. This procedure was previously proposed by Patterson (1934, 1935) for studying inter-atomic distances in crystals, and is also known as a Patterson plot. The function \code{fryplot} generates the Fry/Patterson plot. Standard graphical parameters such as \code{main}, \code{pch}, \code{lwd}, \code{col}, \code{bg}, \code{cex} can be used to control the appearance of the plot. To zoom in (to view only a subset of the Fry plot at higher magnification), use the argument \code{width} to specify the width of a rectangular field of view centred at the origin, or the standard graphical arguments \code{xlim} and \code{ylim} to specify another rectangular field of view. (The actual field of view may be slightly larger, depending on the graphics device.) The function \code{frypoints} returns the points of the Fry plot as a point pattern object. There may be a large number of points in this pattern, so this function should be used only if further analysis of the Fry plot is required. Fry plots are particularly useful for recognising anisotropy in regular point patterns. A void around the origin in the Fry plot suggests regularity (inhibition between points) and the shape of the void gives a clue to anisotropy in the pattern. Fry plots are also useful for detecting periodicity or rounding of the spatial coordinates. In mathematical terms, the Fry plot of a point pattern \code{X} is simply a plot of the vectors \code{X[i] - X[j]} connecting all pairs of distinct points in \code{X}. 
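As a small illustration of this definition (using the \code{cells} dataset purely as an example), the difference vectors can be extracted and counted directly: \preformatted{
  n <- npoints(cells)
  Y <- frypoints(cells)  # the difference vectors X[i] - X[j]
  npoints(Y)             # compare with n * (n - 1)
}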
The Fry plot is related to the \eqn{K} function (see \code{\link{Kest}}) and the reduced second moment measure (see \code{\link{Kmeasure}}). For example, the number of points in the Fry plot lying within a circle of given radius is an unnormalised and uncorrected version of the \eqn{K} function. The Fry plot has a similar appearance to the plot of the reduced second moment measure \code{\link{Kmeasure}} when the smoothing parameter \code{sigma} is very small. The Fry plot does not adjust for the effect of the size and shape of the sampling window. The density of points in the Fry plot tapers off near the edges of the plot. This is an edge effect, a consequence of the bounded sampling window. In geological applications this is usually not important, because interest is focused on the behaviour near the origin where edge effects can be ignored. To correct for the edge effect, use \code{\link{Kmeasure}} or \code{\link{Kest}} or its relatives. } \value{ \code{fryplot} returns \code{NULL}. \code{frypoints} returns a point pattern (object of class \code{"ppp"}). } \references{ Fry, N. (1979) Random point distributions and strain measurement in rocks. \emph{Tectonophysics} \bold{60}, 89--105. Hanna, S.S. and Fry, N. (1979) A comparison of methods of strain determination in rocks from southwest Dyfed (Pembrokeshire) and adjacent areas. \emph{Journal of Structural Geology} \bold{1}, 155--162. Patterson, A.L. (1934) A Fourier series method for the determination of the components of inter-atomic distances in crystals. \emph{Physical Review} \bold{46}, 372--376. Patterson, A.L. (1935) A direct method for the determination of the components of inter-atomic distances in crystals. \emph{Zeitschrift fuer Krystallographie} \bold{90}, 517--554. } \seealso{ \code{\link{Kmeasure}}, \code{\link{Kest}} } \examples{ ## unmarked data fryplot(cells) Y <- frypoints(cells) ## numerical marks fryplot(longleaf, width=4, axes=TRUE) ## multitype points fryplot(amacrine, width=0.2, from=(marks(amacrine) == "on"), chars=c(3,16), cols=2:3, main="Fry plot centred at an On-cell") points(0,0) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/coef.mppm.Rd0000644000176200001440000000643714141452520016002 0ustar liggesusers\name{coef.mppm} \alias{coef.mppm} \title{ Coefficients of Point Process Model Fitted to Multiple Point Patterns } \description{ Given a point process model fitted to a list of point patterns, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{mppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"mppm"}) } \item{\dots}{ Ignored. } } \value{ Either a vector containing the fitted coefficients, or a data frame containing the fitted coefficients for each point pattern. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ lambda(u,x) = exp(theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic.
For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. If the fitted model includes random effects (i.e. if the argument \code{random} was specified in the call to \code{\link{mppm}}), then the fitted coefficients are different for each point pattern in the original data, so \code{coef(object)} is a data frame with one row for each point pattern, and one column for each parameter. Use \code{\link{fixef.mppm}} to extract the vector of fixed effect coefficients, and \code{\link{ranef.mppm}} to extract the random effect coefficients at each level. Use \code{\link{print.mppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{fixef.mppm}} and \code{\link{ranef.mppm}} for the fixed and random effect coefficients in a model that includes random effects. \code{\link{print.mppm}}, \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) fit.Poisson <- mppm(X ~ 1, H) coef(fit.Poisson) # The single entry "(Intercept)" # is the log of the fitted intensity of the Poisson process fit.Strauss <- mppm(X~1, H, Strauss(7)) coef(fit.Strauss) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) # Tweak data to exaggerate differences H$X[[1]] <- rthin(H$X[[1]], 0.3) # Model with random effects fitran <- mppm(X ~ 1, H, random=~1|id) coef(fitran) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/model.frame.ppm.Rd0000644000176200001440000000440014141452520017076 0ustar liggesusers\name{model.frame.ppm} \alias{model.frame.ppm} \alias{model.frame.kppm} \alias{model.frame.dppm} \alias{model.frame.slrm} \title{ Extract the Variables in a Point Process Model } \description{ Given a fitted point process model, this function returns a data frame containing all the variables needed to fit the model using the Berman-Turner device. } \usage{ \method{model.frame}{ppm}(formula, ...) \method{model.frame}{kppm}(formula, ...) \method{model.frame}{dppm}(formula, ...) \method{model.frame}{slrm}(formula, ...) } \arguments{ \item{formula}{ A fitted point process model. An object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"}, or \code{"dppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{model.frame.glm}}. } } \details{ The function \code{\link{model.frame}} is generic. These functions are methods for \code{\link{model.frame}} for fitted point process models (objects of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"}, or \code{"dppm"}). The first argument should be a fitted point process model; it has to be named \code{formula} for consistency with the generic function. The result is a data frame containing all the variables used in fitting the model. The data frame has one row for each quadrature point used in fitting the model. The quadrature scheme can be extracted using \code{\link{quad.ppm}}. } \value{ A \code{data.frame} containing all the variables used in the fitted model, plus additional variables specified in \code{\dots}.
It has an additional attribute \code{"terms"} containing information about the model formula. For details see \code{\link{model.frame.glm}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{slrm}}, \code{\link{model.frame}}, \code{\link{model.matrix.ppm}} } \examples{ fit <- ppm(cells ~ x) mf <- model.frame(fit) kfit <- kppm(redwood ~ x, "Thomas") kmf <- model.frame(kfit) sfit <- slrm(cells ~ x) smf <- model.frame(sfit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/Jdot.Rd0000644000176200001440000001636614141452520015020 0ustar liggesusers\name{Jdot} \alias{Jdot} \title{ Multitype J Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between the type \eqn{i} points and the points of any type. } \usage{ Jdot(X, i, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{i\bullet}(r)}{1 - Gi.(r)} and \eqn{1 - F_{\bullet}(r)}{1 - F.(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{i\bullet}(r)}{Ji.(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gdot}} and \code{\link{Fest}} for the point pattern. 
} \details{ This function \code{Jdot} and its companions \code{\link{Jcross}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``type \eqn{i} to any type'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{i\bullet}(r) = \frac{1 - G_{i\bullet}(r)}{1 - F_{\bullet}(r)}}{Ji.(r) = (1 - Gi.(r))/(1-F.(r))} where \eqn{G_{i\bullet}(r)}{Gi.(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest other point of the pattern, and \eqn{F_{\bullet}(r)}{F.(r)} is the distribution function of the distance from a fixed point in space to the nearest point of the pattern. An estimate of \eqn{J_{i\bullet}(r)}{Ji.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the pattern is a marked Poisson point process, then \eqn{J_{i\bullet}(r) \equiv 1}{Ji.(r) = 1}. If the subprocess of type \eqn{i} points is independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{J_{i\bullet}(r)}{Ji.(r)} equals \eqn{J_{ii}(r)}{Jii(r)}, the ordinary \eqn{J} function (see \code{\link{Jest}} and Van Lieshout and Baddeley (1996)) of the points of type \eqn{i}. Hence deviations from zero of the empirical estimate of \eqn{J_{i\bullet} - J_{ii}}{Ji.-Jii} may suggest dependence between types. This algorithm estimates \eqn{J_{i\bullet}(r)}{Ji.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}.
It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jcross}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30), ] } Jh. <- Jdot(woods, "hickory") plot(Jh.) # diagnostic plot for independence between hickories and other trees Jhh <- Jest(split(woods)$hickory) plot(Jhh, add=TRUE, legendpos="bottom") # synthetic example with two marks "a" and "b" ## pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) ## J <- Jdot(pp, "a") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/LennardJones.Rd0000644000176200001440000001200214141452520016463 0ustar liggesusers\name{LennardJones} \alias{LennardJones} \title{The Lennard-Jones Potential} \description{ Creates the Lennard-Jones pairwise interaction structure which can then be fitted to point pattern data. } \usage{ LennardJones(sigma0=NA) } \value{ An object of class \code{"interact"} describing the Lennard-Jones interpoint interaction structure. } \arguments{ \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \details{ In a pairwise interaction point process with the Lennard-Jones pair potential (Lennard-Jones, 1924) each pair of points in the point pattern, a distance \eqn{d} apart, contributes a factor \deqn{ v(d) = \exp \left\{ - 4\epsilon \left[ \left( \frac{\sigma}{d} \right)^{12} - \left( \frac{\sigma}{d} \right)^6 \right] \right\} }{ v(d) = exp( - 4 * epsilon * ((sigma/d)^12 - (sigma/d)^6)) } to the probability density, where \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} are positive parameters to be estimated. See \bold{Examples} for a plot of this expression. This potential causes very strong inhibition between points at short range, and attraction between points at medium range. The parameter \eqn{\sigma}{sigma} is called the \emph{characteristic diameter} and controls the scale of interaction. The parameter \eqn{\epsilon}{epsilon} is called the \emph{well depth} and determines the strength of attraction. The potential switches from inhibition to attraction at \eqn{d=\sigma}{d=sigma}. The maximum value of the pair potential is \eqn{\exp(\epsilon)}{exp(epsilon)} occurring at distance \eqn{d = 2^{1/6} \sigma}{d = 2^(1/6) * sigma}. Interaction is usually considered to be negligible for distances \eqn{d > 2.5 \sigma \max\{1,\epsilon^{1/6}\}}{d > 2.5 * sigma * max(1, epsilon^(1/6))}. This potential is used to model interactions between uncharged molecules in statistical physics. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Lennard-Jones pairwise interaction is yielded by the function \code{LennardJones()}. See the examples below. } \section{Rescaling}{ To avoid numerical instability, the interpoint distances \code{d} are rescaled when fitting the model. Distances are rescaled by dividing by \code{sigma0}. In the formula for \eqn{v(d)} above, the interpoint distance \eqn{d} will be replaced by \code{d/sigma0}. The rescaling happens automatically by default.
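For example, to override the automatic rescaling by supplying a rough estimate of \eqn{\sigma}{sigma} explicitly (the value \code{0.08} below is purely illustrative), one could call: \preformatted{
  # sigma0 is a rough guess at the characteristic diameter sigma
  fit <- ppm(cells ~ 1, LennardJones(sigma0=0.08), rbord=0.1)
}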
If the argument \code{sigma0} is missing or \code{NA} (the default), then \code{sigma0} is taken to be the minimum nearest-neighbour distance in the data point pattern (in the call to \code{\link{ppm}}). If the argument \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. The ``canonical regular parameters'' estimated by \code{\link{ppm}} are \eqn{\theta_1 = 4 \epsilon (\sigma/\sigma_0)^{12}}{theta1 = 4 * epsilon * (sigma/sigma0)^12} and \eqn{\theta_2 = 4 \epsilon (\sigma/\sigma_0)^6}{theta2 = 4 * epsilon * (sigma/sigma0)^6}. } \section{Warnings and Errors}{ Fitting the Lennard-Jones model is extremely unstable, because of the strong dependence between the functions \eqn{d^{-12}}{d^(-12)} and \eqn{d^{-6}}{d^(-6)}. The fitting algorithm often fails to converge. Try increasing the number of iterations of the GLM fitting algorithm, by setting \code{gcontrol=list(maxit=1e3)} in the call to \code{\link{ppm}}. Errors are likely to occur if this model is fitted to a point pattern dataset which does not exhibit both short-range inhibition and medium-range attraction between points. The values of the parameters \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} may be \code{NA} (because the fitted canonical parameters have opposite sign, which usually occurs when the pattern is completely random). An absence of warnings does not mean that the fitted model is sensible. A negative value of \eqn{\epsilon}{epsilon} may be obtained (usually when the pattern is strongly clustered); this does not correspond to a valid point process model, but the software does not issue a warning. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ badfit <- ppm(cells ~1, LennardJones(), rbord=0.1) badfit fit <- ppm(unmark(longleaf) ~1, LennardJones(), rbord=1) fit plot(fitin(fit)) # Note the Longleaf Pines coordinates are rounded to the nearest decimetre # (multiple of 0.1 metres) so the apparent inhibition may be an artefact } \references{ Lennard-Jones, J.E. (1924) On the determination of molecular fields. \emph{Proc Royal Soc London A} \bold{106}, 463--477. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/expand.owin.Rd0000644000176200001440000000232314141452520016336 0ustar liggesusers\name{expand.owin} \alias{expand.owin} \title{Apply Expansion Rule} \description{ Applies an expansion rule to a window. } \usage{ expand.owin(W, \dots) } \arguments{ \item{W}{A window.} \item{\dots}{ Arguments passed to \code{\link{rmhexpand}} to determine an expansion rule. } } \value{ A window (object of class \code{"owin"}). } \details{ The argument \code{W} should be a window (an object of class \code{"owin"}). This command applies the expansion rule specified by the arguments \code{\dots} to the window \code{W}, yielding another window. The arguments \code{\dots} are passed to \code{\link{rmhexpand}} to determine the expansion rule. For other transformations of the scale, location and orientation of a window, see \code{\link{shift}}, \code{\link{affine}} and \code{\link{rotate}}. } \seealso{ \code{\link{rmhexpand}} about expansion rules. \code{\link{shift}}, \code{\link{rotate}}, \code{\link{affine}} for other types of manipulation. 
} \examples{ expand.owin(square(1), 9) expand.owin(square(1), distance=0.5) expand.owin(letterR, length=2) expand.owin(letterR, distance=0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.core/man/measureContinuous.Rd0000644000176200001440000000246214141452520017640 0ustar liggesusers\name{measureContinuous} \alias{measureContinuous} \alias{measureDiscrete} \title{ Discrete and Continuous Components of a Measure } \description{ Given a measure \code{x} (object of class \code{"msr"}) these functions find the discrete and continuous parts of \code{x}. } \usage{ measureDiscrete(x) measureContinuous(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measureDiscrete} and \code{measureContinuous} return the discrete and continuous components, respectively, of a measure. If \code{x} is a measure, then \code{measureDiscrete(x)} is a measure consisting only of the discrete (atomic) component of \code{x}, and \code{measureContinuous(x)} is a measure consisting only of the continuous (diffuse) component of \code{x}. } \value{ Another measure (object of class \code{"msr"}) on the same spatial domain. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. } \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measurePositive}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp measureDiscrete(rp) measureContinuous(rp) } \keyword{spatial} \keyword{math} spatstat.core/man/nndensity.Rd0000644000176200001440000000550114141452520016120 0ustar liggesusers\name{nndensity.ppp} \alias{nndensity} \alias{nndensity.ppp} \title{ Estimate Intensity of Point Pattern Using Nearest Neighbour Distances } \description{ Estimates the intensity of a point pattern using the distance from each spatial location to the \code{k}th nearest data point. } \usage{ nndensity(x, ...) \method{nndensity}{ppp}(x, k, ..., verbose = TRUE) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}) or some other spatial object. } \item{k}{ Integer. The distance to the \code{k}th nearest data point will be computed. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{nnmap}} and \code{\link{as.mask}} controlling the pixel resolution. } \item{verbose}{ Logical. If \code{TRUE}, print the value of \code{k} when it is automatically selected. If \code{FALSE}, remain silent. } } \details{ This function computes a quick estimate of the intensity of the point process that generated the point pattern \code{x}. For each spatial location \eqn{s}, let \eqn{d(s)} be the distance from \eqn{s} to the \eqn{k}-th nearest point in the dataset \code{x}. If the data came from a homogeneous Poisson process with intensity \eqn{\lambda}{lambda}, then, in the simplest case \eqn{k=1}, the quantity \eqn{\pi d(s)^2}{pi * d(s)^2} would follow a negative exponential distribution with mean \eqn{1/\lambda}{1/lambda}, and the maximum likelihood estimate of \eqn{\lambda}{lambda} would be \eqn{1/(\pi d(s)^2)}{1/(pi * d(s)^2)}. This is the estimate computed by \code{nndensity}, apart from an edge effect correction. This estimator of intensity is relatively fast to compute, and is spatially adaptive (so that it can handle wide variation in the intensity function). However, it implicitly assumes the points are independent, so it does not perform well if the pattern is strongly clustered or strongly inhibited.
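As a concrete instance of this calculation (the numbers are chosen only for illustration): with \eqn{k=1}, a location lying \eqn{0.1} units from its nearest data point receives the uncorrected intensity estimate \eqn{1/(\pi \times 0.1^2) \approx 31.8}{1/(pi * 0.1^2) = 31.8} points per unit area.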
The value of \code{k} should be greater than 1 in order to avoid infinite peaks in the intensity estimate around each data point. The default value of \code{k} is the square root of the number of points in \code{x}, which seems to work well in many cases. The window of \code{x} is digitised using \code{\link{as.mask}} and the values \eqn{d(s)} are computed using \code{\link{nnmap}}. To control the pixel resolution, see \code{\link{as.mask}}. } \value{ A pixel image (object of class \code{"im"}) giving the estimated intensity of the point process at each spatial location. Pixel values are intensities (number of points per unit area). } \seealso{ \code{\link{density.ppp}}, \code{\link{intensity}} for alternative estimates of point process intensity. } \examples{ plot(nndensity(swedishpines)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/rDiggleGratton.Rd0000644000176200001440000001054414141452520017024 0ustar liggesusers\name{rDiggleGratton} \alias{rDiggleGratton} \title{Perfect Simulation of the Diggle-Gratton Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gratton process, using a perfect simulation algorithm. } \usage{ rDiggleGratton(beta, delta, rho, kappa=1, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{delta}{ hard core distance (a non-negative number). } \item{rho}{ interaction range (a number greater than \code{delta}). } \item{kappa}{ interaction exponent (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Diggle-Gratton point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Diggle and Gratton (1984, pages 208--210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - delta)/(rho - delta))^kappa, { } delta <= t <= rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}. Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa} are parameters. Note that we use the symbol \eqn{\kappa}{kappa} where Diggle and Gratton (1984) use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{delta <= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003).
This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rDiggleGratton(50, 0.02, 0.07) Z <- rDiggleGratton(50, 0.02, 0.07, 2, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGratton}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat.core/man/prune.rppm.Rd0000644000176200001440000000255014141452520016214 0ustar liggesusers\name{prune.rppm} \alias{prune.rppm} \title{ Prune a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, apply pruning to reduce the complexity of the partition tree. } \usage{ \method{prune}{rppm}(tree, \dots) } \arguments{ \item{tree}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Arguments passed to \code{\link[rpart]{prune.rpart}} to control the pruning procedure. } } \details{ This is a method for the generic function \code{\link[rpart]{prune}} for the class \code{"rppm"}. An object of this class is a point process model, fitted to point pattern data by recursive partitioning, by the function \code{\link{rppm}}. The recursive partition tree will be pruned using \code{\link[rpart]{prune.rpart}}. The result is another object of class \code{"rppm"}. } \value{ Object of class \code{"rppm"}. } \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}}, \code{\link{predict.rppm}}. } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) fit <- rppm(gold ~ dfault + greenstone, data=mur) fit prune(fit, cp=0.1) } \keyword{spatial} \keyword{models} spatstat.core/man/simulate.dppm.Rd0000644000176200001440000001140414141452520016666 0ustar liggesusers\name{simulate.dppm} \alias{simulate.dppm} \alias{simulate.detpointprocfamily} \title{Simulation of Determinantal Point Process Model} \description{ Generates simulated realisations from a determinantal point process model. 
} \usage{ \method{simulate}{dppm}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) \method{simulate}{detpointprocfamily}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) } \arguments{ \item{object}{ Determinantal point process model. An object of class \code{"detpointprocfamily"} or \code{"dppm"}. } \item{nsim}{Number of simulated realisations.} \item{seed}{ An object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Arguments passed on to \code{\link{rdpp}}.} \item{W}{ Object specifying the window of simulation (defaults to a unit box if nothing else is sensible -- see Details). Can be any single argument acceptable to \code{\link{as.boxx}} (e.g. an \code{"owin"}, \code{"box3"} or \code{"boxx"} object). } \item{trunc}{ Numeric value specifying how the model truncation is performed. See Details. } \item{correction}{ Character string specifying the type of correction to use. The options are \code{"periodic"} (default) and \code{"border"}. See Details. } \item{rbord}{ Numeric value specifying the extent of the border correction if this correction is used. See Details. } } \details{ These functions are methods for the generic function \code{\link[stats]{simulate}} for the classes \code{"detpointprocfamily"} and \code{"dppm"} of determinantal point process models. The return value is a list of \code{nsim} point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. The exact simulation of a determinantal point process model involves an infinite series, which typically has no analytical solution. In the implementation a truncation is performed. The truncation \code{trunc} can be specified either directly as a positive integer or as a fraction between 0 and 1. In the latter case the truncation is chosen such that the expected number of points in a simulation is \code{trunc} times the theoretical expected number of points in the model. The default is 0.99. The window of the returned point pattern(s) can be specified via the argument \code{W}. For a fitted model (of class \code{"dppm"}) it defaults to the observation window of the data used to fit the model. For inhomogeneous models it defaults to the window of the intensity image. Otherwise it defaults to a unit box. For non-rectangular windows simulation is done in the containing rectangle and then restricted to the window. For inhomogeneous models a stationary model is first simulated using the maximum intensity and then the result is obtained by thinning. The default is to use periodic edge correction for simulation such that opposite edges are glued together. If border correction is used then the simulation is done in an extended window. Edge effects are theoretically completely removed by doubling the size of the window in each spatial dimension, but for practical purposes much less extension may be sufficient. The numeric \code{rbord} determines the extent of the extra space added to the window.
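For instance (the model parameters below are illustrative only, echoing the Examples section), the following sketch generates two realisations in the default window and then reproduces them exactly by reusing the same seed: \preformatted{
  mod <- dppGauss(lambda=30, alpha=.05, d=2)
  X <- simulate(mod, nsim=2, seed=42)
  Y <- simulate(mod, nsim=2, seed=42)  # identical realisations: same seed
}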
} \value{ A list of length \code{nsim} containing simulated point patterns. If the patterns are two-dimensional, then they are objects of class \code{"ppp"}, and the list has class \code{"solist"}. Otherwise, the patterns are objects of class \code{"ppx"} and the list has class \code{"anylist"}. The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rdpp}}, \code{\link[stats]{simulate}} } \examples{ if(interactive()) { nsim <- 2 lam <- 100 } else { nsim <- 1 lam <- 30 } model <- dppGauss(lambda=lam, alpha=.05, d=2) simulate(model, nsim) } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat.core/man/vargamma.estK.Rd0000644000176200001440000001455314141452520016614 0ustar liggesusers\name{vargamma.estK} \alias{vargamma.estK} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast. } \usage{ vargamma.estK(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical \eqn{K} function of the model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013).
It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and, around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of \pkg{spatstat}, instead of specifying \code{nu} (then called \code{nu.ker}), the user could specify \code{nu.pcf}, which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13) on page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported, but is no longer recommended, for consistency across the package. If it is used, exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{scale} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian.
} \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{cauchy.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ if(interactive()) { u <- vargamma.estK(redwood) print(u) plot(u) } } \keyword{spatial} \keyword{models} spatstat.core/man/lgcp.estK.Rd0000644000176200001440000002142214141452520015737 0ustar liggesusers\name{lgcp.estK} \alias{lgcp.estK} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast. } \usage{ lgcp.estK(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the K function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical \eqn{K} function of the LGCP model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\Moller and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The \eqn{K}-function of the LGCP is \deqn{ K(r) = \int_0^r 2\pi s \exp(C(s)) \, {\rm d}s. }{ K(r) = integral from 0 to r of (2 * pi * s * exp(C(s))) ds. } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). 
} The covariance function \eqn{C(r)} is parametrised in the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm as the values \code{var} and \code{scale} respectively. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } To determine the template model, the string \code{"modelname"} will be prefixed by \code{"RM"} and the code will search for a function of this name in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example, the \Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)} corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \note{ This function is considerably slower than \code{\link{lgcp.estpcf}} because of the computation time required for the integral in the \eqn{K}-function.
Computation can be accelerated, at the cost of less accurate results, by setting \code{spatstat.options(fastK.lgcp=TRUE)}. } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Rasmus Waagepetersen \email{rw@math.auc.dk}. Adapted for \pkg{spatstat} by \adrian. Further modifications by Rasmus Waagepetersen and Shen Guochun, and by \ege. } \seealso{ \code{\link{lgcp.estpcf}} for an alternative method of fitting the LGCP. \code{\link{matclust.estK}}, \code{\link{thomas.estK}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{Kest}} for the \eqn{K} function. } \examples{ if(interactive()) { u <- lgcp.estK(redwood) print(u) plot(u) } else { # faster - better starting point u <- lgcp.estK(redwood, c(var=1.05, scale=0.1)) } \testonly{ if(require(RandomFields)) { K <- Kest(redwood, r=seq(0, 0.1, length=9)) op <- spatstat.options(fastK.lgcp=TRUE) lgcp.estK(K, covmodel=list(model="matern", nu=0.3), control=list(maxit=2)) spatstat.options(op) } } if(FALSE) { ## takes several minutes! lgcp.estK(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat.core/man/qqplot.ppm.Rd0000644000176200001440000003571514141452520016230 0ustar liggesusers\name{qqplot.ppm} \alias{qqplot.ppm} \title{ Q-Q Plot of Residuals from Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce a Q-Q plot based on residuals from the model. } \usage{ qqplot.ppm(fit, nsim=100, expr=NULL, \dots, type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) } \arguments{ \item{fit}{ The fitted point process model, which is to be assessed using the Q-Q plot. An object of class \code{"ppm"}. Smoothed residuals obtained from this fitted model will provide the ``data'' quantiles for the Q-Q plot. } \item{nsim}{ The number of simulations from the ``reference'' point process model. } \item{expr}{ Determines the simulation mechanism which provides the ``theoretical'' quantiles for the Q-Q plot. See Details. } \item{\dots}{ Arguments passed to \code{\link{diagnose.ppm}} influencing the computation of residuals. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{style}{ Character string controlling the type of Q-Q plot. Options are \code{"classical"} and \code{"mean"}. See Details. } \item{fast}{ Logical flag controlling the speed and accuracy of computation.
Use \code{fast=TRUE} for interactive use and \code{fast=FALSE} for publication standard plots. See Details. } \item{verbose}{ Logical flag controlling whether the algorithm prints progress reports during long computations. } \item{plot.it}{ Logical flag controlling whether the function produces a plot or simply returns a value (silently). } \item{dimyx}{ Dimensions of the pixel grid on which the smoothed residual field will be calculated. A vector of two integers. } \item{nrep}{ If \code{control} is absent, then \code{nrep} gives the number of iterations of the Metropolis-Hastings algorithm that should be used to generate one simulation of the fitted point process. } \item{control}{ List of parameters controlling the Metropolis-Hastings algorithm \code{\link{rmh}} which generates each simulated realisation from the model (unless the model is Poisson). This list becomes the argument \code{control} of \code{\link{rmh.default}}. It overrides \code{nrep}. } \item{saveall}{ Logical flag indicating whether to save all the intermediate calculations. } \item{monochrome}{ Logical flag indicating whether the plot should be in black and white (\code{monochrome=TRUE}), or in colour (\code{monochrome=FALSE}). } \item{limcol}{ String. The colour to be used when plotting the 95\% limit curves. } \item{maxerr}{ Maximum number of failures tolerated while generating simulated realisations. See Details. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{fit}, if it is found to be damaged. } \item{envir.expr}{ Optional. An environment in which the expression \code{expr} should be evaluated. } } \value{ An object of class \code{"qqppm"} containing the information needed to reproduce the Q-Q plot. Entries \code{x} and \code{y} are numeric vectors containing quantiles of the simulations and of the data, respectively. } \details{ This function generates a Q-Q plot of the residuals from a fitted point process model. It is an addendum to the suite of diagnostic plots produced by the function \code{\link{diagnose.ppm}}, kept separate because it is computationally intensive. The quantiles of the theoretical distribution are estimated by simulation. In classical statistics, a Q-Q plot of residuals is a useful diagnostic for checking the distributional assumptions. Analogously, in spatial statistics, a Q-Q plot of the (smoothed) residuals from a fitted point process model is a useful way to check the interpoint interaction part of the model (Baddeley et al, 2005). The systematic part of the model (spatial trend, covariate effects, etc) is assessed using other plots made by \code{\link{diagnose.ppm}}. The argument \code{fit} represents the fitted point process model. It must be an object of class \code{"ppm"} (typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). Residuals will be computed for this fitted model using \code{\link{residuals.ppm}}, and the residuals will be kernel-smoothed to produce a ``residual field''. The values of this residual field will provide the ``data'' quantiles for the Q-Q plot. The argument \code{expr} is not usually specified. It provides a way to modify the ``theoretical'' or ``reference'' quantiles for the Q-Q plot. 
In normal usage, we set \code{expr=NULL}. The default is to generate \code{nsim} simulated realisations of the fitted model \code{fit}, re-fit this model to each of the simulated patterns, evaluate the residuals from these fitted models, and use the kernel-smoothed residual field from these fitted models as a sample from the reference distribution for the Q-Q plot. In advanced use, \code{expr} may be an \code{expression}. It will be re-evaluated \code{nsim} times, and should include random computations so that the results are not identical each time. The result of evaluating \code{expr} should be either a point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}). If the value is a point pattern, then the original fitted model \code{fit} will be fitted to this new point pattern using \code{\link{update.ppm}}, to yield another fitted model. Smoothed residuals obtained from these \code{nsim} fitted models will yield the ``theoretical'' quantiles for the Q-Q plot. Alternatively, \code{expr} can be a list of point patterns, or an \code{envelope} object that contains a list of point patterns (typically generated by calling \code{\link{envelope}} with \code{savepatterns=TRUE}). These point patterns will be used as the simulated patterns. Simulation is performed (if \code{expr=NULL}) using the Metropolis-Hastings algorithm \code{\link{rmh}}. Each simulated realisation is the result of running the Metropolis-Hastings algorithm from an independent random starting state each time. The iterative and termination behaviour of the Metropolis-Hastings algorithm is governed by the argument \code{control}. See \code{\link{rmhcontrol}} for information about this argument. As a shortcut, the argument \code{nrep} determines the number of Metropolis-Hastings iterations used to generate each simulated realisation, if \code{control} is absent. By default, simulations are generated in an expanded window. Use the argument \code{control} to change this, as explained in the section on \emph{Warning messages}. The argument \code{type} selects the type of residual or weight that will be computed. For options, see \code{\link{diagnose.ppm}}. The argument \code{style} determines the type of Q-Q plot. It is highly recommended to use the default, \code{style="mean"}. \describe{ \item{\code{style="classical"}}{ The quantiles of the residual field for the data (on the \eqn{y} axis) are plotted against the quantiles of the \bold{pooled} simulations (on the \eqn{x} axis). This plot is biased, and therefore difficult to interpret, because of strong autocorrelations in the residual field and the large differences in sample size. } \item{\code{style="mean"}}{ The order statistics of the residual field for the data are plotted against the sample means, over the \code{nsim} simulations, of the corresponding order statistics of the residual field for the simulated datasets. Dotted lines show the 2.5th and 97.5th percentiles, over the \code{nsim} simulations, of each order statistic. } } The argument \code{fast} is a simple way to control the accuracy and speed of computation. If \code{fast=FALSE}, the residual field is computed on a fine grid of pixels (by default 100 by 100 pixels, see below) and the Q-Q plot is based on the complete set of order statistics (usually 10,000 quantiles). If \code{fast=TRUE}, the residual field is computed on a coarse grid (at most 40 by 40 pixels) and the Q-Q plot is based on the \emph{percentiles} only. This is about 7 times faster.
It is recommended to use \code{fast=TRUE} for interactive data analysis and \code{fast=FALSE} for definitive plots for publication. The argument \code{dimyx} gives full control over the resolution of the pixel grid used to calculate the smoothed residuals. Its interpretation is the same as the argument \code{dimyx} to the function \code{\link{as.mask}}. Note that \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{dimyx} is not present, then the default pixel grid dimensions are controlled by \code{spatstat.options("npixel")}. Since the computation is so time-consuming, \code{qqplot.ppm} returns a list containing all the data necessary to re-display the Q-Q plot. It is advisable to assign the result of \code{qqplot.ppm} to something (or use \code{.Last.value} if you forgot to). The return value is an object of class \code{"qqppm"}. There are methods \code{\link{plot.qqppm}} and \code{\link{print.qqppm}} for objects of this class. See the Examples. The argument \code{saveall} is usually set to \code{FALSE}. If \code{saveall=TRUE}, then the intermediate results of calculation for each simulated realisation are saved and returned. The return value includes a 3-dimensional array \code{sim} containing the smoothed residual field images for each of the \code{nsim} realisations. When \code{saveall=TRUE}, the return value is an object of very large size, and should not be saved on disk. Errors may occur during the simulation process, because random data are generated. For example: \itemize{ \item one of the simulated patterns may be empty. \item one of the simulated patterns may cause an error in the code that fits the point process model. \item the user-supplied argument \code{expr} may have a bug. } Empty point patterns do not cause a problem for the code, but they are reported. Other problems that would lead to a crash are trapped; the offending simulated data are discarded, and the simulation is retried. The argument \code{maxerr} determines the maximum number of times that such errors will be tolerated (mainly as a safeguard against an infinite loop). } \section{Side Effects}{ Produces a Q-Q plot if \code{plot.it} is \code{TRUE}. } \section{Warning messages}{ A warning message will be issued if any of the simulations trapped an error (a potential crash). A warning message will be issued if all, or many, of the simulated point patterns are empty. This usually indicates a problem with the simulation procedure. The default behaviour of \code{qqplot.ppm} is to simulate patterns on an expanded window (specified through the argument \code{control}) in order to avoid edge effects. The model's trend is extrapolated over this expanded window. If the trend is strongly inhomogeneous, the extrapolated trend may have very large (or even infinite) values. This can cause the simulation algorithm to produce empty patterns. The only way to suppress this problem entirely is to prohibit the expansion of the window, by setting the \code{control} argument to something like \code{control=list(nrep=1e6, expand=1)}. Here \code{expand=1} means there will be no expansion. See \code{\link{rmhcontrol}} for more information about the argument \code{control}. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Stoyan, D. and Grabarnik, P.
(1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100. } \seealso{ \code{\link{diagnose.ppm}}, \code{\link{lurking}}, \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}} } \examples{ data(cells) fit <- ppm(cells, ~1, Poisson()) diagnose.ppm(fit) # no suggestion of departure from stationarity if(interactive()) { qqplot.ppm(fit, 80) # strong evidence of non-Poisson interaction diagnose.ppm(fit, type="pearson") qqplot.ppm(fit, type="pearson") } \testonly{ qqplot.ppm(fit, 4) qqplot.ppm(fit, 4, type="pearson") } # capture the plot coordinates # mypreciousdata <- qqplot.ppm(fit, type="pearson") # mypreciousdata <- qqplot.ppm(fit, 4, type="pearson") # plot(mypreciousdata) ## use the idiom .Last.value if you forgot to assign them mypreciousdata <- .Last.value ###################################################### # Q-Q plots based on fixed n # The above QQ plots used simulations from the (fitted) Poisson process. # But I want to simulate conditional on n, instead of Poisson # Do this by setting rmhcontrol(p=1) fixit <- list(p=1) if(interactive()) {qqplot.ppm(fit, 100, control=fixit)} \testonly{qqplot.ppm(fit, 4, control=fixit)} ###################################################### # Inhomogeneous Poisson data X <- rpoispp(function(x,y){1000 * exp(-3*x)}, 1000) plot(X) # Inhomogeneous Poisson model fit <- ppm(X, ~x, Poisson()) if(interactive()) {qqplot.ppm(fit, 100)} \testonly{qqplot.ppm(fit, 4)} # conclusion: fitted inhomogeneous Poisson model looks OK ###################################################### # Advanced use of 'expr' argument # # set the initial conditions in Metropolis-Hastings algorithm # expr <- expression(rmh(fit, start=list(n.start=42), verbose=FALSE)) if(interactive()) \donttest{qqplot.ppm(fit, 100, expr)} \testonly{qqplot.ppm(fit, 4, expr)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.core/man/km.rs.Rd0000644000176200001440000000610314141452520015136 0ustar liggesusers\name{km.rs} \alias{km.rs} \title{Kaplan-Meier and Reduced Sample Estimator using Histograms} \description{ Compute the Kaplan-Meier and Reduced Sample estimators of a survival time distribution function, using histogram techniques. } \usage{ km.rs(o, cc, d, breaks) } \arguments{ \item{o}{vector of observed survival times } \item{cc}{vector of censoring times } \item{d}{vector of non-censoring indicators } \item{breaks}{Vector of breakpoints to be used to form histograms. } } \value{ A list with five elements \item{rs}{Reduced-sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{hazard}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } \item{r}{values of \eqn{t} for which \eqn{F(t)} is estimated } \item{breaks}{the breakpoints vector } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}.
Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. The arguments to this function are vectors \code{o}, \code{cc}, \code{d} of observed values of \eqn{\tilde T_i}{T*[i]}, \eqn{C_i}{C[i]} and \eqn{D_i}{D[i]} respectively. The function computes histograms and forms the reduced-sample and Kaplan-Meier estimates of \eqn{F(t)} by invoking the functions \code{\link{kaplan.meier}} and \code{\link{reduced.sample}}. This is efficient if the common length of \code{o}, \code{cc} and \code{d} (i.e. the number of observations) is large. The vectors \code{km} and \code{hazard} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{hazard[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. This approximation is exact only if the survival times are discrete and the histogram breaks are fine enough to ensure that each interval \code{(breaks[k],breaks[k+1])} contains only one possible value of the survival time. The vector \code{rs} is the reduced-sample estimator, \code{rs[k]} being the reduced sample estimate of \code{F(breaks[k+1])}. This value is exact, i.e. the use of histograms does not introduce any approximation error in the reduced-sample estimator. } \seealso{ \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Jest.Rd0000644000176200001440000002246214141452520015017 0ustar liggesusers\name{Jest} \alias{Jest} \title{Estimate the J-function} \description{ Estimates the summary function \eqn{J(r)} for a point pattern in a window of arbitrary shape. } \usage{ Jest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{J(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{vector of values for the argument \eqn{r} at which \eqn{J(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Character string specifying the choice of edge correction(s) in \code{\link{Fest}} and \code{\link{Gest}}. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}.
Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{J} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J(r)} computed from the border-corrected estimates of \eqn{F} and \eqn{G} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J(r)} computed from the Kaplan-Meier estimates of \eqn{F} and \eqn{G} } \item{han}{the Hanisch-style estimator of \eqn{J(r)} computed from the Hanisch estimate of \eqn{G} and the Chiu-Stoyan estimate of \eqn{F} } \item{un}{the uncorrected estimate of \eqn{J(r)} computed from the uncorrected estimates of \eqn{F} and \eqn{G} } \item{theo}{the theoretical value of \eqn{J(r)} for a stationary Poisson process: identically equal to \eqn{1} } The data frame also has \bold{attributes} \item{F}{ the output of \code{\link{Fest}} for this point pattern, containing three estimates of the empty space function \eqn{F(r)} and an estimate of its hazard function } \item{G}{ the output of \code{\link{Gest}} for this point pattern, containing three estimates of the nearest neighbour distance distribution function \eqn{G(r)} and an estimate of its hazard function } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{J} function (Van Lieshout and Baddeley, 1996) of a stationary point process is defined as \deqn{J(r) = \frac{1-G(r)}{1-F(r)} }{J(r) = (1-G(r))/(1-F(r))} where \eqn{G(r)} is the nearest neighbour distance distribution function of the point process (see \code{\link{Gest}}) and \eqn{F(r)} is its empty space function (see \code{\link{Fest}}). For a completely random (uniform Poisson) point process, the \eqn{J}-function is identically equal to \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} typically indicate spatial clustering or spatial regularity, respectively. The \eqn{J}-function is one of the few characteristics that can be computed explicitly for a wide range of point processes. See Van Lieshout and Baddeley (1996), Baddeley et al (2000), Thonnes and Van Lieshout (1999) for further information. An estimate of \eqn{J} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{J(r)} is compared against the constant function \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} may suggest spatial clustering or spatial regularity, respectively. This algorithm estimates the \eqn{J}-function from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The functions \code{\link{Fest}} and \code{\link{Gest}} are called to compute estimates of \eqn{F(r)} and \eqn{G(r)} respectively. These estimates are then combined by simply taking the ratio \eqn{J(r) = (1-G(r))/(1-F(r))}. In fact, several different estimates are computed using different edge corrections (Baddeley, 1998). The Kaplan-Meier estimate (returned as \code{km}) is the ratio \code{J = (1-G)/(1-F)} of the Kaplan-Meier estimates of \eqn{1-F} and \eqn{1-G} computed by \code{\link{Fest}} and \code{\link{Gest}} respectively.
This is computed if \code{correction=NULL} or if \code{correction} includes \code{"km"}. The Hanisch-style estimate (returned as \code{han}) is the ratio \code{J = (1-G)/(1-F)} where \code{F} is the Chiu-Stoyan estimate of \eqn{F} and \code{G} is the Hanisch estimate of \eqn{G}. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"cs"} or \code{"han"}. The reduced-sample or border corrected estimate (returned as \code{rs}) is the same ratio \code{J = (1-G)/(1-F)} of the border corrected estimates. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"rs"} or \code{"border"}. These edge-corrected estimators are slightly biased for \eqn{J}, since they are ratios of approximately unbiased estimators. The logarithm of the Kaplan-Meier estimate is exactly unbiased for \eqn{\log J}{log J}. The uncorrected estimate (returned as \code{un} and computed only if \code{correction} includes \code{"none"}) is the ratio \code{J = (1-G)/(1-F)} of the uncorrected (``raw'') estimates of the survival functions of \eqn{F} and \eqn{G}, which are the empirical distribution functions of the empty space distances \code{Fest(X,\dots)$raw} and of the nearest neighbour distances \code{Gest(X,\dots)$raw}. The uncorrected estimates of \eqn{F} and \eqn{G} are severely biased. However, the uncorrected estimate of \eqn{J} is approximately unbiased (if the process is close to Poisson); it is insensitive to edge effects, and should be used when edge effects are severe (see Baddeley et al, 2000). The algorithm for \code{\link{Fest}} uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. See \code{\link{Fest}} for details. First-time users are strongly advised not to specify these arguments. Note that the value returned by \code{Jest} includes the output of \code{\link{Fest}} and \code{\link{Gest}} as attributes (see the last example below). If the user is intending to compute the \code{F,G} and \code{J} functions for the point pattern, it is only necessary to call \code{Jest}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263--292. Baddeley, A., Kerscher, M., Schladitz, K. and Scott, B.T. Estimating the \emph{J} function without edge correction. \emph{Statistica Neerlandica} \bold{54} (2000) 315--328. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D., Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Thonnes, E. and Van Lieshout, M.N.M., A comparative study on the power of Van Lieshout and Baddeley's J-function.
\emph{Biometrical Journal} \bold{41} (1999) 721--734. Van Lieshout, M.N.M. and Baddeley, A.J. A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50} (1996) 344--361. } \seealso{ \code{\link{Jinhom}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) J <- Jest(cells, 0.01) plot(J, main="cells data") # values are far above J = 1, indicating regular pattern data(redwood) J <- Jest(redwood, 0.01, legendpos="center") plot(J, main="redwood data") # values are below J = 1, indicating clustered pattern } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/segregation.test.Rd0000644000176200001440000000555114141452520017377 0ustar liggesusers\name{segregation.test} \alias{segregation.test} \alias{segregation.test.ppp} \title{ Test of Spatial Segregation of Types } \description{ Performs a Monte Carlo test of spatial segregation of the types in a multitype point pattern. } \usage{ segregation.test(X, \dots) \method{segregation.test}{ppp}(X, \dots, nsim = 19, permute = TRUE, verbose = TRUE, Xname) } \arguments{ \item{X}{ Multitype point pattern (object of class \code{"ppp"} with factor-valued marks). } \item{\dots}{ Additional arguments passed to \code{\link{relrisk.ppp}} to control the smoothing parameter or bandwidth selection. } \item{nsim}{ Number of simulations for the Monte Carlo test. } \item{permute}{ Argument passed to \code{\link{rlabel}}. If \code{TRUE} (the default), randomisation is performed by randomly permuting the labels of \code{X}. If \code{FALSE}, randomisation is performed by resampling the labels with replacement. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{Xname}{ Optional character string giving the name of the dataset \code{X}. } } \details{ The Monte Carlo test of spatial segregation of types, proposed by Kelsall and Diggle (1995) and Diggle et al (2005), is applied to the point pattern \code{X}. The test statistic is \deqn{ T = \sum_i \sum_m \left( \widehat p(m \mid x_i) - \overline p_m \right)^2 }{ T = sum[i] sum[m] (phat(m | x[i]) - pbar[m])^2 } where \eqn{\widehat p(m \mid x_i)}{phat(m | x[i])} is the leave-one-out kernel smoothing estimate of the probability that the \eqn{i}-th data point has type \eqn{m}, and \eqn{\overline p_m}{pbar[m]} is the average fraction of data points which are of type \eqn{m}. The statistic \eqn{T} is evaluated for the data and for \code{nsim} randomised versions of \code{X}, generated by randomly permuting or resampling the marks. Note that, by default, automatic bandwidth selection will be performed separately for each randomised pattern. This computation can be very time-consuming but is necessary for the test to be valid in most conditions. A short-cut is to specify the value of the smoothing bandwidth \code{sigma} as shown in the examples. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. Diggle, P.J., Zheng, P. and Durr, P. (2005) Non-parametric estimation of spatial segregation in a multivariate point process: bovine tuberculosis in Cornwall, UK. \emph{Applied Statistics} \bold{54}, 645--658.
} \seealso{ \code{\link{relrisk}} } \examples{ segregation.test(hyytiala, 5) if(interactive()) segregation.test(hyytiala, hmin=0.05) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} spatstat.core/man/varblock.Rd0000644000176200001440000001050214141452520015705 0ustar liggesusers\name{varblock} \alias{varblock} \title{ Estimate Variance of Summary Statistic by Subdivision } \description{ This command estimates the variance of any summary statistic (such as the \eqn{K}-function) by spatial subdivision of a single point pattern dataset. } \usage{ varblock(X, fun = Kest, blocks = quadrats(X, nx = nx, ny = ny), \dots, nx = 3, ny = nx, confidence=0.95) } \arguments{ \item{X}{ Point pattern dataset (object of class \code{"ppp"}). } \item{fun}{ Function that computes the summary statistic. } \item{blocks}{ Optional. A tessellation that specifies the division of the space into blocks. } \item{\dots}{ Arguments passed to \code{fun}. } \item{nx,ny}{ Optional. Number of rectangular blocks in the \eqn{x} and \eqn{y} directions. Incompatible with \code{blocks}. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } } \details{ This command computes an estimate of the variance of the summary statistic \code{fun(X)} from a single point pattern dataset \code{X} using a subdivision method. It can be used to plot \bold{confidence intervals} for the true value of a summary function such as the \eqn{K}-function. The window containing \code{X} is divided into pieces by an \code{nx * ny} array of rectangles (or is divided into pieces of more general shape, according to the argument \code{blocks} if it is present). The summary statistic \code{fun} is applied to each of the corresponding sub-patterns of \code{X} as described below. Then the pointwise sample mean, sample variance and sample standard deviation of these summary statistics are computed. Then pointwise confidence intervals are computed, for the specified level of confidence, defaulting to 95 percent. The variance is estimated by equation (4.21) of Diggle (2003, page 52). This assumes that the point pattern \code{X} is stationary. For further details see Diggle (2003, pp 52--53). The estimate of the summary statistic from each block is computed as follows. For most functions \code{fun}, the estimate from block \code{B} is computed by finding the subset of \code{X} consisting of points that fall inside \code{B}, and applying \code{fun} to these points, by calling \code{fun(X[B])}. However if \code{fun} is the \eqn{K}-function \code{\link{Kest}}, or any function which has an argument called \code{domain}, the estimate for each block \code{B} is computed by calling \code{fun(X, domain=B)}. In the case of the \eqn{K}-function this means that the estimate from block \code{B} is computed by counting pairs of points in which the \emph{first} point lies in \code{B}, while the second point may lie anywhere. } \section{Errors}{ If the blocks are too small, there may be insufficient data in some blocks, and the function \code{fun} may report an error. If this happens, you need to take larger blocks. An error message about incompatibility may occur. The different function estimates may be incompatible in some cases, for example, because they use different default edge corrections (typically because the tiles of the tessellation are not the same kind of geometric object as the window of \code{X}, or because the default edge correction depends on the number of points). 
To prevent this, specify the choice of edge correction, in the \code{correction} argument to \code{fun}, if it has one. An alternative to \code{varblock} is Loh's mark bootstrap \code{\link{lohboot}}. } \value{ A function value table (object of class \code{"fv"}) that contains the result of \code{fun(X)} as well as the sample mean, sample variance and sample standard deviation of the block estimates, together with the upper and lower two-standard-deviation confidence limits. } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \adrian and \rolf } \seealso{ \code{\link{tess}}, \code{\link{quadrats}} for basic manipulation. \code{\link{lohboot}} for an alternative bootstrap technique. } \examples{ v <- varblock(amacrine, Kest, nx=4, ny=2) v <- varblock(amacrine, Kcross, nx=4, ny=2) if(interactive()) plot(v, iso ~ r, shade=c("hiiso", "loiso")) } \keyword{nonparametric} \keyword{spatial} spatstat.core/man/simulate.kppm.Rd0000644000176200001440000001074714141452520016706 0ustar liggesusers\name{simulate.kppm} \alias{simulate.kppm} \title{Simulate a Fitted Cluster Point Process Model} \description{ Generates simulated realisations from a fitted cluster point process model. } \usage{ \method{simulate}{kppm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, n.cond = NULL, w.cond = NULL, verbose=TRUE, retry=10, drop=FALSE) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{ Additional arguments passed to the relevant random generator. See Details. } \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{n.cond}{ Optional. Integer specifying a fixed number of points. See the section on \emph{Conditional Simulation}. } \item{w.cond}{ Optional. Conditioning region. A window (object of class \code{"owin"}) specifying the region which must contain exactly \code{n.cond} points. See the section on \emph{Conditional Simulation}. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{retry}{ Number of times to repeat the simulation if it fails (e.g. because of insufficient memory). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"kppm"} of fitted cluster point process models. Simulations are performed by \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}} or \code{\link{rLGCP}} depending on the model. Additional arguments \code{\dots} are passed to the relevant function performing the simulation. For example the argument \code{saveLambda} is recognised by all of the simulation functions. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). 
It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \section{Conditional Simulation}{ If \code{n.cond} is specified, it should be a single integer. Simulation will be conditional on the event that the pattern contains exactly \code{n.cond} points (or contains exactly \code{n.cond} points inside the region \code{w.cond} if it is given). Conditional simulation uses the rejection algorithm described in Section 6.2 of \Moller, Syversveen and Waagepetersen (1998). There is a maximum number of proposals which will be attempted. Consequently, the return value may contain fewer than \code{nsim} point patterns. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). (For conditional simulation, the length of the result may be shorter than \code{nsim}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \examples{ if(offline <- !interactive()) { spatstat.options(npixel=32, ndummy.min=16) } fit <- kppm(redwood ~x, "Thomas") simulate(fit, 2) simulate(fit, n.cond=60) if(offline) reset.spatstat.options() } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. } \seealso{ \code{\link{kppm}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rLGCP}}, \code{\link{simulate.ppm}}, \code{\link[stats]{simulate}} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/densityHeat.ppp.Rd0000644000176200001440000001744614141452520017173 0ustar liggesusers\name{densityHeat.ppp} \alias{densityHeat.ppp} \title{ Diffusion Estimate of Point Pattern Intensity } \description{ Computes the diffusion estimate of the intensity of a point pattern. } \usage{ \method{densityHeat}{ppp}(x, sigma, \dots, weights=NULL, connect=8, symmetric=FALSE, sigmaX=NULL, k=1, show=FALSE, se=FALSE, at=c("pixels", "points"), leaveoneout = TRUE, extrapolate = FALSE, coarsen = TRUE, verbose=TRUE, internal=NULL) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth. A single number giving the equivalent standard deviation of the smoother. Alternatively, a pixel image (class \code{"im"}) or a \code{function(x,y)} giving the spatially-varying bandwidth. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{pixellate.ppp}} controlling the pixel resolution. } \item{weights}{ Optional numeric vector of weights associated with each point of \code{x}. } \item{connect}{ Grid connectivity: either 4 or 8. } \item{symmetric}{ Logical value indicating whether to \emph{force} the algorithm to use a symmetric random walk. } \item{sigmaX}{ Numeric vector of bandwidths, one associated with each data point in \code{x}. See Details. } \item{k}{ Integer. Calculations will be performed by repeatedly multiplying the current state by the \code{k}-step transition matrix. } \item{show}{ Logical value indicating whether to plot successive iterations. } \item{se}{ Logical value indicating whether to compute standard errors.
} \item{at}{ Character string specifying whether to compute values at a grid of pixels (\code{at="pixels"}, the default) or at the data points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value specifying whether to compute a leave-one-out estimate at each data point, when \code{at="points"}. } \item{extrapolate}{ Logical value specifying whether to use Richardson extrapolation to improve the accuracy of the computation. } \item{coarsen}{ Logical value, controlling the calculation performed when \code{extrapolate=TRUE}. See Details. } \item{verbose}{ Logical value specifying whether to print progress reports. } \item{internal}{ Developer use only. } } \details{ This command computes a diffusion kernel estimate of point process intensity from the observed point pattern \code{x}. The function \code{\link{densityHeat}} is generic, with methods for point patterns in two dimensions (class \code{"ppp"}) and point patterns on a linear network (class \code{"lpp"}). The function \code{densityHeat.ppp} described here is the method for class \code{"ppp"}. Given a two-dimensional point pattern \code{x}, it computes a diffusion kernel estimate of the intensity of the point process which generated \code{x}. Diffusion kernel estimates were developed by Botev et al (2010), Barry and McIntyre (2011) and Baddeley et al (2021). Barry and McIntyre (2011) proposed an estimator for point process intensity based on a random walk on the pixel grid inside the observation window. Baddeley et al (2021) showed that the Barry-McIntyre method is a special case of the \emph{diffusion estimator} proposed by Botev et al (2010). The original Barry-McIntyre algorithm assumes a symmetric random walk (i.e. each possible transition has the same probability \eqn{p}) and requires a square pixel grid (i.e. equal spacing in the \eqn{x} and \eqn{y} directions). Their original algorithm is used if \code{symmetric=TRUE}. Use the \code{\dots} arguments to ensure a square grid: for example, the argument \code{eps} specifies a square grid with spacing \code{eps} units. The more general algorithm used here (Baddeley et al, 2021) does not require a square grid of pixels. If the pixel grid is not square, and if \code{symmetric=FALSE} (the default), then the random walk is not symmetric, in the sense that the probabilities of different jumps will be different, in order to ensure that the smoothing is isotropic. This implementation also includes two generalizations to the case of adaptive smoothing (Baddeley et al, 2021). In the first version of adaptive smoothing, the bandwidth is spatially-varying. The argument \code{sigma} should be a pixel image (class \code{"im"}) or a \code{function(x,y)} specifying the bandwidth at each spatial location. The smoothing is performed by solving the heat equation with spatially-varying parameters. In the second version of adaptive smoothing, each data point in \code{x} is smoothed using a separate bandwidth. The argument \code{sigmaX} should be a numeric vector specifying the bandwidth for each point of \code{x}. The smoothing is performed using the lagged arrival algorithm. The argument \code{sigma} can be omitted. If \code{extrapolate=FALSE} (the default), calculations are performed using the Euler scheme for the heat equation. If \code{extrapolate=TRUE}, the accuracy of the result will be improved by applying Richardson extrapolation (Baddeley et al, 2021, Section 4). 
After computing the intensity estimate using the Euler scheme on the desired pixel grid, a second estimate is computed using the same method on a different pixel grid, and the two estimates are combined by Richardson extrapolation to obtain a more accurate result. The second grid is coarser than the original grid if \code{coarsen=TRUE} (the default), and finer than the original grid if \code{coarsen=FALSE}. Setting \code{extrapolate=TRUE} increases computation time by 35\% if \code{coarsen=TRUE} and by 400\% if \code{coarsen=FALSE}. } \value{ Pixel image (object of class \code{"im"}) giving the estimated intensity of the point process. If \code{se=TRUE}, the result has an attribute \code{"se"} which is another pixel image giving the estimated standard error. If \code{at="points"} then the result is a numeric vector with one entry for each point of \code{x}. } \seealso{ \code{\link[spatstat.core]{density.ppp}} for the usual kernel estimator, and \code{\link[spatstat.core]{adaptive.density}} for the tessellation-based estimator. } \references{ Baddeley, A., Davies, T., Rakshit, S., Nair, G. and McSwiggan, G. (2021) Diffusion smoothing for spatial point patterns. \emph{Statistical Science}, in press. Barry, R.P. and McIntyre, J. (2011) Estimating animal densities and home range in regions with irregular boundaries and holes: a lattice-based alternative to the kernel density estimator. \emph{Ecological Modelling} \bold{222}, 1666--1672. Botev, Z.I., Grotowski, J.F. and Kroese, D.P. (2010) Kernel density estimation via diffusion. \emph{Annals of Statistics} \bold{38}, 2916--2957. } \author{ Adrian Baddeley and Tilman Davies. } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32) X <- runifpoint(25, letterR) Z <- densityHeat(X, 0.2) if(online) { plot(Z, main="Diffusion estimator") plot(X, add=TRUE, pch=16) integral(Z) # should equal 25 } Z <- densityHeat(X, 0.2, se=TRUE) Zse <- attr(Z, "se") if(online) plot(solist(estimate=Z, SE=Zse), main="") Zex <- densityHeat(X, 0.2, extrapolate=TRUE) ZS <- densityHeat(X, 0.2, symmetric=TRUE, eps=0.125) if(online) { plot(ZS, main="fixed bandwidth") plot(X, add=TRUE, pch=16) } sig <- function(x,y) { (x-1.5)/10 } ZZ <- densityHeat(X, sig) if(online) { plot(ZZ, main="adaptive (I)") plot(X, add=TRUE, pch=16) } sigX <- sig(X$x, X$y) AA <- densityHeat(X, sigmaX=sigX) if(online) { plot(AA, main="adaptive (II)") plot(X, add=TRUE, pch=16) } if(!online) spatstat.options(op) } \keyword{spatial} \keyword{smooth} spatstat.core/man/Gmulti.Rd0000644000176200001440000001722714141452520015356 0ustar liggesusers\name{Gmulti} \alias{Gmulti} \title{ Marked Nearest Neighbour Distance Function } \description{ For a marked point pattern, estimate the distribution of the distance from a typical point in subset \code{I} to the nearest point of subset \code{J}. } \usage{ Gmulti(X, I, J, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{G_{IJ}(r)}{GIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. } \item{J}{Subset of points in \code{X} to which distances are measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. There is a sensible default.
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing seven numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{IJ}(r)}{GIJ(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{IJ}(r)}{GIJ(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{IJ}(r)}{GIJ(r)}, i.e. the empirical distribution of the distances from each point in subset \code{I} to the nearest point in subset \code{J} } \item{theo}{the theoretical value of \eqn{G_{IJ}(r)}{GIJ(r)} for a marked Poisson process with the same estimated intensity } } \details{ The function \code{Gmulti} generalises \code{\link{Gest}} (for unmarked point patterns) and \code{\link{Gdot}} and \code{\link{Gcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. This function computes an estimate of the cumulative distribution function \eqn{G_{IJ}(r)}{GIJ(r)} of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. This algorithm estimates the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances.
The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users are strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{IJ}(r)}{GIJ(r)}. This estimate should be used with caution as \eqn{G_{IJ}(r)}{GIJ(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern is a biased estimate of \eqn{G_{IJ}}{GIJ}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{IJ}}{GIJ} as if it were an unbiased estimator of \eqn{G_{IJ}}{GIJ}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit: analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D. and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303. Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D., Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{G_{IJ}}{GIJ} does not necessarily have a density. The reduced sample estimator of \eqn{G_{IJ}}{GIJ} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{IJ}}{GIJ} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gdot}}, \code{\link{Gest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1, npoints(trees), by=50), ] } Gm <- Gmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Gm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/plot.leverage.ppm.Rd0000644000176200001440000001043214141452520017446 0ustar liggesusers\name{plot.leverage.ppm} \alias{plot.leverage.ppm} \alias{contour.leverage.ppm} \alias{persp.leverage.ppm} \title{ Plot Leverage Function } \description{ Generate a pixel image plot, or a contour plot, or a perspective plot, of a leverage function that has been computed by \code{\link{leverage.ppm}}.
} \usage{ \method{plot}{leverage.ppm}(x, \dots, what=c("smooth", "nearest", "exact"), showcut=TRUE, args.cut=list(drawlabels=FALSE), multiplot=TRUE) \method{contour}{leverage.ppm}(x, \dots, what=c("smooth", "nearest"), showcut=TRUE, args.cut=list(col=3, lwd=3, drawlabels=FALSE), multiplot=TRUE) \method{persp}{leverage.ppm}(x, \dots, what=c("smooth", "nearest"), main, zlab="leverage") } \arguments{ \item{x}{ Leverage function (object of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.im}} or \code{\link{contour.im}} or \code{\link{persp.im}} controlling the plot. } \item{what}{ Character string (partially matched) specifying the values to be plotted. See Details. } \item{showcut}{ Logical. If \code{TRUE}, a contour line is plotted at the level equal to the theoretical mean of the leverage. } \item{args.cut}{ Optional list of arguments passed to \code{\link[graphics]{contour.default}} to control the plotting of the contour line for the mean leverage. } \item{multiplot}{ Logical value indicating whether it is permissible to display several plot panels. } \item{main}{ Optional main title. A character string or character vector. } \item{zlab}{ Label for the \eqn{z} axis. A character string. } } \details{ These functions are the \code{plot}, \code{contour} and \code{persp} methods for objects of class \code{"leverage.ppm"}. Such objects are computed by the command \code{\link{leverage.ppm}}. The \code{plot} method displays the leverage function as a colour pixel image using \code{\link{plot.im}}, and draws a single contour line at the mean leverage value using \code{\link{contour.default}}. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. The \code{contour} method displays the leverage function as a contour plot, and also draws a single contour line at the mean leverage value, using \code{\link{contour.im}}. The \code{persp} method displays the leverage function as a surface in perspective view, using \code{\link{persp.im}}. Since the exact values of leverage are computed only at a finite set of quadrature locations, there are several options for these plots: \describe{ \item{\code{what="smooth"}:}{ (the default) an image plot showing a smooth function, obtained by applying kernel smoothing to the exact leverage values; } \item{\code{what="nearest"}:}{ an image plot showing a piecewise-constant function, obtained by taking the exact leverage value at the nearest quadrature point; } \item{\code{what="exact"}:}{ a symbol plot showing the exact values of leverage as circles, centred at the quadrature points, with diameters proportional to leverage. } } The pixel images are already contained in the object \code{x} and were computed by \code{\link{leverage.ppm}}; the resolution of these images is controlled by arguments to \code{\link{leverage.ppm}}. } \value{ Same as for \code{\link{plot.im}}, \code{\link{contour.im}} and \code{\link{persp.im}} respectively. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}. 
} \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32, ndummy.min=16) X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lef <- leverage(fit) plot(lef) contour(lef) persp(lef) if(offline) spatstat.options(op) } \keyword{spatial} \keyword{models} spatstat.core/man/plot.laslett.Rd0000644000176200001440000000324214141452520016532 0ustar liggesusers\name{plot.laslett} \alias{plot.laslett} \title{ Plot Laslett Transform } \description{ Plot the result of Laslett's Transform. } \usage{ \method{plot}{laslett}(x, \dots, Xpars = list(box = TRUE, col = "grey"), pointpars = list(pch = 3, cols = "blue"), rectpars = list(lty = 3, border = "green")) } \arguments{ \item{x}{ Object of class \code{"laslett"} produced by \code{\link{laslett}} representing the result of Laslett's transform. } \item{\dots}{ Additional plot arguments passed to \code{\link{plot.solist}}. } \item{Xpars}{ A list of plot arguments passed to \code{\link{plot.owin}} or \code{\link{plot.im}} to display the original region \code{X} before transformation. } \item{pointpars}{ A list of plot arguments passed to \code{\link{plot.ppp}} to display the tangent points. } \item{rectpars}{ A list of plot arguments passed to \code{\link{plot.owin}} to display the maximal rectangle. } } \details{ This is the \code{plot} method for the class \code{"laslett"}. The function \code{\link{laslett}} applies Laslett's Transform to a spatial region \code{X} and returns an object of class \code{"laslett"} representing the result of the transformation. The result is plotted by this method. The plot function \code{\link{plot.solist}} is used to align the before-and-after pictures. See \code{\link{plot.solist}} for further options to control the plot. } \value{ None. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{laslett}} } \examples{ b <- laslett(heather$coarse, plotit=FALSE) plot(b, main="Heather Data") } \keyword{spatial} \keyword{hplot} spatstat.core/man/Fiksel.Rd0000644000176200001440000000743214141452520015327 0ustar liggesusers\name{Fiksel} \alias{Fiksel} \title{The Fiksel Interaction} \description{ Creates an instance of Fiksel's double exponential pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Fiksel(r, hc=NA, kappa) } \arguments{ \item{r}{The interaction radius of the Fiksel model} \item{hc}{The hard core distance} \item{kappa}{The rate parameter} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Fiksel process with interaction radius \eqn{r}, hard core distance \code{hc} and rate parameter \code{kappa}. } \details{ Fiksel (1984) introduced a pairwise interaction point process with the following interaction function \eqn{c}. For two points \eqn{u} and \eqn{v} separated by a distance \eqn{d=||u-v||}, the interaction \eqn{c(u,v)} is equal to \eqn{0} if \eqn{d < h}, equal to \eqn{1} if \eqn{d > r}, and equal to \deqn{ \exp(a \exp(-\kappa d))}{exp(a * exp(-kappa * d))} if \eqn{h \le d \le r}{h <= d <= r}, where \eqn{h,r,\kappa,a}{h,r,kappa,a} are parameters. A graph of this interaction function is shown in the Examples. The interpretation of the parameters is as follows. \itemize{ \item \eqn{h} is the hard core distance: distinct points are not permitted to come closer than a distance \eqn{h} apart. \item \eqn{r} is the interaction range: points further than this distance do not interact. 
\item \eqn{\kappa}{kappa} is the rate or slope parameter, controlling the decay of the interaction as distance increases. \item \eqn{a} is the interaction strength parameter, controlling the strength and type of interaction. If \eqn{a} is zero, the process is Poisson. If \eqn{a} is positive, the process is clustered. If \eqn{a} is negative, the process is inhibited (regular). } The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Fiksel pairwise interaction is yielded by the function \code{Fiksel()}. See the examples below. The parameters \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} must be fixed and given in the call to \code{Fiksel}, while the canonical parameter \eqn{a} is estimated by \code{\link{ppm}()}. To estimate \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} it is possible to use \code{\link{profilepl}}. The maximum likelihood estimator of \eqn{h} is the minimum interpoint distance. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. See also Stoyan, Kendall and Mecke (1987) page 161. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{StraussHard}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Elektronische Informationsverarbeitung und Kybernetik} \bold{20}, 270--278. Stoyan, D., Kendall, W.S. and Mecke, J. (1987) \emph{Stochastic geometry and its applications}. Wiley. } \examples{ Fiksel(r=1, hc=0.02, kappa=2) # prints a sensible description of itself data(spruces) X <- unmark(spruces) fit <- ppm(X ~ 1, Fiksel(r=3.5, kappa=1)) plot(fitin(fit)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/Hardcore.Rd0000644000176200001440000000520214141452520015632 0ustar liggesusers\name{Hardcore} \alias{Hardcore} \title{The Hard Core Point Process Model} \description{ Creates an instance of the hard core point process model which can then be fitted to point pattern data. } \usage{ Hardcore(hc=NA) } \arguments{ \item{hc}{The hard core distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hard core process with hard core distance \code{hc}. } \details{ A hard core process with hard core distance \eqn{h} and abundance parameter \eqn{\beta}{beta} is a pairwise interaction point process in which distinct points are not allowed to come closer than a distance \eqn{h} apart. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{\alpha}{alpha} is the normalising constant.
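For intuition, the unnormalised density (with \eqn{\alpha}{alpha} omitted) is easy to evaluate directly. The following is a minimal sketch, not part of \pkg{spatstat} itself; the function name is ours, and it assumes \code{X} is a point pattern (class \code{"ppp"}) and that \code{beta} and \code{h} are user-chosen values. \preformatted{
  # sketch only: unnormalised hard core density beta^n(x),
  # which is zero when any pair of points is closer than h
  hardCoreDensity <- function(X, beta, h) {
    n <- npoints(X)
    if(n >= 2 && min(nndist(X)) < h) return(0)  # hard core violated
    beta^n
  }
  hardCoreDensity(cells, beta=100, h=0.05)
}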
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hard core process pairwise interaction is yielded by the function \code{Hardcore()}. See the examples below. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiHard}}, \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \examples{ Hardcore(0.02) # prints a sensible description of itself ppm(cells ~1, Hardcore(0.05)) # fit the stationary hard core process to `cells' # estimate hard core radius from data ppm(cells ~1, Hardcore()) # ppm(cells ~1, Hardcore) # ppm(cells ~ polynom(x,y,3), Hardcore(0.05)) # fit a nonstationary hard core process # with log-cubic polynomial trend } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/plot.rppm.Rd0000644000176200001440000000425414141452520016044 0ustar liggesusers\name{plot.rppm} \alias{plot.rppm} \title{ Plot a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, plot the partition tree or the fitted intensity. } \usage{ \method{plot}{rppm}(x, \dots, what = c("tree", "spatial"), treeplot=NULL) } \arguments{ \item{x}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{what}{ Character string (partially matched) specifying whether to plot the partition tree or the fitted intensity. } \item{\dots}{ Arguments passed to \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}} (if \code{what="tree"}) or passed to \code{\link{plot.im}} (if \code{what="spatial"}) controlling the appearance of the plot. } \item{treeplot}{ Optional. A function to be used to plot and label the partition tree, replacing the two functions \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}}. } } \details{ If \code{what="tree"} (the default), the partition tree will be plotted using \code{\link[rpart]{plot.rpart}}, and labelled using \code{\link[rpart]{text.rpart}}. If the argument \code{treeplot} is given, then plotting and labelling will be performed by \code{treeplot} instead. A good choice is the function \code{prp} in package \pkg{rpart.plot}. If \code{what="spatial"}, the predicted intensity will be computed using \code{\link{predict.rppm}}, and this intensity will be plotted as an image using \code{\link{plot.im}}. } \value{ If \code{what="tree"}, a list containing \code{x} and \code{y} coordinates of the plotted nodes of the tree. If \code{what="spatial"}, the return value of \code{\link{plot.im}}.
} \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}} } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # fit <- rppm(gold ~ dfault + greenstone, data=mur) # opa <- par(mfrow=c(1,2)) plot(fit) plot(fit, what="spatial") par(opa) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.core/man/MultiHard.Rd0000644000176200001440000000555014141452520016002 0ustar liggesusers\name{MultiHard} \alias{MultiHard} \title{The Multitype Hard Core Point Process Model} \description{ Creates an instance of the multitype hard core point process model which can then be fitted to point pattern data. } \usage{ MultiHard(hradii, types=NULL) } \arguments{ \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype hard core process with hard core radii \eqn{hradii[i,j]}. } \details{ This is a multitype version of the hard core process. A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The matrix \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no distance constraint should be applied for this combination of types. Note that only the hardcore radii are specified in \code{MultiHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}. See \code{\link{ragsMultiHard}} and \code{\link{rmh}} for simulation. } \examples{ h <- matrix(c(1,2,2,1), nrow=2,ncol=2) # prints a sensible description of itself MultiHard(h) # Fit the stationary multitype hardcore process to `amacrine' # with hard core operating only between cells of the same type. h <- 0.02 * matrix(c(1, NA, NA, 1), nrow=2,ncol=2) ppm(amacrine ~1, MultiHard(h)) } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype hard core model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiHard(types=NULL, hradii)}. The new code attempts to handle the old syntax as well. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/rpoispp.Rd0000644000176200001440000001373614141452520015602 0ustar liggesusers\name{rpoispp} \alias{rpoispp} \title{Generate Poisson Point Pattern} \description{ Generate a random point pattern using the (homogeneous or inhomogeneous) Poisson process.
Includes CSR (complete spatial randomness). } \usage{ rpoispp(lambda, lmax=NULL, win=owin(), \dots, nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. Either a single positive number, a \code{function(x,y, \dots)}, or a pixel image. } \item{lmax}{ Optional. An upper bound for the value of \code{lambda(x,y)}, if \code{lambda} is a function. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{ex}{ Optional. A point pattern to use as the example. If \code{ex} is given and \code{lambda,lmax,win} are missing, then \code{lambda} and \code{win} will be calculated from the point pattern \code{ex}. } \item{warnwin}{ Logical value specifying whether to issue a warning when \code{win} is ignored (which occurs when \code{lambda} is an image and \code{win} is present). } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform Poisson process (also known as Complete Spatial Randomness, CSR) inside the window \code{win} with intensity \code{lambda} (points per unit area). If \code{lambda} is a function, then this algorithm generates a realisation of the inhomogeneous Poisson process with intensity function \code{lambda(x,y,\dots)} at spatial location \code{(x,y)} inside the window \code{win}. The function \code{lambda} must work correctly with vectors \code{x} and \code{y}. If \code{lmax} is given, it must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. That is, we must have \code{lambda(x,y,\dots) <= lmax} for all locations \code{(x,y)}. If this is not true then the results of the algorithm will be incorrect. If \code{lmax} is missing or \code{NULL}, an approximate upper bound is computed by finding the maximum value of \code{lambda(x,y,\dots)} on a grid of locations \code{(x,y)} inside the window \code{win}, and adding a safety margin equal to 5 percent of the range of \code{lambda} values. This can be computationally intensive, so it is advisable to specify \code{lmax} if possible. If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), this algorithm generates a realisation of the inhomogeneous Poisson process with intensity equal to the pixel values of the image. (The value of the intensity function at an arbitrary location is the pixel value of the nearest pixel.) The argument \code{win} is ignored; the window of the pixel image is used instead. It will be converted to a rectangle if possible, using \code{\link{rescue.rectangle}}. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax}, then randomly deletes or retains each point, independently of other points, with retention probability \eqn{p(x,y) = \lambda(x,y)/\mbox{lmax}}{p(x,y) = lambda(x,y)/lmax}. For \emph{marked} point patterns, use \code{\link{rmpoispp}}. 
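The thinning construction described above can be written out in a few lines of \R code. This is a schematic sketch of the idea only, not the internal implementation; the function name is ours, and it assumes \code{lambda} is a \code{function(x,y)} and \code{lmax} is a valid upper bound. \preformatted{
  # sketch: simulate by thinning a dominating uniform Poisson process
  rpoisThin <- function(lambda, lmax, win) {
    Y <- rpoispp(lmax, win=win)          # dominating uniform process
    p <- lambda(Y$x, Y$y)/lmax           # retention probabilities
    Y[runif(npoints(Y)) < p]             # independent thinning
  }
  X <- rpoisThin(function(x,y) 100 * exp(-3*x), 100, square(1))
}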
} \section{Warning}{ Note that \code{lambda} is the \bold{intensity}, that is, the expected number of points \bold{per unit area}. The total number of points in the simulated pattern will be random with expected value \code{mu = lambda * a} where \code{a} is the area of the window \code{win}. } \section{Reproducibility}{ The simulation algorithm, for the case where \code{lambda} is a pixel image, was changed in \pkg{spatstat} version \code{1.42-3}. Set \code{spatstat.options(fastpois=FALSE)} to use the previous, slower algorithm, if it is desired to reproduce results obtained with earlier versions. } \seealso{ \code{\link{rmpoispp}} for Poisson \emph{marked} point patterns, \code{\link{runifpoint}} for a fixed number of independent uniform random points; \code{\link{rpoint}}, \code{\link{rmpoint}} for a fixed number of independent random points with any distribution; \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rStrauss}}, \code{\link{rstrat}} for random point processes with spatial inhibition or regularity; \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rcell}} for random point processes exhibiting clustering; \code{\link{rmh.default}} for Gibbs processes. See also \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform Poisson process with intensity 100 in the unit square pp <- rpoispp(100) # uniform Poisson process with intensity 1 in a 10 x 10 square pp <- rpoispp(1, win=owin(c(0,10),c(0,10))) # plots should look similar! # inhomogeneous Poisson process in unit square # with intensity lambda(x,y) = 100 * exp(-3*x) # Intensity is bounded by 100 pp <- rpoispp(function(x,y) {100 * exp(-3*x)}, 100) # How to tune the coefficient of x lamb <- function(x,y,a) { 100 * exp( - a * x)} pp <- rpoispp(lamb, 100, a=3) # pixel image Z <- as.im(function(x,y){100 * sqrt(x+y)}, unit.square()) pp <- rpoispp(Z) # randomising an existing point pattern rpoispp(intensity(cells), win=Window(cells)) rpoispp(ex=cells) } \author{ \adrian, \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat.core/man/rPoissonCluster.Rd0000644000176200001440000001121214141452520017261 0ustar liggesusers\name{rPoissonCluster} \alias{rPoissonCluster} \title{Simulate Poisson Cluster Process} \description{ Generate a random point pattern, a realisation of the general Poisson cluster process. } \usage{ rPoissonCluster(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{expand}{ Size of the expansion of the simulation window for generating parent points. A single non-negative number. } \item{rcluster}{ A function which generates random clusters. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster} } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute.
} } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of the point pattern: see Details. } \details{ This algorithm generates a realisation of the general Poisson cluster process, with the cluster mechanism given by the function \code{rcluster}. First, the algorithm generates a Poisson point process of ``parent'' points with intensity \code{kappa} in an expanded window as explained below. Here \code{kappa} may be a single positive number, a function \code{kappa(x, y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points, created by calling the function \code{rcluster}. These clusters are combined together to yield a single point pattern, and the restriction of this pattern to the window \code{win} is then returned as the result of \code{rPoissonCluster}. The expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{expand} in each direction. The size of the expansion is saved in the attribute \code{"expand"} and may be extracted by \code{attr(X, "expand")} where \code{X} is the generated point pattern. The function \code{rcluster} should expect to be called as \code{rcluster(xp[i],yp[i],\dots)} for each parent point at a location \code{(xp[i],yp[i])}. The return value of \code{rcluster} should be a list with elements \code{x,y} which are vectors of equal length giving the absolute \eqn{x} and \eqn{y} coordinates of the points in the cluster. If the return value of \code{rcluster} is a point pattern (object of class \code{"ppp"}) then it may have marks. The result of \code{rPoissonCluster} will then be a marked point pattern. If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rPoissonCluster} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. (If these data are not required, it is more efficient to set \code{saveparents=FALSE}.) } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}. } \examples{ # each cluster consists of 5 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rPoissonCluster(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rPoissonCluster(15,0.1,nclust2, radius=0.1, n=5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/Kest.fft.Rd0000644000176200001440000000615014141452520015572 0ustar liggesusers\name{Kest.fft} \alias{Kest.fft} \title{K-function using FFT} \description{ Estimates the reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape, using the Fast Fourier Transform.
} \usage{ Kest.fft(X, sigma, r=NULL, \dots, breaks=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ Standard deviation of the isotropic Gaussian smoothing kernel. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the spatial resolution for the FFT calculation. } \item{breaks}{ This argument is for internal use only. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{border}{the estimates of \eqn{K(r)} for these values of \eqn{r} } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } } \details{ This is an alternative to the function \code{\link{Kest}} for estimating the \eqn{K} function. It may be useful for very large patterns of points. Whereas \code{\link{Kest}} computes the distance between each pair of points analytically, this function discretises the point pattern onto a rectangular pixel raster and applies Fast Fourier Transform techniques to estimate \eqn{K(r)}. The hard work is done by the function \code{\link{Kmeasure}}. The result is an approximation whose accuracy depends on the resolution of the pixel raster. The resolution is controlled by the arguments \code{\dots}, or by setting the parameter \code{npixel} in \code{\link{spatstat.options}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63--71. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D., Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kmeasure}}, \code{\link{spatstat.options}} } \examples{ pp <- runifpoint(10000) \testonly{ op <- spatstat.options(npixel=125) } Kpp <- Kest.fft(pp, 0.01) plot(Kpp) \testonly{spatstat.options(op)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/methods.zclustermodel.Rd0000644000176200001440000000360014141452520020441 0ustar liggesusers\name{methods.zclustermodel} \alias{methods.zclustermodel} % DoNotExport \alias{pcfmodel.zclustermodel} \alias{Kmodel.zclustermodel} \alias{predict.zclustermodel} \alias{intensity.zclustermodel} \alias{print.zclustermodel} \alias{clusterradius.zclustermodel} \alias{reach.zclustermodel} \title{ Methods for Cluster Models } \description{ Methods for the experimental class of cluster models.
} \usage{ \method{pcfmodel}{zclustermodel}(model, \dots) \method{Kmodel}{zclustermodel}(model, \dots) \method{intensity}{zclustermodel}(X, \dots) \method{predict}{zclustermodel}(object, \dots, locations, type = "intensity", ngrid = NULL) \method{print}{zclustermodel}(x, \dots) \method{clusterradius}{zclustermodel}(model,\dots,thresh=NULL, precision=FALSE) \method{reach}{zclustermodel}(x, \dots, epsilon) } \arguments{ \item{model,object,x,X}{ Object of class \code{"zclustermodel"}. } \item{\dots}{ Arguments passed to other methods. } \item{locations}{ Locations where prediction should be performed. A window or a point pattern. } \item{type}{ Currently must equal \code{"intensity"}. } \item{ngrid}{ Pixel grid dimensions for prediction, if \code{locations} is a rectangle or polygon. } \item{thresh,epsilon}{Tolerance thresholds} \item{precision}{ Logical value stipulating whether the precision should also be returned. } } \details{ Experimental. } \value{ Same as for other methods. } \author{ \adrian } \seealso{ \code{\link{zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) m2 <- zclustermodel("VarGamma", kappa=10, mu=10, scale=0.1, nu=0.7) m m2 g <- pcfmodel(m) g(0.2) g2 <- pcfmodel(m2) g2(1) Z <- predict(m, locations=square(2)) Z2 <- predict(m2, locations=square(1)) varcount(m, square(1)) varcount(m2, square(1)) } \keyword{spatial} \keyword{models} spatstat.core/man/bind.fv.Rd0000644000176200001440000000673414141452520015444 0ustar liggesusers\name{bind.fv} \alias{bind.fv} \alias{cbind.fv} \title{ Combine Function Value Tables } \description{ Advanced Use Only. Combine objects of class \code{"fv"}, or glue extra columns of data onto an existing \code{"fv"} object. } \usage{ \method{cbind}{fv}(...) bind.fv(x, y, labl = NULL, desc = NULL, preferred = NULL, clip=FALSE) } \arguments{ \item{\dots}{ Any number of arguments, which are objects of class \code{"fv"}. } \item{x}{ An object of class \code{"fv"}. } \item{y}{ Either a data frame or an object of class \code{"fv"}. } \item{labl}{ Plot labels (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{desc}{ Descriptions (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{preferred}{ Character string specifying the column which is to be the new recommended value of the function. } \item{clip}{ Logical value indicating whether each object must have exactly the same domain, that is, the same sequence of values of the function argument (\code{clip=FALSE}, the default) or whether objects with different domains are permissible and will be restricted to a common domain (\code{clip=TRUE}). } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. The function \code{cbind.fv} is a method for the generic \R function \code{\link{cbind}}. It combines any number of objects of class \code{"fv"} into a single object of class \code{"fv"}. The objects must be compatible, in the sense that they have identical values of the function argument. The function \code{bind.fv} is a lower level utility which glues additional columns onto an existing object \code{x} of class \code{"fv"}. It has two modes of use: \itemize{ \item If the additional dataset \code{y} is an object of class \code{"fv"}, then \code{x} and \code{y} must be compatible as described above. Then the columns of \code{y} that contain function values will be appended to the object \code{x}. 
\item Alternatively if \code{y} is a data frame, then \code{y} must have the same number of rows as \code{x}. All columns of \code{y} will be appended to \code{x}. } The arguments \code{labl} and \code{desc} provide plot labels and description strings (as described in \code{\link{fv}}) for the \emph{new} columns. If \code{y} is an object of class \code{"fv"} then \code{labl} and \code{desc} are optional, and default to the relevant entries in the object \code{y}. If \code{y} is a data frame then \code{labl} and \code{desc} must be provided. } \value{ An object of class \code{"fv"}. } \author{ \spatstatAuthors. } \examples{ K1 <- Kest(cells, correction="border") K2 <- Kest(cells, correction="iso") # remove column 'theo' to avoid duplication K2 <- K2[, names(K2) != "theo"] cbind(K1, K2) bind.fv(K1, K2, preferred="iso") # constrain border estimate to be monotonically increasing bm <- cumsum(c(0, pmax(0, diff(K1$border)))) bind.fv(K1, data.frame(bmono=bm), "\%s[bmo](r)", "monotone border-corrected estimate of \%s", "bmono") } \seealso{ \code{\link{fv}}, \code{\link{with.fv}}. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \keyword{spatial} \keyword{attribute} spatstat.core/man/plot.slrm.Rd0000644000176200001440000000234014141452520016035 0ustar liggesusers\name{plot.slrm} \Rdversion{1.1} \alias{plot.slrm} \title{ Plot a Fitted Spatial Logistic Regression } \description{ Plots a fitted Spatial Logistic Regression model. } \usage{ \method{plot}{slrm}(x, ..., type = "intensity") } \arguments{ \item{x}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Extra arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } } \details{ This is a method for \code{\link{plot}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). This function plots the result of \code{\link{predict.slrm}}. } \value{ None. } \seealso{ \code{\link{slrm}}, \code{\link{predict.slrm}}, \code{\link{plot.im}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fit <- slrm(X ~ Z) plot(fit) plot(fit, type="link") } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.core/man/varcount.Rd0000644000176200001440000000675014141452520015755 0ustar liggesusers\name{varcount} \alias{varcount} \title{ Predicted Variance of the Number of Points } \description{ Given a fitted point process model, calculate the predicted variance of the number of points in a nominated set \code{B}. } \usage{ varcount(model, B=Window(model), \dots, dimyx = NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). } \item{B}{ A window (object of class \code{"owin"}) specifying the region in which the points are counted. Alternatively a pixel image (object of class \code{"im"}) or a function of spatial coordinates specifying a numerical weight for each random point. The default is the window of the original point pattern data to which the model was fitted. } \item{\dots}{ Additional arguments passed to \code{B} when it is a function. } \item{dimyx}{ Spatial resolution for the calculations.
Argument passed to \code{\link{as.mask}}. } } \details{ This command calculates the variance of the number of points falling in a specified window \code{B} according to the \code{model}. It can also calculate the variance of a sum of weights attached to each random point. The \code{model} should be a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). \itemize{ \item{ If \code{B} is a window, this command calculates the variance of the number of points falling in \code{B}, according to the fitted \code{model}. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then \code{B} should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a pixel image, this command calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling in the domain of the image. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then the domain of the pixel image, \code{as.owin(B)}, should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a \code{function(x,y)} or \code{function(x,y,...)} this command calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling inside the window \code{W=as.owin(model)}, the window in which the original data were observed. } } The variance calculation involves the intensity and the pair correlation function of the model. The calculation is exact (up to discretisation error) for models of class \code{"kppm"} and \code{"dppm"}, and for Poisson point process models of class \code{"ppm"}. For Gibbs point process models of class \code{"ppm"} the calculation depends on the Poisson-saddlepoint approximations to the intensity and pair correlation function, which are rough approximations. The approximation is not yet implemented for some Gibbs models. } \value{ A single number. } \author{ \spatstatAuthors } \seealso{ \code{\link{predict.ppm}}, \code{\link{predict.kppm}}, \code{\link{predict.dppm}} } \examples{ fitT <- kppm(redwood ~ 1, "Thomas") B <- owin(c(0, 0.5), c(-0.5, 0)) varcount(fitT, B) fitS <- ppm(swedishpines ~ 1, Strauss(9)) BS <- square(50) varcount(fitS, BS) } \keyword{spatial} \keyword{models} spatstat.core/man/Kmodel.ppm.Rd0000644000176200001440000000450214144333466016125 0ustar liggesusers\name{Kmodel.ppm} \alias{Kmodel.ppm} \alias{pcfmodel.ppm} \title{K Function or Pair Correlation Function of Gibbs Point Process model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a fitted Gibbs point process model. } \usage{ \method{Kmodel}{ppm}(model, \dots) \method{pcfmodel}{ppm}(model, \dots) } \arguments{ \item{model}{ A fitted Poisson or Gibbs point process model (object of class \code{"ppm"}) typically obtained from the model-fitting algorithm \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ This function computes an \emph{approximation} to the \eqn{K} function or the pair correlation function of a Gibbs point process. The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"ppm"}. The approximation is only available for stationary pairwise-interaction models. 
It uses the second order Poisson-saddlepoint approximation (Baddeley and Nair, 2012b) which is a combination of the Poisson-Boltzmann-Emden and Percus-Yevick approximations. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{ppm}} to fit Gibbs models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.kppm}} for the method for cluster/Cox processes. } \examples{ fit <- ppm(swedishpines ~ 1, Strauss(8)) p <- pcfmodel(fit) K <- Kmodel(fit) p(6) K(8) curve(K(x), from=0, to=15) } \references{ Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6}, 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. \code{DOI: 10.1002/sta4.5} } \author{\adrian and Gopalan Nair. } \keyword{spatial} \keyword{models} spatstat.core/man/rmh.Rd0000644000176200001440000000565314141452520014703 0ustar liggesusers\name{rmh} \alias{rmh} \title{Simulate point patterns using the Metropolis-Hastings algorithm.} \description{ Generic function for running the Metropolis-Hastings algorithm to produce simulated realisations of a point process model. } \usage{rmh(model, \dots)} \arguments{ \item{model}{The point process model to be simulated. } \item{\dots}{Further arguments controlling the simulation. } } \details{ The Metropolis-Hastings algorithm can be used to generate simulated realisations from a wide range of spatial point processes. For caveats, see below. The function \code{rmh} is generic; it has methods \code{\link{rmh.ppm}} (for objects of class \code{"ppm"}) and \code{\link{rmh.default}} (the default). The actual implementation of the Metropolis-Hastings algorithm is contained in \code{\link{rmh.default}}. For details of its use, see \code{\link{rmh.ppm}} or \code{\link{rmh.default}}. [If the model is a Poisson process, then Metropolis-Hastings is not used; the Poisson model is generated directly using \code{\link{rpoispp}} or \code{\link{rmpoispp}}.] In brief, the Metropolis-Hastings algorithm is a Markov Chain, whose states are spatial point patterns, and whose limiting distribution is the desired point process. After running the algorithm for a very large number of iterations, we may regard the state of the algorithm as a realisation from the desired point process. However, there are difficulties in deciding whether the algorithm has run for ``long enough''. The convergence of the algorithm may indeed be extremely slow. No guarantees of convergence are given! While it is fashionable to decry the Metropolis-Hastings algorithm for its poor convergence and other properties, it has the advantage of being easy to implement for a wide range of models. } \section{Warning}{ As of version 1.22-1 of \code{spatstat} a subtle change was made to \code{rmh.default()}. We had noticed that the results produced were sometimes not ``scalable'' in that two models, differing in effect only by the units in which distances are measured and starting from the same seed, gave different results. This was traced to an idiosyncrasy of floating point arithmetic.
The code of \code{rmh.default()} has been changed so that the results produced by \code{rmh} are now scalable. The downside of this is that code which users previously ran may now give results which are different from what they formerly were. In order to recover former behaviour (so that previous results can be reproduced) set \code{spatstat.options(scalable=FALSE)}. See the last example in the help for \code{\link{rmh.default}}. } \value{ A point pattern, in the form of an object of class \code{"ppp"}. See \code{\link{rmh.default}} for details. } \seealso{ \code{\link{rmh.default}} } \examples{ # See examples in rmh.default and rmh.ppm } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/laslett.Rd0000644000176200001440000001371414141452520015562 0ustar liggesusers\name{laslett} \alias{laslett} \title{ Laslett's Transform } \description{ Apply Laslett's Transform to a spatial region, returning the original and transformed regions, and the original and transformed positions of the lower tangent points. This is a diagnostic for the Boolean model. } \usage{ laslett(X, \dots, verbose = FALSE, plotit = TRUE, discretise = FALSE, type=c("lower", "upper", "left", "right")) } \arguments{ \item{X}{ Spatial region to be transformed. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{\dots}{ Graphics arguments to control the plot (passed to \code{\link{plot.laslett}} when \code{plotit=TRUE}) or arguments determining the pixel resolution (passed to \code{\link{as.mask}}). } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{plotit}{ Logical value indicating whether to plot the result. } \item{discretise}{ Logical value indicating whether polygonal windows should first be converted to pixel masks before the Laslett transform is computed. This should be set to \code{TRUE} for very complicated polygons. } \item{type}{ Type of tangent points to be detected. This also determines the direction of contraction in the set transformation. Default is \code{type="lower"}. } } \details{ This function finds the lower tangent points of the spatial region \code{X}, then applies Laslett's Transform to the space, and records the transformed positions of the lower tangent points. Laslett's transform is a diagnostic for the Boolean Model. A test of the Boolean model can be performed by applying a test of CSR to the transformed tangent points. See the Examples. The rationale is that, if the region \code{X} was generated by a Boolean model with convex grains, then the lower tangent points of \code{X}, when subjected to Laslett's transform, become a Poisson point process (Cressie, 1993, section 9.3.5; Molchanov, 1997; Barbour and Schmidt, 2001). Intuitively, Laslett's transform is a way to account for the fact that tangent points of \code{X} cannot occur \emph{inside} \code{X}. It treats the interior of \code{X} as empty space, and collapses this empty space so that only the \emph{exterior} of \code{X} remains. In this collapsed space, the tangent points are completely random. Formally, Laslett's transform is a random (i.e. data-dependent) spatial transformation which maps each spatial location \eqn{(x,y)} to a new location \eqn{(x',y)} at the same height \eqn{y}. The transformation is defined so that \eqn{x'} is the total \emph{uncovered} length of the line segment from \eqn{(0,y)} to \eqn{(x,y)}, that is, the total length of the parts of this segment that fall outside the region \code{X}. 
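In code, this definition can be approximated by sampling the segment; the following rough sketch is ours, not part of \pkg{spatstat} (the function name and the discretisation parameter \code{n} are hypothetical), and it assumes \code{X} is a window (class \code{"owin"}) whose frame has its left edge at \eqn{x=0}. \preformatted{
  # sketch: approximate the transformed abscissa x' for a location (x,y)
  laslettX <- function(X, x, y, n=1024) {
    xx <- seq(0, x, length.out=n)
    covered <- inside.owin(xx, rep(y, n), X)  # sample points covered by X
    x * mean(!covered)                        # uncovered length of segment
  }
}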
In more colourful terms, suppose we use an abacus to display a pixellated version of \code{X}. Each wire of the abacus represents one horizontal line in the pixel image. Each pixel lying \emph{outside} the region \code{X} is represented by a bead of the abacus; pixels \emph{inside} \code{X} are represented by the absence of a bead. Next we find any beads which are lower tangent points of \code{X}, and paint them green. Then Laslett's Transform is applied by pushing all beads to the left, as far as possible. The final locations of all the beads provide a new spatial region, inside which is the point pattern of tangent points (marked by the green-painted beads). If \code{plotit=TRUE} (the default), a before-and-after plot is generated, showing the region \code{X} and the tangent points before and after the transformation. This plot can also be generated by calling \code{plot(a)} where \code{a} is the object returned by the function \code{laslett}. If the argument \code{type} is given, then this determines the type of tangents that will be detected, and also the direction of contraction in Laslett's transform. The computation is performed by first rotating \code{X}, applying Laslett's transform for lower tangent points, then rotating back. There are separate algorithms for polygonal windows and pixellated windows (binary masks). The polygonal algorithm may be slow for very complicated polygons. If this happens, setting \code{discretise=TRUE} will convert the polygonal window to a binary mask and invoke the pixel raster algorithm. } \value{ A list, which also belongs to the class \code{"laslett"} so that it can immediately be printed and plotted. The list elements are: \describe{ \item{oldX:}{the original dataset \code{X};} \item{TanOld:}{a point pattern, whose window is \code{Frame(X)}, containing the lower tangent points of \code{X};} \item{TanNew:}{a point pattern, whose window is the Laslett transform of \code{Frame(X)}, and which contains the Laslett-transformed positions of the tangent points;} \item{Rect:}{a rectangular window, which is the largest rectangle lying inside the transformed set;} \item{df:}{a data frame giving the locations of the tangent points before and after transformation. } \item{type:}{character string specifying the type of tangents.} } } \references{ Barbour, A.D. and Schmidt, V. (2001) On Laslett's Transform for the Boolean Model. \emph{Advances in Applied Probability} \bold{33}(1), 1--5. Cressie, N.A.C. (1993) \emph{Statistics for spatial data}, second edition. John Wiley and Sons. Molchanov, I. (1997) \emph{Statistics of the Boolean Model for Practitioners and Mathematicians}. Wiley. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{plot.laslett}} } \examples{ a <- laslett(heather$coarse) transformedHeather <- with(a, Window(TanNew)) plot(transformedHeather, invert=TRUE) with(a, clarkevans.test(TanNew[Rect], correction="D", nsim=39)) X <- discs(runifrect(15) \%mark\% 0.2, npoly=16) b <- laslett(X, type="left") b } \keyword{spatial} \keyword{manip} spatstat.core/man/Softcore.Rd0000644000176200001440000001321314141452520015670 0ustar liggesusers\name{Softcore} \alias{Softcore} \title{The Soft Core Point Process Model} \description{ Creates an instance of the Soft Core point process model which can then be fitted to point pattern data. } \usage{ Softcore(kappa, sigma0=NA) } \arguments{ \item{kappa}{The exponent \eqn{\kappa}{kappa} of the Soft Core interaction} \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. 
A positive number. } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Soft Core process with exponent \eqn{\kappa}{kappa}. } \details{ The (stationary) Soft Core point process with parameters \eqn{\beta}{beta} and \eqn{\sigma}{sigma}, and exponent \eqn{\kappa}{kappa}, is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points contributes a factor \deqn{ \exp \left\{ - \left( \frac{\sigma}{d} \right)^{2/\kappa} \right\} }{ exp( - (sigma/d)^(2/kappa) ) } to the density, where \eqn{d} is the distance between the two points. See the Examples for a plot of this interaction curve. Thus the process has probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \exp \left\{ - \sum_{i < j} \left( \frac{\sigma}{||x_i-x_j||} \right)^{2/\kappa} \right\} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) exp( - sum (sigma/||x[i]-x[j]||)^(2/kappa)) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{\alpha}{alpha} is the normalising constant, and the sum on the right hand side is over all unordered pairs of points of the pattern. This model describes an ``ordered'' or ``inhibitive'' process, with the strength of inhibition decreasing smoothly with distance. The interaction is controlled by the parameters \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}. \itemize{ \item The \emph{spatial scale} of interaction is controlled by the parameter \eqn{\sigma}{sigma}, which is a positive real number interpreted as a distance, expressed in the same units of distance as the spatial data. The parameter \eqn{\sigma}{sigma} is the distance at which the pair potential reaches the threshold value \eqn{e^{-1} \approx 0.37}{exp(-1) ~= 0.37}. \item The \emph{shape} of the interaction function is controlled by the exponent \eqn{\kappa}{kappa} which is a dimensionless number in the range \eqn{(0,1)}, with larger values corresponding to a flatter shape (or a more gradual decay rate). The process is well-defined only for \eqn{\kappa}{kappa} in \eqn{(0,1)}. The limit of the model as \eqn{\kappa \to 0}{kappa -> 0} is the hard core process with hard core distance \eqn{h=\sigma}{h=sigma}. \item The \dQuote{strength} of the interaction is determined by both of the parameters \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}. The larger the value of \eqn{\kappa}{kappa}, the wider the range of distances over which the interaction has an effect. If \eqn{\sigma}{sigma} is very small, the interaction is very weak for all practical purposes (theoretically if \eqn{\sigma = 0}{sigma = 0} the model reduces to the Poisson point process). } The nonstationary Soft Core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant \eqn{\beta}{beta}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Soft Core process pairwise interaction is yielded by the function \code{Softcore()}. See the examples below. The main argument is the exponent \code{kappa}.
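The pair interaction factor itself can be inspected directly from the formula above. Here is a small sketch (for illustration only, with an assumed value \code{sigma=0.1}) showing the effect of \code{kappa}:
\preformatted{
  softpot <- function(d, sigma, kappa) exp(-(sigma/d)^(2/kappa))
  curve(softpot(x, sigma=0.1, kappa=0.5), from=0, to=0.5,
        xlab="d", ylab="pair interaction factor")
  # the curve passes through exp(-1), approximately 0.37, at d = sigma
  abline(v=0.1, h=exp(-1), lty=3)
}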
When \code{kappa} is fixed, the model becomes an exponential family with canonical parameters \eqn{\log \beta}{log(beta)} and \deqn{ \log \gamma = \frac{2}{\kappa} \log\sigma }{ log(gamma) = (2/kappa) log(sigma) } The canonical parameters are estimated by \code{\link{ppm}()}, not fixed in \code{Softcore()}. The optional argument \code{sigma0} can be used to improve numerical stability. If \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Ogata, Y. and Tanemura, M. (1981). Estimation of interaction potentials of spatial point patterns through the maximum likelihood procedure. \emph{Annals of the Institute of Statistical Mathematics}, B \bold{33}, 315--338. Ogata, Y. and Tanemura, M. (1984). Likelihood analysis of spatial point patterns. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 496--518. } \examples{ # fit the stationary Soft Core process to `cells' fit5 <- ppm(cells ~1, Softcore(kappa=0.5), correction="isotropic") # study shape of interaction and explore effect of parameters fit2 <- update(fit5, Softcore(kappa=0.2)) fit8 <- update(fit5, Softcore(kappa=0.8)) plot(fitin(fit2), xlim=c(0, 0.4), main="Pair potential (sigma = 0.1)", xlab=expression(d), ylab=expression(h(d)), legend=FALSE) plot(fitin(fit5), add=TRUE, col=4) plot(fitin(fit8), add=TRUE, col=3) legend("bottomright", col=c(1,4,3), lty=1, legend=expression(kappa==0.2, kappa==0.5, kappa==0.8)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/bw.scott.Rd0000644000176200001440000000603114141452520015647 0ustar liggesusers\name{bw.scott} \alias{bw.scott} \alias{bw.scott.iso} \title{ Scott's Rule for Bandwidth Selection for Kernel Density } \description{ Use Scott's rule of thumb to determine the smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.scott(X, isotropic=FALSE, d=NULL) bw.scott.iso(X) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{isotropic}{ Logical value indicating whether to compute a single bandwidth for an isotropic Gaussian kernel (\code{isotropic=TRUE}) or separate bandwidths for each coordinate axis (\code{isotropic=FALSE}, the default). } \item{d}{ Advanced use only. An integer value that should be used in Scott's formula instead of the true number of spatial dimensions. } } \details{ These functions select a bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}} or other appropriate functions. They can be applied to a point pattern belonging to any class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}. The bandwidth \eqn{\sigma}{sigma} is computed by the rule of thumb of Scott (1992, page 152, equation 6.42). The bandwidth is proportional to \eqn{n^{-1/(d+4)}}{n^(-1/(d+4))} where \eqn{n} is the number of points and \eqn{d} is the number of spatial dimensions. This rule is very fast to compute. It typically produces a larger bandwidth than \code{\link{bw.diggle}}. It is useful for estimating a gradual trend. If \code{isotropic=FALSE} (the default), \code{bw.scott} provides a separate bandwidth for each coordinate axis, and the result of the function is a vector, of length equal to the number of coordinates.
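For a two-dimensional point pattern, the anisotropic version of the rule amounts to roughly the following hand calculation (a sketch of the rule of thumb, not necessarily identical to the internal code):
\preformatted{
  X <- cells
  n <- npoints(X)
  c(sd(X$x), sd(X$y)) * n^(-1/6)   # exponent is -1/(d+4) with d = 2
  bw.scott(X)                      # compare with the actual function
}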
If \code{isotropic=TRUE}, a single bandwidth value is computed and the result is a single numeric value. \code{bw.scott.iso(X)} is equivalent to \code{bw.scott(X, isotropic=TRUE)}. The default value of \eqn{d} is as follows: \tabular{ll}{ \bold{class} \tab \bold{dimension} \cr \code{"ppp"} \tab 2 \cr \code{"lpp"} \tab 1 \cr \code{"pp3"} \tab 3 \cr \code{"ppx"} \tab number of spatial coordinates } The use of \code{d=1} for point patterns on a linear network (class \code{"lpp"}) was proposed by McSwiggan et al (2016) and Rakshit et al (2019). } \value{ A numerical value giving the selected bandwidth, or a numerical vector giving the selected bandwidths for each coordinate. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ hickory <- split(lansing)[["hickory"]] b <- bw.scott(hickory) b if(interactive()) { plot(density(hickory, b)) } bw.scott.iso(hickory) bw.scott(osteo$pts[[1]]) } \references{ Scott, D.W. (1992) \emph{Multivariate Density Estimation. Theory, Practice and Visualization}. New York: Wiley. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/ppm.ppp.Rd0000644000176200001440000010442114141452520015500 0ustar liggesusers\name{ppm.ppp} \alias{ppm.ppp} \alias{ppm.quad} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Point Pattern Data } \description{ Fits a point process model to an observed point pattern. } \usage{ \method{ppm}{ppp}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) \method{ppm}{quad}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) } \arguments{ \item{Q}{ A data point pattern (of class \code{"ppp"}) to which the model will be fitted, or a quadrature scheme (of class \code{"quad"}) containing this pattern. } \item{trend}{ An \R formula object specifying the spatial trend to be fitted. The default formula, \code{~1}, indicates the model is stationary and no trend is to be fitted. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{Ignored.} \item{data,covariates}{ The values of any spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the likelihood or pseudolikelihood should be restricted. See Details. 
The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{clipwin}{ Optional. A spatial window (object of class \code{"owin"}) to which data will be restricted, before model-fitting is performed. See Details. } \item{covfunargs}{ A named list containing the values of any additional arguments required by covariate functions. } \item{correction}{ The name of the edge correction to be used. The default is \code{"border"} indicating the border correction. Other possibilities may include \code{"Ripley"}, \code{"isotropic"}, \code{"periodic"}, \code{"translate"} and \code{"none"}, depending on the \code{interaction}. } \item{rbord}{ If \code{correction = "border"} this argument specifies the distance by which the window should be eroded for the border correction. } \item{use.gam}{ Logical flag; if \code{TRUE} then computations are performed using \code{gam} instead of \code{\link{glm}}. } \item{method}{ The method used to fit the model. Options are \code{"mpl"} for the method of Maximum PseudoLikelihood, \code{"logi"} for the Logistic Likelihood method, \code{"VBlogi"} for the Variational Bayes Logistic Likelihood method, and \code{"ho"} for the Huang-Ogata approximate maximum likelihood method. } \item{forcefit}{ Logical flag for internal use. If \code{forcefit=FALSE}, some trivial models will be fitted by a shortcut. If \code{forcefit=TRUE}, the generic fitting method will always be used. } \item{emend,project}{ (These are equivalent: \code{project} is an older name for \code{emend}.) Logical value. Setting \code{emend=TRUE} will ensure that the fitted model is always a valid point process by applying \code{\link{emend.ppm}}. } \item{prior.mean}{ Optional vector of prior means for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{prior.var}{ Optional prior variance covariance matrix for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{nd}{ Optional. Integer or pair of integers. The dimension of the grid of dummy points (\code{nd * nd} or \code{nd[1] * nd[2]}) used to evaluate the integral in the pseudolikelihood. Incompatible with \code{eps}. } \item{eps}{ Optional. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{gcontrol}{ Optional. List of parameters passed to \code{\link{glm.control}} (or passed to \code{\link{gam.control}} if \code{use.gam=TRUE}) controlling the model-fitting algorithm. } \item{nsim}{ Number of simulated realisations to generate (for \code{method="ho"}) } \item{nrmh}{ Number of Metropolis-Hastings iterations for each simulated realisation (for \code{method="ho"}) } \item{start,control}{ Arguments passed to \code{\link{rmh}} controlling the behaviour of the Metropolis-Hastings algorithm (for \code{method="ho"}) } \item{verb}{ Logical flag indicating whether to print progress reports (for \code{method="ho"}) } \item{callstring}{ Internal use only. } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ \bold{NOTE:} This help page describes the \bold{old syntax} of the function \code{ppm}, described in many older documents. This old syntax is still supported. 
However, if you are learning about \code{ppm} for the first time, we recommend you use the \bold{new syntax} described in the help file for \code{\link{ppm}}. This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. \describe{ \item{basic use:}{ In basic use, \code{Q} is a point pattern dataset (an object of class \code{"ppp"}) to which we wish to fit a model. The syntax of \code{ppm()} is closely analogous to the \R functions \code{\link{glm}} and \code{gam}. The analogy is: \tabular{ll}{ \bold{glm} \tab \bold{ppm} \cr \code{formula} \tab \code{trend} \cr \code{family} \tab \code{interaction} } The point process model to be fitted is specified by the arguments \code{trend} and \code{interaction} which are respectively analogous to the \code{formula} and \code{family} arguments of glm(). Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the argument \code{trend}. This is an \R formula object, which may be expressed in terms of the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, or the variables in \code{covariates} (if supplied), or both. It specifies the \bold{logarithm} of the first order potential of the process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If \code{trend} is absent or equal to the default, \code{~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The models currently available are: \GibbsInteractionsList. See the examples below. It is also possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. } \item{Models with covariates:}{ To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should be supplied in the argument \code{covariates}. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. Typically the argument \code{covariates} is a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. 
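For example, a call might look like \code{ppm(X, ~ elev, covariates=list(elev=elev.im))}, where \code{elev.im} is a hypothetical pixel image of elevation values.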
} \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. For marked point pattern data, the covariate can be a \code{function(x, y, marks)} or \code{function(x, y, marks, ...)}. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{covariates} is a list, the list entries should have names corresponding to the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{covariates}. If \code{covariates} is a data frame, \code{Q} must be a quadrature scheme (see under Quadrature Schemes below). Then \code{covariates} must have as many rows as there are points in \code{Q}. The \eqn{i}th row of \code{covariates} should contain the values of spatial variables which have been observed at the \eqn{i}th point of \code{Q}. } \item{Quadrature schemes:}{ In advanced use, \code{Q} may be a `quadrature scheme'. This was originally just a technicality but it has turned out to have practical uses, as we explain below. Quadrature schemes are required for our implementation of the method of maximum pseudolikelihood. The definition of the pseudolikelihood involves an integral over the spatial window containing the data. In practice this integral must be approximated by a finite sum over a set of quadrature points. We use the technique of Baddeley and Turner (2000), a generalisation of the Berman-Turner (1992) device. In this technique the quadrature points for the numerical approximation include all the data points (points of the observed point pattern) as well as additional `dummy' points. Quadrature schemes are also required for the method of maximum logistic likelihood, which combines the data points with additional `dummy' points. A quadrature scheme is an object of class \code{"quad"} (see \code{\link{quad.object}}) which specifies both the data point pattern and the dummy points for the quadrature scheme, as well as the quadrature weights associated with these points. 
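For example, a quadrature scheme with a finer grid of dummy points can be constructed explicitly and then passed to \code{ppm} (a small sketch using standard tools):
\preformatted{
  Q <- quadscheme(nztrees, nd=64)   # roughly a 64 x 64 grid of dummy points
  fit <- ppm(Q, ~x)
}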
If \code{Q} is simply a point pattern (of class \code{"ppp"}, see \code{\link{ppp.object}}) then it is interpreted as specifying the data points only; a set of dummy points specified by \code{\link{default.dummy}()} is added, and the default weighting rule is invoked to compute the quadrature weights. Finer quadrature schemes (i.e. those with more dummy points) generally yield a better approximation, at the expense of higher computational load. An easy way to fit models using a finer quadrature scheme is to let \code{Q} be the original point pattern data, and use the argument \code{nd} to determine the number of dummy points in the quadrature scheme. Complete control over the quadrature scheme is possible. See \code{\link{quadscheme}} for an overview. Use \code{quadscheme(X, D, method="dirichlet")} to compute quadrature weights based on the Dirichlet tessellation, or \code{quadscheme(X, D, method="grid")} to compute quadrature weights by counting points in grid squares, where \code{X} and \code{D} are the patterns of data points and dummy points respectively. Alternatively use \code{\link{pixelquad}} to make a quadrature scheme with a dummy point at every pixel in a pixel image. A practical advantage of quadrature schemes arises when we want to fit a model involving covariates (e.g. soil pH). Suppose we have only been able to observe the covariates at a small number of locations. Suppose \code{cov.dat} is a data frame containing the values of the covariates at the data points (i.e.\ \code{cov.dat[i,]} contains the observations for the \code{i}th data point) and \code{cov.dum} is another data frame (with the same columns as \code{cov.dat}) containing the covariate values at another set of points whose locations are given by the point pattern \code{Y}. Then setting \code{Q = quadscheme(X,Y)} combines the data points and dummy points into a quadrature scheme, and \code{covariates = rbind(cov.dat, cov.dum)} combines the covariate data frames. We can then fit the model by calling \code{ppm(Q, ..., covariates)}. } \item{Model-fitting technique:}{ There are several choices for the technique used to fit the model. \describe{ \item{method="mpl"}{ (the default): the model will be fitted by maximising the pseudolikelihood (Besag, 1975) using the Berman-Turner computational approximation (Berman and Turner, 1992; Baddeley and Turner, 2000). Maximum pseudolikelihood is equivalent to maximum likelihood if the model is a Poisson process. Maximum pseudolikelihood is biased if the interpoint interaction is very strong, unless there is a large number of dummy points. The default settings for \code{method='mpl'} specify a moderately large number of dummy points, striking a compromise between speed and accuracy. } \item{method="logi":}{ the model will be fitted by maximising the logistic likelihood (Baddeley et al, 2014). This technique is roughly equivalent in speed to maximum pseudolikelihood, but is believed to be less biased. Because it is less biased, the default settings for \code{method='logi'} specify a relatively small number of dummy points, so that this method is the fastest, in practice. } \item{method="VBlogi":}{ the model will be fitted in a Bayesian setup by maximising the posterior probability density for the canonical model parameters. This uses the variational Bayes approximation to the posterior derived from the logistic likelihood as described in Rajala (2014). 
The prior is assumed to be multivariate Gaussian with mean vector \code{prior.mean} and variance-covariance matrix \code{prior.var}. } \item{method="ho":}{ the model will be fitted by applying the approximate maximum likelihood method of Huang and Ogata (1999). See below. The Huang-Ogata method is slower than the other options, but has better statistical properties. } } Note that \code{method='logi'}, \code{method='VBlogi'} and \code{method='ho'} involve randomisation, so that the results are subject to random variation. } \item{Huang-Ogata method:}{ If \code{method="ho"} then the model will be fitted using the Huang-Ogata (1999) approximate maximum likelihood method. First the model is fitted by maximum pseudolikelihood as described above, yielding an initial estimate of the parameter vector \eqn{\theta_0}{theta0}. From this initial model, \code{nsim} simulated realisations are generated. The score and Fisher information of the model at \eqn{\theta=\theta_0}{theta=theta0} are estimated from the simulated realisations. Then one step of the Fisher scoring algorithm is taken, yielding an updated estimate \eqn{\theta_1}{theta1}. The corresponding model is returned. Simulated realisations are generated using \code{\link{rmh}}. The iterative behaviour of the Metropolis-Hastings algorithm is controlled by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. As a shortcut, the argument \code{nrmh} determines the number of Metropolis-Hastings iterations run to produce one simulated realisation (if \code{control} is absent). Also if \code{start} is absent or equal to \code{NULL}, it defaults to \code{list(n.start=N)} where \code{N} is the number of points in the data point pattern. } \item{Edge correction}{ Edge correction should be applied to the sufficient statistics of the model, to reduce bias. The argument \code{correction} is the name of an edge correction method. The default \code{correction="border"} specifies the border correction, in which the quadrature window (the domain of integration of the pseudolikelihood) is obtained by trimming off a margin of width \code{rbord} from the observation window of the data pattern. Not all edge corrections are implemented (or implementable) for arbitrary windows. Other options depend on the argument \code{interaction}, but these generally include \code{correction="periodic"} (the periodic or toroidal edge correction in which opposite edges of a rectangular window are identified) and \code{correction="translate"} (the translation correction, see Baddeley 1998 and Baddeley and Turner 2000). For pairwise interaction models there is also Ripley's isotropic correction, identified by \code{correction="isotropic"} or \code{"Ripley"}. } \item{Subsetting}{ The arguments \code{subset} and \code{clipwin} specify that the model should be fitted to a restricted subset of the available data. These arguments are equivalent for Poisson point process models, but different for Gibbs models. If \code{clipwin} is specified, then all the available data will be restricted to this spatial region, and data outside this region will be discarded, before the model is fitted. If \code{subset} is specified, then no data are deleted, but the domain of integration of the likelihood or pseudolikelihood is restricted to the \code{subset}. 
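For example (a sketch; the behaviour of the two calls differs for Gibbs models, as explained below):
\preformatted{
  W <- owin(c(0, 0.5), c(0, 0.5))
  ppm(cells, ~1, Strauss(r=0.07), clipwin=W)  # discard data outside W
  ppm(cells, ~1, Strauss(r=0.07), subset=W)   # keep data, restrict integration
}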
For Poisson models, these two arguments have the same effect; but for a Gibbs model, interactions between points inside and outside the \code{subset} are taken into account, while interactions between points inside and outside the \code{clipwin} are ignored. } } } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters:}{ Other parameters are called \emph{irregular}. } } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells, ~1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory is available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. Besag, J. Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24} (1975) 179--195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62} (1994) 99--117. Huang, F. and Ogata, Y. Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models.
\emph{Journal of Computational and Graphical Statistics} \bold{8} (1999) 510--530. Jensen, J.L. and Moeller, M. Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1} (1991) 445--461. Jensen, J.L. and Kuensch, H.R. On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{46} (1994) 475--486. Rajala, T. (2014) \emph{A note on Bayesian logistic regression for spatial exponential family Gibbs point processes}, Preprint on ArXiv.org. \url{https://arxiv.org/abs/1411.0539} } \seealso{ \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{emend.ppm}} for ensuring the fitted model is a valid point process. } \section{Error and Warning Messages}{ Some common error messages and warning messages are listed below, with explanations. \describe{ \item{\dQuote{System is computationally singular}}{ The Fisher information matrix of the fitted model has a determinant close to zero, so that the matrix cannot be inverted, and the software cannot calculate standard errors or confidence intervals. This error is usually reported when the model is printed, because the \code{print} method calculates standard errors for the fitted parameters. Singularity usually occurs because the spatial coordinates in the original data were very large numbers (e.g. expressed in metres) so that the fitted coefficients were very small numbers. The simple remedy is to \bold{rescale the data}, for example, to convert from metres to kilometres by \code{X <- \link{rescale}(X, 1000)}, then re-fit the model. Singularity can also occur if the covariate values are very large numbers, or if the covariates are approximately collinear. } \item{\dQuote{Covariate values were NA or undefined at X\% (M out of N) of the quadrature points}}{ The covariate data (typically a pixel image) did not provide values of the covariate at some of the spatial locations in the observation window of the point pattern. This means that the spatial domain of the pixel image does not completely cover the observation window of the point pattern. If the percentage is small, this warning can be ignored; typically it happens because of rounding effects which cause the pixel image to be one-pixel-width narrower than the observation window. However if more than a few percent of covariate values are undefined, it would be prudent to check that the pixel images are correct, and are correctly registered in their spatial relation to the observation window. } \item{\dQuote{Some tiles with positive area do not contain any quadrature points: relative error = X\%}}{ A problem has arisen when creating the quadrature scheme used to fit the model. In the default rule for computing the quadrature weights, space is divided into rectangular tiles, and the number of quadrature points (data and dummy points) in each tile is counted. It is possible for a tile with non-zero area to contain no quadrature points; in this case, the quadrature scheme will contribute a bias to the model-fitting procedure.
\bold{A small relative error (less than 2 percent) is not important.} Relative errors of a few percent can occur because of the shape of the window. If the relative error is greater than about 5 percent, we recommend trying different parameters for the quadrature scheme, perhaps setting a larger value of \code{nd} to increase the number of dummy points. A relative error greater than 10 percent indicates a major problem with the input data: in this case, extract the quadrature scheme by applying \code{\link{quad.ppm}} to the fitted model, and inspect it. (The most likely cause of this problem is that the spatial coordinates of the original data were not handled correctly, for example, coordinates of the locations and the window boundary were incompatible.) } \item{\dQuote{Model is unidentifiable}}{ It is not possible to estimate all the model parameters from this dataset. The error message gives a further explanation, such as \dQuote{data pattern is empty}. Choose a simpler model, or check the data. } \item{\dQuote{N data points are illegal (zero conditional intensity)}}{ In a Gibbs model (i.e. with interaction between points), the conditional intensity may be zero at some spatial locations, indicating that the model forbids the presence of a point at these locations. However if the conditional intensity is zero \emph{at a data point}, this means that the model is inconsistent with the data. Modify the interaction parameters so that the data point is not illegal (e.g. reduce the value of the hard core radius) or choose a different interaction. } } } \section{Warnings}{ The implementation of the Huang-Ogata method is experimental; several bugs were fixed in \pkg{spatstat} 1.19-0. See the comments above about the possible inefficiency and bias of the maximum pseudolikelihood estimator. The accuracy of the Berman-Turner approximation to the pseudolikelihood depends on the number of dummy points used in the quadrature scheme. The number of dummy points should at least equal the number of data points. The parameter values of the fitted model do not necessarily determine a valid point process. Some of the point process models are only defined when the parameter values lie in a certain subset. For example the Strauss process only exists when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to \eqn{1}, corresponding to a value of \code{ppm()$theta[2]} less than or equal to \code{0}. By default (if \code{emend=FALSE}) the algorithm maximises the pseudolikelihood without constraining the parameters, and does not apply any checks for sanity after fitting the model. This is because the fitted parameter value could be useful information for data analysis. To constrain the parameters to ensure that the model is a valid point process, set \code{emend=TRUE}. See also the functions \code{\link{valid.ppm}} and \code{\link{emend.ppm}}. The \code{trend} formula should not use any variable names beginning with the prefixes \code{.mpl} or \code{Interaction} as these names are reserved for internal use. The data frame \code{covariates} should have as many rows as there are points in \code{Q}. It should not contain variables called \code{x}, \code{y} or \code{marks} as these names are reserved for the Cartesian coordinates and the marks. If the model formula involves one of the functions \code{poly()}, \code{bs()} or \code{ns()} (e.g. applied to spatial coordinates \code{x} and \code{y}), the fitted coefficients can be misleading. 
The resulting fit is not to the raw spatial variates (\code{x}, \code{x^2}, \code{x*y}, etc.) but to a transformation of these variates. The transformation is implemented by \code{poly()} in order to achieve better numerical stability. However the resulting coefficients are appropriate for use with the transformed variates, not with the raw variates. This affects the interpretation of the constant term in the fitted model, \code{logbeta}. Conventionally, \eqn{\beta}{beta} is the background intensity, i.e. the value taken by the conditional intensity function when all predictors (including spatial or ``trend'' predictors) are set equal to \eqn{0}. However the coefficient actually produced is the value that the log conditional intensity takes when all the predictors, including the \emph{transformed} spatial predictors, are set equal to \code{0}, which is not the same thing. Worse still, the result of \code{\link{predict.ppm}} can be completely wrong if the trend formula contains one of the functions \code{poly()}, \code{bs()} or \code{ns()}. This is a weakness of the underlying function \code{\link{predict.glm}}. If you wish to fit a polynomial trend, we offer an alternative to \code{\link{poly}()}, namely \code{polynom()}, which avoids the difficulty induced by transformations. It is completely analogous to \code{poly} except that it does not orthonormalise. The resulting coefficient estimates then have their natural interpretation and can be predicted correctly. Numerical stability may be compromised. Values of the maximised pseudolikelihood are not comparable if they have been obtained with different values of \code{rbord}. } \examples{ # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees) ppm(nztrees ~ 1) # equivalent. Q <- quadscheme(nztrees) ppm(Q) # equivalent. fit1 <- ppm(nztrees, ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated # For other examples, see help(ppm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/Concom.Rd0000644000176200001440000001325414141452520015327 0ustar liggesusers\name{Concom} \alias{Concom} \title{The Connected Component Process Model} \description{ Creates an instance of the Connected Component point process model which can then be fitted to point pattern data. } \usage{ Concom(r) } \arguments{ \item{r}{Threshold distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the connected component process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the connected component process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the connected component interaction is yielded by the function \code{Concom()}. See the examples below. In \bold{standard form}, the connected component process (Baddeley and \Moller, 1989) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{kappa} and interaction parameter \eqn{\gamma}{gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-C(x)} }{ f(x[1],\ldots,x[n]) = alpha . kappa^n(x) . gamma^(-C(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{C(x)} is defined below. Here \eqn{\alpha}{alpha} is a normalising constant. To define the term \eqn{C(x)}, suppose that we construct a planar graph by drawing an edge between each pair of points \eqn{x_i,x_j}{x[i],x[j]} which are less than \eqn{r} units apart. Two points belong to the same connected component of this graph if they are joined by a path in the graph. Then \eqn{C(x)} is the number of connected components of the graph. The interaction parameter \eqn{\gamma}{gamma} can be any positive number. If \eqn{\gamma = 1}{gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{kappa}. If \eqn{\gamma < 1}{gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{gamma > 1} the process is clustered. Thus, a connected-component interaction process can be used to model either clustered or regular point patterns. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{-U(x)} }{ f(x[1],\ldots,x[n]) = alpha . beta^n(x) . gamma^(-U(x)) } where \eqn{\beta}{beta} is the new intensity parameter and \eqn{U(x) = C(x) - n(x)} is the interaction potential. In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density (so the first order trend is \eqn{\beta}{beta}). The quantity \eqn{U(x)} is a true interaction potential, in the sense that \eqn{U(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together. When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-U(x)} increases by zero or a positive integer. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is a positive integer \eqn{k} if there are \eqn{k} different connected components of \eqn{x} that lie close to \eqn{u}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{beta * eta^delta} to the probability density, where \eqn{\delta}{delta} is the increase in potential. If desired, the original parameter \eqn{\kappa}{kappa} can be recovered from the canonical parameter by \eqn{\kappa = \beta\gamma}{kappa = beta * gamma}. The \emph{nonstationary} connected component process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant \eqn{\beta}{beta}. Note the only argument of \code{Concom()} is the threshold distance \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Concom()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \section{Edge correction}{ The interaction distance of this process is infinite. There are no well-established procedures for edge correction for fitting such models, and accordingly the model-fitting function \code{\link{ppm}} will give an error message saying that the user must specify an edge correction. A reasonable solution is to use the border correction at the same distance \code{r}, as shown in the Examples.
} \examples{ # prints a sensible description of itself Concom(r=0.1) # Fit the stationary connected component process to redwood data ppm(redwood, ~1, Concom(r=0.07), rbord=0.07) # Fit the stationary connected component process to `cells' data ppm(cells, ~1, Concom(r=0.06), rbord=0.06) # eta=0 indicates hard core process. # Fit a nonstationary connected component model # with log-cubic polynomial trend # ppm(swedishpines, ~polynom(x/10,y/10,3), Concom(r=7), rbord=7) } \references{ Baddeley, A.J. and \Moller, J. (1989) Nearest-neighbour Markov point processes and random sets. \emph{International Statistical Review} \bold{57}, 89--121. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/polynom.Rd0000644000176200001440000000324714141452520015607 0ustar liggesusers\name{polynom} \alias{polynom} \title{ Polynomial in One or Two Variables } \description{ This function is used to represent a polynomial term in a model formula. It computes the homogeneous terms in the polynomial of degree \code{n} in one variable \code{x} or two variables \code{x,y}. } \usage{ polynom(x, \dots) } \arguments{ \item{x}{ A numerical vector. } \item{\dots}{ Either a single integer \code{n} specifying the degree of the polynomial, or two arguments \code{y,n} giving another vector of data \code{y} and the degree of the polynomial. } } \details{ This function is typically used inside a model formula in order to specify the most general possible polynomial of order \code{n} involving one numerical variable \code{x} or two numerical variables \code{x,y}. It is equivalent to \code{\link[stats]{poly}(, raw=TRUE)}. If only one numerical vector argument \code{x} is given, the function computes the vectors \code{x^k} for \code{k = 1, 2, \dots, n}. These vectors are combined into a matrix with \code{n} columns. If two numerical vector arguments \code{x,y} are given, the function computes the vectors \code{x^k * y^m} for \code{k >= 0} and \code{m >= 0} satisfying \code{0 < k + m <= n}. These vectors are combined into a matrix with one column for each homogeneous term. } \value{ A numeric matrix, with rows corresponding to the entries of \code{x}, and columns corresponding to the terms in the polynomial. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{poly}}, \code{\link{harmonic}} } \examples{ x <- 1:4 y <- 10 * (0:3) polynom(x, 3) polynom(x, y, 3) } \keyword{arith} spatstat.core/man/SatPiece.Rd0000644000176200001440000001072514141452520015606 0ustar liggesusers\name{SatPiece} \alias{SatPiece} \title{Piecewise Constant Saturated Pairwise Interaction Point Process Model} \description{ Creates an instance of a saturated pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ SatPiece(r, sat) } \arguments{ \item{r}{vector of jump points for the potential function} \item{sat}{ vector of saturation values, or a single saturation value } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is a generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to the case of multiple interaction distances. It can also be described as the saturated analogue of a pairwise interaction process with piecewise-constant pair potential, described in \code{\link{PairPiece}}. 
The saturated point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], ..., r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],...,s[k]}, intensity parameter \eqn{\beta}{beta} and interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], ..., gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x_i, X) ... gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x_i, X) = min(s[j], t(j, x_i, X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie at a distance between \eqn{r_{j-1}}{r[j-1]} and \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. We take \eqn{r_0 = 0}{r[0] = 0} so that \eqn{t_1(x_i,X)}{t(1, x[i], X)} is the number of points of \eqn{X} that lie within a distance \eqn{r_1}{r[1]} of the point \eqn{x_i}{x[i]}. \code{SatPiece} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{SatPiece()}. See the examples below. Simulation of this point process model is not yet implemented. This model is not locally stable (the conditional intensity is unbounded). The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} corresponds to the distance range from \code{0} to \code{r[1]}, and \code{sat[2]} to the distance range from \code{r[1]} to \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every distance range. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x_i, X) = t(j, x_i, X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{SatPiece}} are the square roots of the parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{BadGey}}.
} \examples{ SatPiece(c(0.1,0.2), c(1,1)) # prints a sensible description of itself SatPiece(c(0.1,0.2), 1) ppm(cells ~1, SatPiece(c(0.07, 0.1, 0.13), 2)) # fit a stationary piecewise constant Saturated pairwise interaction process # ppm(cells ~polynom(x,y,3), SatPiece(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend } \author{\adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat.core/man/pcfdot.inhom.Rd0000644000176200001440000001144514141452520016501 0ustar liggesusers\name{pcfdot.inhom} \alias{pcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Type-i-To-Any-Type) } \description{ Estimates the inhomogeneous multitype pair correlation function (from type \eqn{i} to any type) for a multitype point pattern. } \usage{ pcfdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity function of the point pattern \code{X}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdadot} is estimated by kernel smoothing. } } \details{ The inhomogeneous multitype (type \eqn{i} to any type) pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} is a summary of the dependence between different types of points in a multitype spatial point process that does not have a uniform density of points. 
The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and another point of any type at location \eqn{y}, where \eqn{x} and \eqn{y} are separated by a distance \eqn{r}, is equal to \deqn{ p(r) = \lambda_i(x) \lambda(y) g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda(y) * g[i.](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}, and where \eqn{\lambda}{lambda} is the intensity function of the points of all types. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda(y) \,{\rm d}x \, {\rm d}y}{p(r) = lambda[i](x) * lambda(y) dx dy} so \eqn{g_{i\bullet}(r) = 1}{g[i.](r) = 1}. The command \code{pcfdot.inhom} estimates the inhomogeneous multitype pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdadot} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{i\bullet}(r)}{g[i.](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfdot}}, \code{\link{pcfcross.inhom}} } \examples{ data(amacrine) plot(pcfdot.inhom(amacrine, "on", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Saturated.Rd0000644000176200001440000000136514141452520016045 0ustar liggesusers\name{Saturated} \alias{Saturated} \title{Saturated Pairwise Interaction model} \description{ Experimental. } \usage{ Saturated(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is experimental. It constructs a member of the ``saturated pairwise'' family \code{\link{pairsat.family}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{ppm.object}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/dppspecdenrange.Rd0000644000176200001440000000111114141452520017240 0ustar liggesusers\name{dppspecdenrange} \alias{dppspecdenrange} \title{Range of Spectral Density of a Determinantal Point Process Model} \description{ Computes the range of the spectral density of a determinantal point process model.
} \usage{dppspecdenrange(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{Numeric value (possibly \code{Inf}).} \author{ \adrian, \rolf and \ege } \examples{ m <- dppBessel(lambda=100, alpha=0.05, sigma=1, d=2) dppspecdenrange(m) } \seealso{ \code{\link{dppspecden}} } \keyword{spatial} \keyword{models} spatstat.core/man/Tstat.Rd0000644000176200001440000000562514141452520015213 0ustar liggesusers\name{Tstat} \alias{Tstat} \title{ Third order summary statistic } \description{ Computes the third order summary statistic \eqn{T(r)} of a spatial point pattern. } \usage{ Tstat(X, ..., r = NULL, rmax = NULL, correction = c("border", "translate"), ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{T(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{T(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{rmax}{ Optional. Numeric. The maximum value of \eqn{r} for which \eqn{T(r)} should be estimated. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"translate"}, \code{"translation"}, or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical. If \code{TRUE}, an estimate of the computation time is printed. } } \details{ This command calculates the third-order summary statistic \eqn{T(r)} for a spatial point pattern, defined by Schladitz and Baddeley (2000). The definition of \eqn{T(r)} is similar to the definition of Ripley's \eqn{K} function \eqn{K(r)}, except that \eqn{K(r)} counts pairs of points while \eqn{T(r)} counts triples of points. Essentially \eqn{T(r)} is a rescaled cumulative distribution function of the diameters of triangles in the point pattern. The diameter of a triangle is the length of its longest side. } \section{Computation time}{ If the number of points is large, the algorithm can take a very long time to inspect all possible triangles. A rough estimate of the total computation time will be printed at the beginning of the calculation. If this estimate seems very large, stop the calculation using the user interrupt signal, and call \code{Tstat} again, using \code{rmax} to restrict the range of \code{r} values, thus reducing the number of triangles to be inspected. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Schladitz, K. and Baddeley, A. (2000) A third order point process characteristic. \emph{Scandinavian Journal of Statistics} \bold{27}, 657--671.
} \seealso{ \code{\link{Kest}} } \examples{ plot(Tstat(redwood)) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rmhcontrol.Rd0000644000176200001440000003345314141452520016303 0ustar liggesusers\name{rmhcontrol} \alias{rmhcontrol} \alias{rmhcontrol.default} \title{Set Control Parameters for Metropolis-Hastings Algorithm.} \description{ Sets up a list of parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ rmhcontrol(\dots) \method{rmhcontrol}{default}(\dots, p=0.9, q=0.5, nrep=5e5, expand=NULL, periodic=NULL, ptypes=NULL, x.cond=NULL, fixall=FALSE, nverb=0, nsave=NULL, nburn=nsave, track=FALSE, pstage=c("block", "start")) } \arguments{ \item{\dots}{Arguments passed to methods.} \item{p}{Probability of proposing a shift (as against a birth/death).} \item{q}{Conditional probability of proposing a death given that a birth or death will be proposed.} \item{nrep}{Total number of steps (proposals) of Metropolis-Hastings algorithm that should be run.} \item{expand}{ Simulation window or expansion rule. Either a window (object of class \code{"owin"}) or a numerical expansion factor, specifying that simulations are to be performed in a domain other than the original data window, then clipped to the original data window. This argument is passed to \code{\link{rmhexpand}}. A numerical expansion factor can be in several formats: see \code{\link{rmhexpand}}. } \item{periodic}{ Logical value (or \code{NULL}) indicating whether to simulate ``periodically'', i.e. identifying opposite edges of the rectangular simulation window. A \code{NULL} value means ``undecided.'' } \item{ptypes}{For multitype point processes, the distribution of the mark attached to a new random point (when a birth is proposed)} \item{x.cond}{Conditioning points for conditional simulation.} \item{fixall}{(Logical) for multitype point processes, whether to fix the number of points of each type.} \item{nverb}{Progress reports will be printed every \code{nverb} iterations} \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } \item{pstage}{ Character string specifying when to generate proposal points. Either \code{"start"} or \code{"block"}. } } \value{ An object of class \code{"rmhcontrol"}, which is essentially a list of parameter values for the algorithm. There is a \code{print} method for this class, which prints a sensible description of the parameters chosen. } \details{ The Metropolis-Hastings algorithm, implemented as \code{\link{rmh}}, generates simulated realisations of point process models. The function \code{rmhcontrol} sets up a list of parameters which control the iterative behaviour and termination of the Metropolis-Hastings algorithm, for use in a subsequent call to \code{\link{rmh}}. It also checks that the parameters are valid. (A separate function \code{\link{rmhstart}} determines the initial state of the algorithm, and \code{\link{rmhmodel}} determines the model to be simulated.) The parameters are as follows: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. If \eqn{p = 1} then the algorithm only alters existing points, so the number of points never changes, i.e. 
we are simulating conditionally upon the number of points. The number of points is determined by the initial state (specified by \code{\link{rmhstart}}). If \eqn{p=1} and \code{fixall=TRUE} and the model is a multitype point process model, then the algorithm only shifts the locations of existing points and does not alter their marks (types). This is equivalent to simulating conditionally upon the number of points of each type. These numbers are again specified by the initial state. If \eqn{p = 1} then no expansion of the simulation window is allowed (see \code{expand} below). The default value of \code{p} can be changed by setting the parameter \code{rmh.p} in \code{\link{spatstat.options}}. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that a shift is not proposed. This is of course ignored if \code{p} is equal to 1. The default value of \code{q} can be changed by setting the parameter \code{rmh.q} in \code{\link{spatstat.options}}. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. The default value of \code{nrep} can be changed by setting the parameter \code{rmh.nrep} in \code{\link{spatstat.options}}. } \item{expand}{ Either a number or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a domain other than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. This would often be done in order to approximate the simulation of a stationary process (Geyer, 1999) or more generally a process existing in the whole plane, rather than just in the window \code{w}. If \code{expand} is a window object, it is taken as the larger domain in which simulation is performed. If \code{expand} is numeric, it is interpreted as an expansion factor or expansion distance for determining the simulation domain from the data window. It should be a \emph{named} scalar, such as \code{expand=c(area=2)}, \code{expand=c(distance=0.1)}, \code{expand=c(length=1.2)}. See \code{\link{rmhexpand}()} for more details. If the name is omitted, it defaults to \code{area}. Expansion is not permitted if the number of points has been fixed by setting \code{p = 1} or if the starting configuration has been specified via the argument \code{x.start} in \code{\link{rmhstart}}. If \code{expand} is \code{NULL}, this is interpreted to mean \dQuote{not yet decided}. An expansion rule will be determined at a later stage, using appropriate defaults. See \code{\link{rmhexpand}}. } \item{periodic}{A logical value (or \code{NULL}) determining whether to simulate \dQuote{periodically}. If \code{periodic} is \code{TRUE}, and if the simulation window is a rectangle, then the simulation algorithm effectively identifies opposite edges of the rectangle. Points near the right-hand edge of the rectangle are deemed to be close to points near the left-hand edge. Periodic simulation usually gives a better approximation to a stationary point process. For periodic simulation, the simulation window must be a rectangle. (The simulation window is determined by \code{expand} as described above.) The value \code{NULL} means \sQuote{undecided}. The decision is postponed until \code{\link{rmh}} is called. Depending on the point process model to be simulated, \code{rmh} will then set \code{periodic=TRUE} if the simulation window is expanded \emph{and} the expanded simulation window is rectangular; otherwise \code{periodic=FALSE}. 
Note that \code{periodic=TRUE} is only permitted when the simulation window (i.e. the expanded window) is rectangular. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. Defaults to a vector each of whose entries is \eqn{1/nt} where \eqn{nt} is the number of types for the process. Convergence of the simulation algorithm should be improved if \code{ptypes} is close to the relative frequencies of the types which will result from the simulation. } \item{x.cond}{ If this argument is given, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the location of the fixed points as well as the type of conditioning. It should be either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)} or a \code{data.frame}. See the section on Conditional Simulation. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. Meaningful only if a marked process is being simulated, and if \eqn{p = 1}. A warning message is given if \code{fixall} is set equal to \code{TRUE} when it is not meaningful. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{nsave,nburn}{ If these integers are given, then the current state of the simulation algorithm (i.e. the current random point pattern) will be saved every \code{nsave} iterations, starting from iteration \code{nburn}. (Alternatively \code{nsave} can be a vector, specifying different numbers of iterations between each successive save. This vector will be recycled until the end of the simulations.) } \item{track}{ Logical flag indicating whether to save the transition history of the simulations (i.e. information specifying what type of proposal was made, and whether it was accepted or rejected, for each iteration). } \item{pstage}{ Character string specifying the stage of the algorithm at which the randomised proposal points should be generated. If \code{pstage="start"} or if \code{nsave=0}, the entire sequence of \code{nrep} random proposal points is generated at the start of the algorithm. This is the original behaviour of the code, and should be used in order to maintain consistency with older versions of \pkg{spatstat}. If \code{pstage="block"} and \code{nsave > 0}, then a set of \code{nsave} random proposal points will be generated before each block of \code{nsave} iterations. This is much more efficient. The default is \code{pstage="block"}. } } } \section{Conditional Simulation}{ For a Gibbs point process \eqn{X}, the Metropolis-Hastings algorithm easily accommodates several kinds of conditional simulation: \describe{ \item{conditioning on the total number of points:}{ We fix the total number of points \eqn{N(X)} to be equal to \eqn{n}. We simulate from the conditional distribution of \eqn{X} given \eqn{N(X) = n}. } \item{conditioning on the number of points of each type:}{ In a multitype point process, where \eqn{Y_j}{Y[[j]]} denotes the process of points of type \eqn{j}, we fix the number \eqn{N(Y_j)}{N(Y[[j]])} of points of type \eqn{j} to be equal to \eqn{n_j}{n[j]}, for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. We simulate from the conditional distribution of \eqn{X} given \eqn{N(Y_j)=n_j}{N(Y[[j]]) = n[j]} for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. 
} \item{conditioning on the realisation in a subwindow:}{ We require that the point process \eqn{X} should, within a specified sub-window \eqn{V}, coincide with a specified point pattern \eqn{y}. We simulate from the conditional distribution of \eqn{X} given \eqn{X \cap V = y}{(X intersect V) = y}. } \item{Palm conditioning:}{ We require that the point process \eqn{X} include a specified list of points \eqn{y}. We simulate from the point process with probability density \eqn{g(x) = c f(x \cup y)}{g(x) = c * f(x union y)} where \eqn{f} is the probability density of the original process \eqn{X}, and \eqn{c} is a normalising constant. } } To achieve each of these types of conditioning we do as follows: \describe{ \item{conditioning on the total number of points:}{ Set \code{p=1}. The number of points is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the number of points of each type:}{ Set \code{p=1} and \code{fixall=TRUE}. The number of points of each type is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the realisation in a subwindow:}{ Set \code{x.cond} to be a point pattern (object of class \code{"ppp"}). Its window \code{V=Window(x.cond)} becomes the conditioning subwindow \eqn{V}. } \item{Palm conditioning:}{ Set \code{x.cond} to be a \code{list(x,y)} or \code{data.frame} with two columns containing the coordinates of the points, or a \code{list(x,y,marks)} or \code{data.frame} with three columns containing the coordinates and marks of the points. } } The arguments \code{x.cond}, \code{p} and \code{fixall} can be combined. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhstart}}, \code{\link{rmhexpand}}, \code{\link{spatstat.options}} } \examples{ # parameters given as named arguments c1 <- rmhcontrol(p=0.3,periodic=TRUE,nrep=1e6,nverb=1e5) # parameters given as a list liz <- list(p=0.9, nrep=1e4) c2 <- rmhcontrol(liz) # parameters given in rmhcontrol object c3 <- rmhcontrol(c1) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat.core/man/rstrat.Rd0000644000176200001440000000377114141452520015433 0ustar liggesusers\name{rstrat} \alias{rstrat} \title{Simulate Stratified Random Point Pattern} \description{ Generates a ``stratified random'' pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points independently in each tile. } \usage{ rstrat(win=square(1), nx, ny=nx, k = 1, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each row. } \item{ny}{Number of tiles in each column. } \item{k}{Number of random points to generate in each tile. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
} \details{ This function generates a random pattern of points in a ``stratified random'' sampling design. It can be useful for generating random spatial sampling points. The bounding rectangle of \code{win} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. Some of these random points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rstrat(nx=10) plot(X) # polygonal boundary X <- rstrat(letterR, 5, 10, k=3) plot(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/is.dppm.Rd0000644000176200001440000000062114141452520015455 0ustar liggesusers\name{is.dppm} \alias{is.dppm} \title{Recognise Fitted Determinantal Point Process Models} \description{Check that an object inherits the class dppm} \usage{is.dppm(x)} \arguments{ \item{x}{Any object.} } \value{A single logical value.} \author{\ege, \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.core/man/Strauss.Rd0000644000176200001440000000573614141452520015563 0ustar liggesusers\name{Strauss} \alias{Strauss} \title{The Strauss Point Process Model} \description{ Creates an instance of the Strauss point process model which can then be fitted to point pattern data. } \usage{ Strauss(r) } \arguments{ \item{r}{The interaction radius of the Strauss process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Strauss process with interaction radius \eqn{r}. } \details{ The (stationary) Strauss process with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss process pairwise interaction is yielded by the function \code{Strauss()}. See the examples below. Note the only argument is the interaction radius \code{r}.
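For example, a minimal sketch (assuming the \code{cells} dataset and an arbitrary interaction radius) of fitting the model and recovering \eqn{\beta}{beta} and \eqn{\gamma}{gamma} from the fitted coefficients: \preformatted{
  fit <- ppm(cells ~ 1, Strauss(r=0.07))
  # the coefficients are log(beta) and log(gamma);
  # exponentiate to recover the parameters of the density
  exp(coef(fit))
}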
When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Strauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ Strauss(r=0.1) # prints a sensible description of itself # ppm(cells ~1, Strauss(r=0.07)) # fit the stationary Strauss process to `cells' ppm(cells ~polynom(x,y,3), Strauss(r=0.07)) # fit a nonstationary Strauss process with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/compareFit.Rd0000644000176200001440000001007014141452520016173 0ustar liggesusers\name{compareFit} \alias{compareFit} \title{ Residual Diagnostics for Multiple Fitted Models } \description{ Compares several fitted point process models using the same residual diagnostic. } \usage{ compareFit(object, Fun, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = NULL, modelnames = NULL, same = NULL, different = NULL) } \arguments{ \item{object}{ Object or objects to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), or a list of these objects. } \item{Fun}{ Diagnostic function to be computed for each model. One of the functions \code{Kcom}, \code{Kres}, \code{Gcom}, \code{Gres}, \code{psst}, \code{psstA} or \code{psstG} or a string containing one of these names. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Extra arguments passed to \code{Fun}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern or list of point patterns. See \code{\link{ppm}} for details. Each of these arguments can be a list, specifying different \code{trend}, \code{interaction} and/or \code{rbord} values to be used to generate different fitted models. } \item{modelnames}{ Character vector. Short descriptive names for the different models. } \item{same,different}{ Character strings or character vectors passed to \code{\link{collapse.fv}} to determine the format of the output. } } \details{ This is a convenient way to collect diagnostic information for several different point process models fitted to the same point pattern dataset, or for point process models of the same form fitted to several different datasets, etc. The first argument, \code{object}, is usually a list of fitted point process models (objects of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a list of point patterns (objects of class \code{"ppp"}). In that case, point process models will be fitted to each of the point pattern datasets, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. 
Alternatively \code{object} can be a single point pattern (object of class \code{"ppp"}) and one or more of the arguments \code{trend}, \code{interaction} or \code{rbord} can be a list. In this case, point process models will be fitted to the same point pattern dataset, using each of the model specifications listed. The diagnostic function \code{Fun} will be applied to each of the point process models. The results will be collected into a single function value table. The \code{modelnames} are used to label the results from each fitted model. } \value{ Function value table (object of class \code{"fv"}). } \author{ \ege, \adrian and Jesper \Moller. } \seealso{ \code{\link{ppm}}, \code{\link{Kcom}}, \code{\link{Kres}}, \code{\link{Gcom}}, \code{\link{Gres}}, \code{\link{psst}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{collapse.fv}} } \examples{ nd <- 40 \testonly{ nd <- 10 } ilist <- list(Poisson(), Geyer(7, 2), Strauss(7)) iname <- c("Poisson", "Geyer", "Strauss") \testonly{ ilist <- ilist[c(1,3)] iname <- iname[c(1,3)] } K <- compareFit(swedishpines, Kcom, interaction=ilist, rbord=9, correction="translate", same="trans", different="tcom", modelnames=iname, nd=nd) K } \keyword{spatial} \keyword{models} spatstat.core/man/rshift.psp.Rd0000644000176200001440000001044114141452520016204 0ustar liggesusers\name{rshift.psp} \alias{rshift.psp} \title{Randomly Shift a Line Segment Pattern} \description{ Randomly shifts the segments in a line segment pattern. } \usage{ \method{rshift}{psp}(X, \dots, group=NULL, which=NULL) } \arguments{ \item{X}{Line segment pattern to be subjected to a random shift. An object of class \code{"psp"}. } \item{\dots}{ Arguments controlling the randomisation and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{group}{ Optional. Factor specifying a grouping of the line segments of \code{X}, or \code{NULL} indicating that all line segments belong to the same group. Each group will be shifted together, and separately from other groups. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. } } \value{ A line segment pattern (object of class \code{"psp"}). } \details{ This operation randomly shifts the locations of the line segments in a line segment pattern. The function \code{rshift} is generic. This function \code{rshift.psp} is the method for line segment patterns. The line segments of \code{X} are first divided into groups, then the line segments within a group are shifted by a common random displacement vector. Different groups of line segments are shifted independently. If the argument \code{group} is present, then this determines the grouping. Otherwise, all line segments belong to a single group. The argument \code{group} should be a factor, of length equal to the number of line segments in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all line segments of \code{X} belong to a single group. By default, every group of line segments will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data line segments are shifted, is generated at random. The \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. 
This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted line segment lies partially or completely outside the window of \code{X}. Currently the only option is \code{"erode"} which specifies that the segments will be clipped to a smaller window. The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- rshift(X, radius=0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/markmarkscatter.Rd0000644000176200001440000000411114141452520017274 0ustar liggesusers\name{markmarkscatter} \alias{markmarkscatter} \title{ Mark-Mark Scatter Plot } \description{ Generates the mark-mark scatter plot of a point pattern. } \usage{ markmarkscatter(X, rmax, \dots, col = NULL, symap = NULL, transform=I, jit=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}, \code{"pp3"}, \code{"lpp"} or \code{"ppx"}) with numeric marks. } \item{rmax}{ Maximum distance between pairs of points which contribute to the plot. } \item{\dots}{ Additional arguments passed to \code{\link{plot.ppp}} to control the scatterplot. } \item{transform}{ Optional. A function which should be applied to the mark values. } \item{jit}{ Logical value indicating whether mark values should be randomly perturbed using \code{\link[base]{jitter}}. } \item{col}{ Optional. A vector of colour values, or a \code{\link{colourmap}} to be used to portray the pairwise distance values. Ignored if \code{symap} is given. } \item{symap}{ Optional. A \code{\link{symbolmap}} to be used to portray the pairwise distance values. Overrides \code{col}. } } \details{ The mark-mark scatter plot (Ballani et al, 2019) is a scatterplot of the mark values of all pairs of distinct points in \code{X} which are closer than the distance \code{rmax}. The dots in the scatterplot are coloured according to the pairwise distance between the two spatial points. The plot is augmented by three curves explained by Ballani et al (2019).
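The colour scale used for the pairwise distances can also be fixed explicitly. A minimal sketch (assuming the \code{longleaf} data; the range \code{c(0, 10)} simply matches the \code{rmax} used here and is otherwise arbitrary): \preformatted{
  # colour map for pairwise distances between 0 and rmax
  cmap <- colourmap(heat.colors(64), range=c(0, 10))
  markmarkscatter(longleaf, 10, col=cmap)
}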
If the marks only take a few different values, then it is usually appropriate to apply random perturbation (jitter) to the mark values, by setting \code{jit=TRUE}. } \value{ Null. } \references{ Ballani, F., Pommerening, A. and Stoyan, D. (2019) Mark-mark scatterplots improve pattern analysis in spatial plant ecology. \emph{Ecological Informatics} \bold{49}, 13--21. } \author{ Adrian Baddeley (coded from the description in Ballani et al.) } \examples{ markmarkscatter(longleaf, 10) markmarkscatter(spruces, 10, jit=TRUE) } \keyword{spatial} \keyword{hplot} spatstat.core/man/reach.dppm.Rd0000644000176200001440000000212514141452520016125 0ustar liggesusers\name{reach.dppm} \alias{reach.dppm} \alias{reach.detpointprocfamily} \title{Range of Interaction for a Determinantal Point Process Model} \description{ Returns the range of interaction for a determinantal point process model. } \usage{ \method{reach}{dppm}(x, \dots) \method{reach}{detpointprocfamily}(x, \dots) } \arguments{ \item{x}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a determinantal point process model may be defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. For example in the \Matern model this defaults to finding \eqn{R} such that \eqn{g(R)=0.99}. } \value{Numeric} \author{ \spatstatAuthors. } \examples{ reach(dppMatern(lambda=100, alpha=.01, nu=1, d=2)) } \keyword{spatial} \keyword{models} spatstat.core/man/Kcross.Rd0000644000176200001440000001714014141452520015353 0ustar liggesusers\name{Kcross} \alias{Kcross} \title{ Multitype K Function (Cross-type) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ Kcross(X, i, j, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE, from, to ) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied.
Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kcross} and its companions \code{\link{Kdot}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The ``cross-type'' (type \eqn{i} to type \eqn{j}) \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda_j K_{ij}(r)}{lambda[j] Kij(r)} equals the expected number of additional random points of type \eqn{j} within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda_j}{lambda[j]} is the intensity of the type \eqn{j} points, i.e. the expected number of points of type \eqn{j} per unit area. The function \eqn{K_{ij}}{Kij} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{ij}(r)}{Kij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}(r)}{Kij(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{K_{ij}(r)}{Kij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. 
The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kcross}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # amacrine cells data K01 <- Kcross(amacrine, "off", "on") plot(K01) \testonly{ K01 <- Kcross(amacrine, "off", "on", ratio=TRUE) } ## K10 <- Kcross(amacrine, "on", "off") # synthetic example: point pattern with marks 0 and 1 ## pp <- runifpoispp(50) ## pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) ## K <- Kcross(pp, "0", "1") ## K <- Kcross(pp, 0, 1) # equivalent } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/relrisk.ppm.Rd0000644000176200001440000001716114141452520016360 0ustar liggesusers\name{relrisk.ppm} \alias{relrisk.ppm} \title{ Parametric Estimate of Spatially-Varying Relative Risk } \description{ Given a point process model fitted to a multitype point pattern, this function computes the fitted spatially-varying probability of each type of point, or the ratios of such probabilities, according to the fitted model. Optionally the standard errors of the estimates are also computed. } \usage{ \method{relrisk}{ppm}(X, \dots, at = c("pixels", "points"), relative = FALSE, se = FALSE, casecontrol = TRUE, control = 1, case, ngrid = NULL, window = NULL) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{at}{ String specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. 
Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Applies only when \code{at="pixels"}.) } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. (Applies only when \code{at="pixels"}.) } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppm} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a parametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}.
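A minimal sketch of this mode (the model formula is the one used in the examples below): \preformatted{
  fit <- ppm(chorley ~ marks * (x + y))
  # fitted probability of a case at each data point
  p <- relrisk(fit, at="points")
}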
By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Probabilities and risks are computed from the fitted intensity of the model, using \code{\link{predict.ppm}}. If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, using \code{\link{vcov.ppm}}. } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \author{ \spatstatAuthors. } \seealso{ There is another method \code{\link{relrisk.ppp}} for point pattern datasets which computes \emph{nonparametric} estimates of relative risk by kernel smoothing. See also \code{\link{relrisk}}, \code{\link{relrisk.ppp}}, \code{\link{ppm}} } \examples{ fit <- ppm(chorley ~ marks * (x+y)) rr <- relrisk(fit, relative=TRUE, control="lung", se=TRUE) plot(rr$estimate) plot(rr$SE) rrX <- relrisk(fit, at="points", relative=TRUE, control="lung") } \keyword{spatial} \keyword{models} spatstat.core/man/AreaInter.Rd0000644000176200001440000001733714141452520015771 0ustar liggesusers\name{AreaInter} \alias{AreaInter} \title{The Area Interaction Point Process Model} \description{ Creates an instance of the Area Interaction point process model (Widom-Rowlinson penetrable spheres model) which can then be fitted to point pattern data. } \usage{ AreaInter(r) } \arguments{ \item{r}{The radius of the discs in the area interaction process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the area-interaction process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the Widom-Rowlinson penetrable sphere model or area-interaction process. It can be used to fit this model to point pattern data. 
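As explained below, the model is fitted with \code{\link{ppm}}; a minimal sketch (the radius \code{r=7} for the \code{swedishpines} data is an arbitrary choice): \preformatted{
  fit <- ppm(swedishpines ~ 1, AreaInter(r=7))
  # fitted canonical parameters log(beta) and log(eta)
  coef(fit)
}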
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the area interaction structure is yielded by the function \code{AreaInter()}. See the examples below. In \bold{standard form}, the area-interaction process (Widom and Rowlinson, 1970; Baddeley and Van Lieshout, 1995) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{kappa} and interaction parameter \eqn{\gamma}{gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-A(x)} }{ f(x[1],\ldots,x[n]) = alpha . kappa^n(x) . gamma^(-A(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{A(x)} is the area of the region formed by the union of discs of radius \eqn{r} centred at the points \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]}. Here \eqn{\alpha}{alpha} is a normalising constant. The interaction parameter \eqn{\gamma}{gamma} can be any positive number. If \eqn{\gamma = 1}{gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{kappa}. If \eqn{\gamma < 1}{gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{gamma > 1} the process is clustered. Thus, an area interaction process can be used to model either clustered or regular point patterns. Two points interact if the distance between them is less than \eqn{2r}{2 * r}. The standard form of the model, shown above, is a little complicated to interpret in practical applications. For example, each isolated point of the pattern \eqn{x} contributes a factor \eqn{\kappa \gamma^{-\pi r^2}}{kappa * gamma^(-pi * r^2)} to the probability density. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical scale-free form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \eta^{-C(x)} }{ f(x[1],\ldots,x[n]) = alpha . beta^n(x) . eta^(-C(x)) } where \eqn{\beta}{beta} is the new intensity parameter, \eqn{\eta}{eta} is the new interaction parameter, and \eqn{C(x) = B(x) - n(x)} is the interaction potential. Here \deqn{ B(x) = \frac{A(x)}{\pi r^2} }{ B(x) = A(x)/(pi * r^2) } is the normalised area (so that the discs have unit area). In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density (so the first order trend is \eqn{\beta}{beta}). The quantity \eqn{C(x)} is a true interaction potential, in the sense that \eqn{C(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together (closer than \eqn{2r}{2*r} units apart). When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-C(x)} increases by a value between 0 and 1. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is 1 if the disc of radius \eqn{r} centred at \eqn{u} is completely contained in the union of discs of radius \eqn{r} centred at the data points \eqn{x_i}{x[i]}. Thus, the increase in potential is a measure of how close the new point \eqn{u} is to the existing pattern \eqn{x}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{beta * eta^delta} to the probability density, where \eqn{\delta}{delta} is the increase in potential.
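The relationship between the two parametrisations, spelled out in the next paragraph, can be checked numerically. A sketch with purely hypothetical parameter values: \preformatted{
  r <- 0.1; kappa <- 100; gamma <- 0.5   # hypothetical values
  eta <- gamma^(pi * r^2)
  beta <- kappa / eta
}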
The old parameters \eqn{\kappa,\gamma}{kappa,gamma} of the standard form are related to the new parameters \eqn{\beta,\eta}{beta,eta} of the canonical scale-free form, by \deqn{ \beta = \kappa \gamma^{-\pi r^2} = \kappa /\eta }{ beta = kappa * gamma^(-pi * r^2) = kappa/eta } and \deqn{ \eta = \gamma^{\pi r^2} }{ eta = gamma^(pi * r^2) } provided \eqn{\gamma}{gamma} and \eqn{\kappa}{kappa} are positive and finite. In the canonical scale-free form, the parameter \eqn{\eta}{eta} can take any nonnegative value. The value \eqn{\eta = 1}{eta = 1} again corresponds to a Poisson process, with intensity \eqn{\beta}{beta}. If \eqn{\eta < 1}{eta < 1} then the process is regular, while if \eqn{\eta > 1}{eta > 1} the process is clustered. The value \eqn{\eta = 0}{eta = 0} corresponds to a hard core process with hard core radius \eqn{r} (interaction distance \eqn{2r}). The \emph{nonstationary} area interaction process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{AreaInter()} is the disc radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\eta)}{log(eta)} are estimated by \code{\link{ppm}()}, not fixed in \code{AreaInter()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}. \code{\link{ragsAreaInter}} and \code{\link{rmh}} for simulation of area-interaction models. } \section{Warnings}{ The interaction distance of this process is equal to \code{2 * r}. Two discs of radius \code{r} overlap if their centres are closer than \code{2 * r} units apart. The estimate of the interaction parameter \eqn{\eta}{eta} is unreliable if the interaction radius \code{r} is too small or too large. In these situations the model is approximately Poisson so that \eqn{\eta}{eta} is unidentifiable. As a rule of thumb, one can inspect the empty space function of the data, computed by \code{\link{Fest}}. The value \eqn{F(r)} of the empty space function at the interaction radius \code{r} should be between 0.2 and 0.8. } \examples{ \testonly{op <- spatstat.options(ngrid.disc=8)} # prints a sensible description of itself AreaInter(r=0.1) # Note the reach is twice the radius reach(AreaInter(r=1)) # Fit the stationary area interaction process to Swedish Pines data data(swedishpines) ppm(swedishpines ~1, AreaInter(r=7)) # Fit the stationary area interaction process to `cells' ppm(cells ~1, AreaInter(r=0.06)) # eta=0 indicates hard core process. # Fit a nonstationary area interaction with log-cubic polynomial trend # ppm(swedishpines ~polynom(x/10,y/10,3), AreaInter(r=7)) \testonly{spatstat.options(op)} } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47}, 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52}, 1670--1684. } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/ranef.mppm.Rd0000644000176200001440000000321314141452520016146 0ustar liggesusers\name{ranef.mppm} \alias{ranef.mppm} \title{ Extract Random Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the random effects of the model.
A method for \code{ranef}. } \usage{ \method{ranef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{ranef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the random effects of the model. } \value{ A data frame, or list of data frames, as described in the help for \code{\link[nlme]{ranef.lme}}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{fixef.mppm}}, \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) ranef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) ranef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/formula.ppm.Rd0000644000176200001440000000326214141452520016347 0ustar liggesusers\name{formula.ppm} \alias{formula.ppm} \alias{terms.ppm} \title{ Model Formulae for Gibbs Point Process Models } \description{ Extract the trend formula, or the terms in the trend formula, in a fitted Gibbs point process model. } \usage{ \method{formula}{ppm}(x, \dots) \method{terms}{ppm}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"ppm"}, representing a fitted point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{formula}} and \code{\link{terms}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{formula.ppm} extracts the trend formula from the fitted model \code{x} (the formula originally specified as the argument \code{trend} to \code{\link{ppm}}). The method \code{terms.ppm} extracts the individual terms in the trend formula. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{extractAIC.ppm}}, \code{\link{fitted.ppm}}, \code{\link{logLik.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \examples{ data(cells) fit <- ppm(cells, ~x) formula(fit) terms(fit) } \keyword{spatial} \keyword{methods} spatstat.core/man/stieltjes.Rd0000644000176200001440000000443714141452520016122 0ustar liggesusers\name{stieltjes} \alias{stieltjes} \title{Compute Integral of Function Against Cumulative Distribution} \description{ Computes the Stieltjes integral of a function \eqn{f} with respect to a function \eqn{M}. } \usage{ stieltjes(f, M, ...) } \arguments{ \item{f}{ The integrand. A function in the \R language.
} \item{M}{ The cumulative function against which \code{f} will be integrated. An object of class \code{"fv"} or \code{"stepfun"}. } \item{\dots}{ Additional arguments passed to \code{f}. } } \details{ This command computes the Stieltjes integral \deqn{I = \int f(x) dM(x)}{I = integral f(x) dM(x)} of a real-valued function \eqn{f(x)} with respect to a nondecreasing function \eqn{M(x)}. One common use of the Stieltjes integral is to find the mean value of a random variable from its cumulative distribution function \eqn{F(x)}. The mean value is the Stieltjes integral of \eqn{f(x)=x} with respect to \eqn{F(x)}. The argument \code{f} should be a \code{function} in the \R language. It should accept a numeric vector argument \code{x} and should return a numeric vector of the same length. The argument \code{M} should be either a step function (object of class \code{"stepfun"}) or a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). Objects of class \code{"stepfun"} are returned by \code{\link[stats]{ecdf}}, \code{\link{ewcdf}}, \code{\link[spatstat.core]{spatialcdf}} and other utilities. Objects of class \code{"fv"} are returned by the commands \code{\link[spatstat.core]{Kest}}, \code{\link[spatstat.core]{Gest}}, etc. } \value{ A list containing the value of the Stieltjes integral computed using each of the versions of the function \code{M}. } \seealso{ \code{\link{fv.object}}, \code{\link[spatstat.core]{Gest}} } \examples{ # estimate cdf of nearest neighbour distance in redwood data G <- Gest(redwood) # compute estimate of mean nearest neighbour distance stieltjes(function(x){x}, G) # estimated probability of a distance in the interval [0.1,0.2] stieltjes(function(x,a,b){ (x >= a) & (x <= b)}, G, a=0.1, b=0.2) # stepfun example H <- spatialcdf(bei.extra$elev, normalise=TRUE) stieltjes(function(x){x}, H) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.core/man/valid.detpointprocfamily.Rd0000644000176200001440000000121414141452520021114 0ustar liggesusers\name{valid.detpointprocfamily} \alias{valid.detpointprocfamily} \title{Check Validity of a Determinantal Point Process Model} \description{ Checks the validity of a determinantal point process model. } \usage{ \method{valid}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Ignored.} } \value{Logical} \author{ \adrian \rolf and \ege } \examples{ model1 <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) valid(model1) model2 <- dppMatern(lambda=100, alpha=1, nu=1, d=2) valid(model2) } \seealso{ \code{\link{valid}} } \keyword{spatial} \keyword{models} spatstat.core/man/quad.ppm.Rd0000644000176200001440000000624014141452520015633 0ustar liggesusers\name{quad.ppm} \alias{quad.ppm} \title{Extract Quadrature Scheme Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the quadrature scheme used to fit the model. } \usage{ quad.ppm(object, drop=FALSE, clip=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{clip}{ Logical value determining whether to erode the window, if \code{object} was fitted using the border correction. See Details. } } \value{ A quadrature scheme (object of class \code{"quad"}). 
} \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{quad.ppm} extracts the quadrature scheme. A typical use of this function would be to inspect the quadrature scheme (points and weights) to gauge the accuracy of the approximation to the exact pseudolikelihood. Some quadrature points may not have been used in fitting the model. This happens if the border correction is used, and in other cases (e.g. when the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused quadrature points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. The quadrature scheme has a \emph{window}, which by default is set to equal the window of the original data. However this window may be larger than the actual domain of integration of the pseudolikelihood or composite likelihood that was used to fit the model. If \code{clip=TRUE} then the window of the quadrature scheme is set to the actual domain of integration. This option only has an effect when the model was fitted using the border correction; then the window is obtained by eroding the original data window by the border correction distance. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. See \code{\link{quad.object}} for a list of all operations that can be performed on objects of class \code{"quad"}. This function can also be applied to objects of class \code{"kppm"} and \code{"lppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{quad.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~1, Strauss(r=0.1)) Q <- quad.ppm(fit) # plot(Q) npoints(Q$data) npoints(Q$dummy) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.core/man/exactMPLEstrauss.Rd0000644000176200001440000001034514144333466017330 0ustar liggesusers\name{exactMPLEstrauss} \alias{exactMPLEstrauss} \title{ Exact Maximum Pseudolikelihood Estimate for Stationary Strauss Process } \description{ Computes, to very high accuracy, the Maximum Pseudolikelihood Estimates of the parameters of a stationary Strauss point process. } \usage{ exactMPLEstrauss(X, R, ngrid = 2048, plotit = FALSE, project=TRUE) } \arguments{ \item{X}{ Data to which the Strauss process will be fitted. A point pattern dataset (object of class \code{"ppp"}). } \item{R}{ Interaction radius of the Strauss process. A non-negative number. } \item{ngrid}{ Grid size for calculation of integrals. An integer, giving the number of grid points in the \eqn{x} and \eqn{y} directions. } \item{plotit}{ Logical. If \code{TRUE}, the log pseudolikelihood is plotted on the current device. } \item{project}{ Logical. If \code{TRUE} (the default), the parameter \eqn{\gamma}{gamma} is constrained to lie in the interval \eqn{[0,1]}. If \code{FALSE}, this constraint is not applied. } } \details{ This function is intended mainly for technical investigation of algorithm performance. 
Its practical use is quite limited. It fits the stationary Strauss point process model to the point pattern dataset \code{X} by maximum pseudolikelihood (with the border edge correction) using an algorithm with very high accuracy. This algorithm is more accurate than the \emph{default} behaviour of the model-fitting function \code{\link{ppm}} because the discretisation is much finer. Ripley (1988) and Baddeley and Turner (2000) derived the log pseudolikelihood for the stationary Strauss process, and eliminated the parameter \eqn{\beta}{beta}, obtaining an exact formula for the partial log pseudolikelihood as a function of the interaction parameter \eqn{\gamma}{gamma} only. The algorithm evaluates this expression to a high degree of accuracy, using numerical integration on a \code{ngrid * ngrid} lattice, uses \code{\link[stats]{optim}} to maximise the log pseudolikelihood with respect to \eqn{\gamma}{gamma}, and finally recovers \eqn{\beta}{beta}. The result is a vector of length 2, containing the fitted coefficients \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}. These values correspond to the entries that would be obtained with \code{coef(ppm(X, ~1, Strauss(R)))}. The fitted coefficients are typically accurate to within \eqn{10^{-6}}{10^(-6)} as shown in Baddeley and Turner (2013). Note however that (by default) \code{exactMPLEstrauss} constrains the parameter \eqn{\gamma}{gamma} to lie in the interval \eqn{[0,1]} in which the point process is well defined (Kelly and Ripley, 1976) whereas \code{\link{ppm}} does not constrain the value of \eqn{\gamma}{gamma} (by default). This behaviour is controlled by the argument \code{project} to \code{\link{ppm}} and \code{exactMPLEstrauss}. The default for \code{\link{ppm}} is \code{project=FALSE}, while the default for \code{exactMPLEstrauss} is \code{project=TRUE}. } \value{ Vector of length 2. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A. and Turner, R. (2013) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{2012}. \code{DOI: 10.1080/00949655.2012.755976} Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}. Cambridge University Press. } \author{ \adrian and \rolf } \seealso{ \code{\link{ppm}} } \examples{ if(interactive()) { rc <- 0.09 exactMPLEstrauss(cells, rc, plotit=TRUE) coef(ppm(cells ~1, Strauss(rc))) coef(ppm(cells ~1, Strauss(rc), nd=128)) rr <- 0.04 exactMPLEstrauss(redwood, rr) exactMPLEstrauss(redwood, rr, project=FALSE) coef(ppm(redwood ~1, Strauss(rr))) } else { rc <- 0.09 exactMPLEstrauss(cells, rc, ngrid=64, plotit=TRUE) exactMPLEstrauss(cells, rc, ngrid=64, project=FALSE) } } \keyword{spatial} \keyword{models} spatstat.core/man/residuals.mppm.Rd0000644000176200001440000000476214141452520017060 0ustar liggesusers\name{residuals.mppm} \alias{residuals.mppm} \title{Residuals for Point Process Model Fitted to Multiple Point Patterns} \description{ Given a point process model fitted to multiple point patterns, compute residuals for each pattern. 
} \usage{ \method{residuals}{mppm}(object, type = "raw", ..., fittedvalues = fitted.mppm(object)) } \arguments{ \item{object}{Fitted point process model (object of class \code{"mppm"}).} \item{\dots}{Ignored.} \item{type}{Type of residuals: either \code{"raw"}, \code{"pearson"} or \code{"inverse"}. Partially matched.} \item{fittedvalues}{Advanced use only. Fitted values of the model to be used in the calculation. } } \details{ Baddeley et al (2005) defined residuals for the fit of a point process model to spatial point pattern data. For an explanation of these residuals, see the help file for \code{\link{residuals.ppm}}. This function computes the residuals for a point process model fitted to \emph{multiple} point patterns. The \code{object} should be an object of class \code{"mppm"} obtained from \code{\link{mppm}}. The return value is a list. The number of entries in the list equals the number of point patterns in the original data. Each entry in the list has the same format as the output of \code{\link{residuals.ppm}}. That is, each entry in the list is a signed measure (object of class \code{"msr"}) giving the residual measure for the corresponding point pattern. } \value{ A list of signed measures (objects of class \code{"msr"}) giving the residual measure for each of the original point patterns. See Details. } \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) r <- residuals(fit) # compute total residual for each point pattern rtot <- sapply(r, integral.msr) # standardise the total residuals areas <- sapply(windows.mppm(fit), area.owin) rtot/sqrt(areas) } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{mppm}}, \code{\link{residuals.ppm}} } \keyword{spatial} \keyword{models} spatstat.core/man/will.expand.Rd0000644000176200001440000000171414141452520016334 0ustar liggesusers\name{will.expand} \alias{will.expand} \title{ Test Expansion Rule } \description{ Determines whether an expansion rule will actually expand the window or not. } \usage{ will.expand(x) } \arguments{ \item{x}{ Expansion rule. An object of class \code{"rmhexpand"}. } } \details{ An object of class \code{"rmhexpand"} describes a rule for expanding a simulation window. See \code{\link{rmhexpand}} for details. One possible expansion rule is to do nothing, i.e. not to expand the window. This command inspects the expansion rule \code{x} and determines whether it will or will not actually expand the window. It returns \code{TRUE} if the window will be expanded. } \value{ Logical value. } \author{\adrian and \rolf } \seealso{ \code{\link{rmhexpand}}, \code{\link{expand.owin}} } \examples{ x <- rmhexpand(distance=0.2) y <- rmhexpand(area=1) will.expand(x) will.expand(y) } \keyword{spatial} \keyword{manip} spatstat.core/man/is.marked.ppm.Rd0000644000176200001440000000440114141452520016553 0ustar liggesusers\name{is.marked.ppm} \alias{is.marked.ppm} \title{Test Whether A Point Process Model is Marked} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points.
} \usage{ \method{is.marked}{ppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a marked point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a marked point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model uses the marks, or ignores them). See the Examples for a trick to test this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.marked(fit1) fit2 <- ppm(X, ~ 1, Poisson()) is.marked(fit2) ## test whether the model formula involves marks "marks" \%in\% spatstat.utils::variablesinformula(formula(fit2)) # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.marked(fit3) # FALSE } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.core/man/psib.Rd0000644000176200001440000000304314141452520015041 0ustar liggesusers\name{psib} \alias{psib} \alias{psib.kppm} \title{ Sibling Probability of Cluster Point Process } \description{ Computes the sibling probability of a cluster point process model. } \usage{ psib(object) \method{psib}{kppm}(object) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } } \details{ In a Poisson cluster process, two points are called \emph{siblings} if they belong to the same cluster, that is, if they had the same parent point. If two points of the process are separated by a distance \eqn{r}, the probability that they are siblings is \eqn{p(r) = 1 - 1/g(r)} where \eqn{g} is the pair correlation function of the process. The value \eqn{p(0) = 1 - 1/g(0)} is the probability that, if two points of the process are situated very close to each other, they came from the same cluster. This probability is an index of the strength of clustering, with high values suggesting strong clustering. This concept was proposed in Baddeley, Rubak and Turner (2015, page 479) and Baddeley (2017). } \value{ A single number. } \references{ Baddeley, A. (2017) Local composite likelihood for spatial point processes. \emph{Spatial Statistics} \bold{22}, 261--295. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \adrian.
} \seealso{ \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") psib(fit) } \keyword{spatial} \keyword{models} spatstat.core/man/pcfdot.Rd0000644000176200001440000001400414141452520015362 0ustar liggesusers\name{pcfdot} \alias{pcfdot} \title{Multitype pair correlation function (i-to-any)} \description{ Calculates an estimate of the multitype pair correlation function (from points of type \code{i} to points of any type) for a multitype point pattern. } \usage{ pcfdot(X, i, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot-type pair correlation function \eqn{g_{i\bullet}(r)}{gdot[i](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \details{ This is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. For two locations \eqn{x} and \eqn{y} separated by a nonzero distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of any type at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda * gdot[i](r) dx dy } where \eqn{\lambda}{lambda} is the intensity of all points, and \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda}{p(r) = lambda[i] * lambda} so \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}. For a stationary multitype point process, the type-\code{i}-to-any-type pair correlation function is formally defined as \deqn{ g_{i\bullet}(r) = \frac{K_{i\bullet}^\prime(r)}{2\pi r} }{ gdot[i](r) = Kdot[i]'(r)/(2 * pi * r) } where \eqn{K_{i\bullet}^\prime}{Kdot[i]'(r)} is the derivative of the type-\code{i}-to-any-type \eqn{K} function \eqn{K_{i\bullet}(r)}{Kdot[i](r)} of the point process. See \code{\link{Kdot}} for information about \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. The command \code{pcfdot} computes a kernel estimate of the multitype pair correlation function from points of type \eqn{i} to points of any type. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}.
This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285). That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the unmarked point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfcross}} computes the corresponding analogue of \code{\link{Kcross}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}}{gdot[i]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i\bullet}}{gdot[i]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfcross}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}}, \code{\link{pcf.ppp}}. \code{\link{Kdot}} } \examples{ data(amacrine) p <- pcfdot(amacrine, "on") p <- pcfdot(amacrine, "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/lgcp.estpcf.Rd0000644000176200001440000002031714141452520016317 0ustar liggesusers\name{lgcp.estpcf} \alias{lgcp.estpcf} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ lgcp.estpcf(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion.
} \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the estimated pair correlation function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical pair correlation function of the LGCP model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\Moller and Waagepetersen, 2003, pp. 72--76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The theoretical pair correlation function of the LGCP is \deqn{ g(r) = \exp(C(r)) }{ g(r) = exp(C(r)) } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda = exp(mu + C(0)/2). } The covariance function \eqn{C(r)} takes the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model.
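For instance (anticipating the Examples below), a \Matern template with shape parameter \code{nu=0.3} would be specified as follows; the starting values are illustrative only. \preformatted{
  fit <- lgcp.estpcf(redwood, startpar=c(var=1, scale=0.1),
                     covmodel=list(model="matern", nu=0.3))
}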
The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } To determine the template model, the string \code{"modelname"} will be prefixed by \code{"RM"} and the code will search for a function of this name in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the \Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)} corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian with modifications by Shen Guochun and Rasmus Waagepetersen \email{rw@math.auc.dk} and \ege. } \seealso{ \code{\link{lgcp.estK}} for alternative method of fitting LGCP. \code{\link{matclust.estpcf}}, \code{\link{thomas.estpcf}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{pcf}} for the pair correlation function. 
} \examples{ data(redwood) u <- lgcp.estpcf(redwood, c(var=1, scale=0.1)) u plot(u) if(require(RandomFields)) { lgcp.estpcf(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat.core/man/methods.rho2hat.Rd0000644000176200001440000000412414141452520017116 0ustar liggesusers\name{methods.rho2hat} \alias{methods.rho2hat} %DoNotExport \alias{predict.rho2hat} \alias{print.rho2hat} \alias{plot.rho2hat} \title{ Methods for Intensity Functions of Two Spatial Covariates } \description{ These are methods for the class \code{"rho2hat"}. } \usage{ \method{plot}{rho2hat}(x, \dots, do.points=FALSE) \method{print}{rho2hat}(x, \dots) \method{predict}{rho2hat}(object, \dots, relative=FALSE) } \arguments{ \item{x,object}{ An object of class \code{"rho2hat"}. } \item{\dots}{ Arguments passed to other methods. } \item{do.points}{ Logical value indicating whether to plot the observed values of the covariates at the data points. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link{predict}} and \code{\link{plot}} for the class \code{"rho2hat"}. An object of class \code{"rho2hat"} is an estimate of the intensity of a point process, as a function of two given spatial covariates. See \code{\link{rho2hat}}. The method \code{plot.rho2hat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. In this plot the two axes represent possible values of the two covariates. The method \code{predict.rho2hat} computes a pixel image of the intensity \eqn{\rho(Z_1(u), Z_2(u))}{rho(Z1(u), Z2(u))} at each spatial location \eqn{u}, where \eqn{Z_1(u)}{Z1(u)} and \eqn{Z_2(u)}{Z2(u)} are the two spatial covariates. } \value{ For \code{predict.rho2hat} the value is a pixel image (object of class \code{"im"}). For other functions, the value is \code{NULL}. } \author{ \adrian } \seealso{ \code{\link{rho2hat}} } \examples{ r2 <- with(bei.extra, rho2hat(bei, elev, grad)) r2 plot(r2) plot(predict(r2)) } \keyword{spatial} \keyword{methods} spatstat.core/man/dppapproxkernel.Rd0000644000176200001440000000120614141452520017321 0ustar liggesusers\name{dppapproxkernel} \alias{dppapproxkernel} \title{Approximate Determinantal Point Process Kernel} \description{ Returns an approximation to the kernel of a determinantal point process, as a function of one argument \eqn{x}. } \usage{dppapproxkernel(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}. } \item{W}{Optional window -- undocumented at the moment.} } \value{A function} \author{ \adrian \rolf and \ege } spatstat.core/man/Kmodel.dppm.Rd0000644000176200001440000000230014141452520016251 0ustar liggesusers\name{Kmodel.dppm} \alias{Kmodel.detpointprocfamily} \alias{pcfmodel.detpointprocfamily} \alias{Kmodel.dppm} \alias{pcfmodel.dppm} \title{ K-function or Pair Correlation Function of a Determinantal Point Process Model } \description{Returns the theoretical \eqn{K}-function or theoretical pair correlation function of a determinantal point process model as a function of one argument \eqn{r}. 
} \usage{ \method{Kmodel}{dppm}(model, \dots) \method{pcfmodel}{dppm}(model, \dots) \method{Kmodel}{detpointprocfamily}(model, \dots) \method{pcfmodel}{detpointprocfamily}(model, \dots) } \arguments{ \item{model}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Ignored (not quite true -- there is some undocumented internal use)} } \value{ A function in the \R language, with one numeric argument \code{r}, that can be used to evaluate the theoretical \eqn{K}-function or pair correlation function of the model at distances \code{r}. } \author{ \spatstatAuthors. } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) KMatern <- Kmodel(model) pcfMatern <- pcfmodel(model) plot(KMatern, xlim = c(0,0.05)) plot(pcfMatern, xlim = c(0,0.05)) } spatstat.core/man/Kest.Rd0000644000176200001440000003246214141452520015021 0ustar liggesusers\name{Kest} \alias{Kest} \title{K-function} \description{ Estimates Ripley's reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape. } \usage{ Kest(X, \dots, r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. If necessary, specify \code{rmax}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"rigid"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed (by default), using a fast algorithm. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } \item{var.approx}{Logical. If \code{TRUE}, the approximate variance of \eqn{\hat K(r)}{Kest(r)} under CSR will also be computed. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K(r)} obtained by the edge corrections named.
If \code{var.approx=TRUE} then the return value also has columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat K(r)}{Kest(r)} under CSR. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The \eqn{K} function (variously called ``Ripley's K-function'' and the ``reduced second moment function'') of a stationary point process \eqn{X} is defined so that \eqn{\lambda K(r)}{lambda K(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical random point of \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K} function is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1977, 1988). In exploratory analyses, the estimate of \eqn{K} is a useful statistic summarising aspects of inter-point ``dependence'' and ``clustering''. For inferential purposes, the estimate of \eqn{K} is usually compared to the true value of \eqn{K} for a completely random (Poisson) point process, which is \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Deviations between the empirical and theoretical \eqn{K} curves may suggest spatial clustering or spatial regularity. This routine \code{Kest} estimates the \eqn{K} function of a stationary point process, given observation of the process inside a known, bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{K} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented for rectangular and polygonal windows (not for binary masks). } \item{translate/translation}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } \item{rigid}{Rigid motion correction (Ohser and Stoyan, 1981). Implemented for all window geometries, but slow for complex windows. } \item{none}{ Uncorrected estimate. An estimate of the K function \emph{without} edge correction (i.e. setting \eqn{e_{ij} = 1}{e[i,j] = 1} in the equation below). This estimate is \bold{biased} and should not be used for data analysis, \emph{unless} you have an extremely large point pattern (more than 100,000 points). } \item{best}{ Selects the best edge correction that is available for the geometry of the window. Currently this is Ripley's isotropic correction for a rectangular or polygonal window, and the translation correction for masks. } \item{good}{ Selects the best edge correction that can be computed in a reasonable time.
This is the same as \code{"best"} for datasets with fewer than 3000 points; otherwise the selected edge correction is \code{"border"}, unless there are more than 100,000 points, when it is \code{"none"}. } } The estimates of \eqn{K(r)} are of the form \deqn{ \hat K(r) = \frac a {n (n-1) } \sum_i \sum_j I(d_{ij}\le r) e_{ij} }{ Kest(r) = (a/(n * (n-1))) * sum[i,j] I(d[i,j] <= r) e[i,j] } where \eqn{a} is the area of the window, \eqn{n} is the number of data points, and the sum is taken over all ordered pairs of points \eqn{i} and \eqn{j} in \code{X}. Here \eqn{d_{ij}}{d[i,j]} is the distance between the two points, and \eqn{I(d_{ij} \le r)}{I(d[i,j] <= r)} is the indicator that equals 1 if the distance is less than or equal to \eqn{r}. The term \eqn{e_{ij}}{e[i,j]} is the edge correction weight (which depends on the choice of edge correction listed above). Note that this estimator assumes the process is stationary (spatially homogeneous). For inhomogeneous point patterns, see \code{\link{Kinhom}}. If the point pattern \code{X} contains more than about 3000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. If \code{X} contains more than about 100,000 points, even the border correction is time-consuming. You may want to consider setting \code{correction="none"} in this case. There is an even faster algorithm for the uncorrected estimate. Approximations to the variance of \eqn{\hat K(r)}{Kest(r)} are available, for the case of the isotropic edge correction estimator, \bold{assuming complete spatial randomness} (Ripley, 1988; Lotwick and Silverman, 1982; Diggle, 2003, pp. 51--53). If \code{var.approx=TRUE}, then the result of \code{Kest} also has a column named \code{rip} giving values of Ripley's (1988) approximation to \eqn{\mbox{var}(\hat K(r))}{var(Kest(r))}, and (if the window is a rectangle) a column named \code{ls} giving values of Lotwick and Silverman's (1982) approximation. If the argument \code{domain} is given, the calculations will be restricted to a subset of the data. In the formula for \eqn{K(r)} above, the \emph{first} point \eqn{i} will be restricted to lie inside \code{domain}. The result is an approximately unbiased estimate of \eqn{K(r)} based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The estimator \code{Kest} ignores marks. Its counterparts for multitype point patterns are \code{\link{Kcross}}, \code{\link{Kdot}}, and for general marked point patterns see \code{\link{Kmulti}}. Some writers, particularly Stoyan (1994, 1995) advocate the use of the ``pair correlation function'' \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}. See \code{\link{pcf}} on how to estimate this function.
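As an illustration of the \code{domain} argument described above, the following sketch restricts the first point of each pair to the left half of the window of the \code{cells} data (the unit square); the choice of subwindow is arbitrary. \preformatted{
  W <- owin(c(0, 0.5), c(0, 1))    # left half of the unit square
  KD <- Kest(cells, correction="border", domain=W)
}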
} \section{Envelopes, significance bands and confidence intervals}{ To compute simulation envelopes for the \eqn{K}-function under CSR, use \code{\link{envelope}}. To compute a confidence interval for the true \eqn{K}-function, use \code{\link{varblock}} or \code{\link{lohboot}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ohser, J. and Stoyan, D. (1981) On the second-order and orientation analysis of planar stationary point processes. \emph{Biometrical Journal} \bold{23}, 523--533. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \section{Warnings}{ The estimator of \eqn{K(r)} is approximately unbiased for each fixed \eqn{r}, for point processes which do not have very strong interaction. (For point processes with a strong clustering interaction, the estimator is negatively biased; for point processes with a strong inhibitive interaction, the estimator is positively biased.) Bias increases with \eqn{r} and depends on the window geometry. For a rectangular window it is prudent to restrict the \eqn{r} values to a maximum of \eqn{1/4} of the smaller side length of the rectangle (Ripley, 1977, 1988; Diggle, 1983). Bias may become appreciable for point patterns consisting of fewer than 15 points. While \eqn{K(r)} is always a non-decreasing function, the estimator of \eqn{K} is not guaranteed to be non-decreasing. This is rarely a problem in practice, except for the border correction estimators when the number of points is small. } \seealso{ \code{\link{localK}} to extract individual summands in the \eqn{K} function. \code{\link{pcf}} for the pair correlation. \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} for alternative summary functions. \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kmulti}} for counterparts of the \eqn{K} function for multitype point patterns. \code{\link{reduced.sample}} for the calculation of reduced sample estimators. } \examples{ X <- runifpoint(50) K <- Kest(X) K <- Kest(cells, correction="isotropic") plot(K) plot(K, main="K function for cells") # plot the L function plot(K, sqrt(iso/pi) ~ r) plot(K, sqrt(./pi) ~ r, ylab="L(r)", main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/update.kppm.Rd0000644000176200001440000000472614141452520016345 0ustar liggesusers\name{update.kppm} \alias{update.kppm} \title{Update a Fitted Cluster Point Process Model} \description{ \code{update} method for class \code{"kppm"}. 
} \usage{ \method{update}{kppm}(object, \dots, evaluate=TRUE, envir=environment(terms(object))) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}, obtained from \code{\link{kppm}}. } \item{\dots}{ Arguments passed to \code{\link{kppm}}. } \item{evaluate}{ Logical value indicating whether to return the updated fitted model (\code{evaluate=TRUE}, the default) or just the updated call to \code{kppm} (\code{evaluate=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{kppm}}. } } \details{ \code{object} should be a fitted cluster point process model, obtained from the model-fitting function \code{\link{kppm}}. The model will be updated according to the new arguments provided. If the argument \code{trend} is provided, it determines the intensity in the updated model. It should be an \R formula (with or without a left hand side). It may include the symbols \code{+} or \code{-} to specify addition or deletion of terms in the current model formula, as shown in the Examples below. The symbol \code{.} refers to the current contents of the formula. The intensity in the updated model is determined by the argument \code{trend} if it is provided, or otherwise by any unnamed argument that is a formula, or otherwise by the formula of the original model, \code{formula(object)}. The spatial point pattern data to which the new model is fitted is determined by the left hand side of the updated model formula, if this is present. Otherwise it is determined by the argument \code{X} if it is provided, or otherwise by any unnamed argument that is a point pattern or a quadrature scheme. The model is refitted using \code{\link{kppm}}. } \value{ Another fitted cluster point process model (object of class \code{"kppm"}). } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{methods.kppm}}, \code{\link{vcov.kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") fitx <- update(fit, ~ . + x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/Jmulti.Rd0000644000176200001440000001364314141452520015357 0ustar liggesusers\name{Jmulti} \alias{Jmulti} \title{ Marked J Function } \description{ For a marked point pattern, estimate the multitype \eqn{J} function summarising dependence between the points in subset \eqn{I} and those in subset \eqn{J}. } \usage{ Jmulti(X, I, J, eps=NULL, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. See Details. } \item{J}{Subset of points in \code{X} to which distances are measured. See Details. } \item{eps}{A positive number. The pixel resolution of the discrete approximation to Euclidean distance (see \code{\link{Jest}}). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only.
} \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{IJ}(r)}{J[IJ](r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{un}{the uncorrected estimate of \eqn{J_{IJ}(r)}{J[IJ](r)}, formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{IJ}(r)}{1 - G[IJ](r)} and \eqn{1 - F_{J}(r)}{1 - F[J](r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{IJ}(r)}{J[IJ](r)} for a marked Poisson process with the same estimated intensity, namely 1. } } \details{ The function \code{Jmulti} generalises \code{\link{Jest}} (for unmarked point patterns) and \code{\link{Jdot}} and \code{\link{Jcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. Define \deqn{J_{IJ}(r) = \frac{1 - G_{IJ}(r)}{1 - F_J(r)}}{ J[IJ](r) = (1 - G[IJ](r))/(1 - F[J](r))} where \eqn{F_J(r)}{F[J](r)} is the cumulative distribution function of the distance from a fixed location to the nearest point of \eqn{X_J}{X[J]}, and \eqn{G_{IJ}(r)}{G[IJ](r)} is the distribution function of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. It is assumed that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts.
In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users are strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jcross}}, \code{\link{Jdot}}, \code{\link{Jest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1,npoints(trees), by=50)] } Jm <- Jmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Jm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/compileK.Rd0000644000176200001440000001017314141452520015651 0ustar liggesusers\name{compileK} \alias{compileK} \alias{compilepcf} \title{ Generic Calculation of K Function and Pair Correlation Function } \description{ Low-level functions which calculate the estimated \eqn{K} function and estimated pair correlation function (or any similar functions) from a matrix of pairwise distances and optional weights. } \usage{ compileK(D, r, weights = NULL, denom = 1, check = TRUE, ratio = FALSE, fname = "K") compilepcf(D, r, weights = NULL, denom = 1, check = TRUE, endcorrect = TRUE, ratio=FALSE, \dots, fname = "g") } \arguments{ \item{D}{ A square matrix giving the distances between all pairs of points. } \item{r}{ An equally spaced, finely spaced sequence of distance values. } \item{weights}{ Optional numerical weights for the pairwise distances. A numeric matrix with the same dimensions as \code{D}. If absent, the weights are taken to equal 1. } \item{denom}{ Denominator for the estimator. A single number, or a numeric vector with the same length as \code{r}. See Details. } \item{check}{ Logical value specifying whether to check that \code{D} is a valid matrix of pairwise distances. } \item{ratio}{ Logical value indicating whether to store ratio information. See Details. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{density.default}} controlling the kernel smoothing. } \item{endcorrect}{ Logical value indicating whether to apply an end correction to the pair correlation estimate at \code{r=0}. } \item{fname}{ Character string giving the name of the function being estimated. } } \details{ These low-level functions construct estimates of the \eqn{K} function or pair correlation function, or any similar functions, given only the matrix of pairwise distances and optional weights associated with these distances. These functions are useful for code development and for teaching, because they perform a common task, and do the housekeeping required to make an object of class \code{"fv"} that represents the estimated function. However, they are not very efficient.
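As a minimal sketch of the basic workflow (an illustration only, not taken from the package's own examples: it uses the uncorrected estimator with unit weights, and appends a column of theoretical values with \code{\link{bind.fv}}):
\preformatted{
  X <- runifpoint(100)
  D <- pairdist(X)
  r <- seq(0, 0.25, by=0.01)
  lambda <- intensity(X)
  K <- compileK(D=D, r=r, denom=(npoints(X)-1) * lambda)
  # append the theoretical Poisson value K(r) = pi * r^2
  K <- bind.fv(K, data.frame(theo=pi * r^2), labl="K[pois](r)",
               desc="theoretical Poisson K function")
}
The formal definitions of the estimators are as follows.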
\code{compileK} calculates the weighted estimate of the \eqn{K} function, \deqn{ \hat K(r) = (1/v(r)) \sum_i \sum_j 1\{ d_{ij} \le r\} w_{ij} }{ K(r) = (1/v(r)) sum[i] sum[j] 1(d[i,j] <= r) w[i,j] } and \code{compilepcf} calculates the weighted estimate of the pair correlation function, \deqn{ \hat g(r) = (1/v(r)) \sum_i \sum_j \kappa( d_{ij} - r ) w_{ij} }{ g(r) = (1/v(r)) sum[i] sum[j] kappa(d[i,j] - r) w[i,j] } where \eqn{d_{ij}}{d[i,j]} is the distance between spatial points \eqn{i} and \eqn{j}, with corresponding weight \eqn{w_{ij}}{w[i,j]}, and \eqn{v(r)} is a specified denominator. Here \eqn{\kappa}{kappa} is a fixed-bandwidth smoothing kernel. For a point pattern in two dimensions, the usual denominator \eqn{v(r)} is constant for the \eqn{K} function, and proportional to \eqn{r} for the pair correlation function. See the Examples. The result is an object of class \code{"fv"} representing the estimated function. This object has only one column of function values. Additional columns (such as a column giving the theoretical value) must be added by the user, with the aid of \code{\link{bind.fv}}. If \code{ratio=TRUE}, the result also belongs to class \code{"rat"} and has attributes containing the numerator and denominator of the function estimate. This allows function estimates from several datasets to be pooled using \code{\link{pool}}. } \value{ An object of class \code{"fv"} representing the estimated function. } \author{ \adrian } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} for definitions of the \eqn{K} function and pair correlation function. \code{\link{bind.fv}} to add more columns. } \examples{ X <- japanesepines D <- pairdist(X) Wt <- edge.Ripley(X, D) lambda <- intensity(X) a <- (npoints(X)-1) * lambda r <- seq(0, 0.25, by=0.01) K <- compileK(D=D, r=r, weights=Wt, denom=a) g <- compilepcf(D=D, r=r, weights=Wt, denom= a * 2 * pi * r) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/matclust.estpcf.Rd0000644000176200001440000001476414141452520017234 0ustar liggesusers\name{matclust.estpcf} \alias{matclust.estpcf} \title{Fit the \Matern Cluster Point Process by Minimum Contrast Using Pair Correlation} \description{ Fits the \Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ matclust.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the \Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the \Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the \Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset.
The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the \Matern Cluster point process to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical pair correlation function of the \Matern Cluster process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The \Matern Cluster point process is described in \Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical pair correlation function of the \Matern Cluster process is \deqn{ g(r) = 1 + \frac 1 {4\pi R \kappa r} h(\frac{r}{2R}) }{ g(r) = 1 + h(r/(2*R))/(4 * pi * R * kappa * r) } where the radius \eqn{R} is the parameter \code{scale} and \deqn{ h(z) = \frac {16} \pi [ z \mbox{arccos}(z) - z^2 \sqrt{1 - z^2} ] }{ h(z) = (16/pi) * (z * arccos(z) - z^2 * sqrt(1 - z^2)) } for \eqn{z \le 1}{z <= 1}, and \eqn{h(z) = 0} for \eqn{z > 1}. The theoretical intensity of the \Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The \Matern Cluster process can be simulated using \code{\link{rMatClust}}. Homogeneous or inhomogeneous \Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}.
For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{matclust.estK}}, \code{\link{thomas.estpcf}}, \code{\link{thomas.estK}}, \code{\link{lgcp.estK}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estpcf(redwood, c(kappa=10, R=0.1)) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat.core/man/fitin.Rd0000644000176200001440000000472314141452520015223 0ustar liggesusers\name{fitin.ppm} \alias{fitin} \alias{fitin.ppm} \alias{fitin.profilepl} \title{Extract the Interaction from a Fitted Point Process Model} \description{ Given a point process model that has been fitted to point pattern data, this function extracts the interpoint interaction part of the model as a separate object. } \usage{ fitin(object) \method{fitin}{ppm}(object) \method{fitin}{profilepl}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"} or \code{"profilepl"}). } } \details{ An object of class \code{"ppm"} describes a fitted point process model. It contains information about the original data to which the model was fitted, the spatial trend that was fitted, the interpoint interaction that was fitted, and other data. See \code{\link{ppm.object}} for details of this class. The function \code{fitin} extracts from this model the information about the fitted interpoint interaction only. The information is organised as an object of class \code{"fii"} (fitted interpoint interaction). This object can be printed or plotted. Users may find this a convenient way to plot the fitted interpoint interaction term, as shown in the Examples. For a pairwise interaction, the plot of the fitted interaction shows the pair interaction function (the contribution to the probability density from a pair of points as a function of the distance between them). For a higher-order interaction, the plot shows the strongest interaction (the value most different from 1) that could ever arise at the given distance. The fitted interaction coefficients can also be extracted from this object using \code{\link{coef}}. } \value{ An object of class \code{"fii"} representing the fitted interpoint interaction. This object can be printed and plotted. } \author{ \spatstatAuthors. } \seealso{ Methods for handling fitted interactions: \code{\link{methods.fii}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}}. Background: \code{\link{ppm}}, \code{\link{ppm.object}}.
} \examples{ # unmarked model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4))) f <- fitin(model) f plot(f) # extract fitted interaction coefficients coef(f) # multitype # fit the stationary multitype Strauss process to `amacrine' r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2) model <- ppm(amacrine ~1, MultiStrauss(r)) f <- fitin(model) f plot(f) } \keyword{spatial} \keyword{models} spatstat.core/man/ragsMultiHard.Rd0000644000176200001440000000543214141452520016656 0ustar liggesusers\name{ragsMultiHard} \alias{ragsMultiHard} \title{ Alternating Gibbs Sampler for Multitype Hard Core Process } \description{ Generate a realisation of the multitype hard core point process using the alternating Gibbs sampler. } \usage{ ragsMultiHard(beta, hradii, \dots, types=NULL, bmax = NULL, periodic=FALSE, ncycles = 100) } \arguments{ \item{beta}{ First order trend. A numeric vector, a pixel image, a function, a list of functions, or a list of pixel images. } \item{hradii}{ Matrix of hard core radii between each pair of types. Diagonal entries should be \code{0} or \code{NA}. } \item{types}{ Vector of all possible types for the multitype point pattern. } \item{\dots}{ Arguments passed to \code{\link{rmpoispp}} when generating random points. } \item{bmax}{ Optional upper bound on \code{beta}. } \item{periodic}{ Logical value indicating whether to measure distances in the periodic sense, so that opposite sides of the (rectangular) window are treated as identical. } \item{ncycles}{ Number of cycles of the sampler to be performed. } } \details{ The Alternating Gibbs Sampler for a multitype point process is an iterative simulation procedure. Each step of the sampler updates the pattern of points of a particular type \code{i}, by drawing a realisation from the conditional distribution of points of type \code{i} given the points of all other types. Successive steps of the sampler update the points of type 1, then type 2, type 3, and so on. This is an experimental implementation which currently works only for multitype hard core processes (see \code{\link{MultiHard}}) in which there is no interaction between points of the same type, and for the area-interaction process (see \code{\link{ragsAreaInter}}). The argument \code{beta} gives the first order trend for each possible type of point. It may be a single number, a numeric vector, a \code{function(x,y)}, a pixel image, a list of functions, a \code{function(x,y,m)}, or a list of pixel images. The argument \code{hradii} is the matrix of hard core radii between each pair of possible types of points. Two points of types \code{i} and \code{j} respectively are forbidden to lie closer than a distance \code{hradii[i,j]} apart. The diagonal of this matrix must contain \code{NA} or \code{0} values, indicating that there is no hard core constraint applying between points of the same type. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{rags}}, \code{\link{ragsAreaInter}} } \examples{ b <- c(30,20) h <- 0.05 * matrix(c(0,1,1,0), 2, 2) ragsMultiHard(b, h, ncycles=10) ragsMultiHard(b, h, ncycles=5, periodic=TRUE) } \keyword{spatial} \keyword{datagen} spatstat.core/man/bw.relrisk.Rd0000644000176200001440000000726714141452520016202 0ustar liggesusers\name{bw.relrisk} \alias{bw.relrisk} \title{ Cross Validated Bandwidth Selection for Relative Risk Estimation } \description{ Uses cross-validation to select a smoothing bandwidth for the estimation of relative risk. 
} \usage{ bw.relrisk(X, method = "likelihood", nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{method}{ Character string determining the cross-validation method. Current options are \code{"likelihood"}, \code{"leastsquares"} or \code{"weightedleastsquares"}. } \item{nh}{ Number of trial values of smoothing bandwidth \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwidth \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth for the nonparametric estimation of relative risk using \code{\link{relrisk}}. Consider the indicators \eqn{y_{ij}}{y[i,j]} which equal \eqn{1} when data point \eqn{x_i}{x[i]} belongs to type \eqn{j}, and equal \eqn{0} otherwise. For a particular value of smoothing bandwidth, let \eqn{\hat p_j(u)}{p*[j](u)} be the estimated probabilities that a point at location \eqn{u} will belong to type \eqn{j}. Then the bandwidth is chosen to minimise either the negative likelihood, the squared error, or the approximately standardised squared error, of the indicators \eqn{y_{ij}}{y[i,j]} relative to the fitted values \eqn{\hat p_j(x_i)}{p*[j](x[i])}. See Diggle (2003) or Baddeley et al (2015). The result is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on multiples of Stoyan's rule of thumb \code{\link{bw.stoyan}}. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{relrisk}}, \code{\link{bw.stoyan}} } \examples{ data(urkiola) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.relrisk(urkiola) b plot(b) b <- bw.relrisk(urkiola, hmax=20) plot(b) \testonly{spatstat.options(op)} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. } \author{ \adrian and \rolf.
} \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/markvario.Rd0000644000176200001440000000721414141452520016103 0ustar liggesusers\name{markvario} \alias{markvario} \title{Mark Variogram} \description{ Estimate the mark variogram of a marked point pattern. } \usage{ markvario(X, correction = c("isotropic", "Ripley", "translate"), r = NULL, method = "density", ..., normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. It must have marks which are numeric. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} should be evaluated. There is a sensible default. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Other arguments passed to \code{\link{markcorr}}, or passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{If \code{TRUE}, normalise the variogram by dividing it by the estimated mark variance. } } \details{ The mark variogram \eqn{\gamma(r)}{gamma(r)} of a marked point process \eqn{X} is a measure of the dependence between the marks of two points of the process a distance \eqn{r} apart. It is informally defined as \deqn{ \gamma(r) = E[\frac 1 2 (M_1 - M_2)^2] }{ gamma(r) = E[(1/2) * (M1 - M2)^2 ] } where \eqn{E[ ]} denotes expectation and \eqn{M_1,M_2}{M1,M2} are the marks attached to two points of the process a distance \eqn{r} apart. The mark variogram of a marked point process is analogous, but \bold{not equivalent}, to the variogram of a random field in geostatistics. See Waelder and Stoyan (1996). } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} has been estimated } \item{theo}{the theoretical value of \eqn{\gamma(r)}{gamma(r)} when the marks attached to different points are independent; equal to the sample variance of the marks } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{\gamma(r)}{gamma(r)} obtained by the edge corrections named. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons. Mase, S. (1996) The threshold method for estimating annual rainfall. \emph{Annals of the Institute of Statistical Mathematics} \bold{48}, 201--213. Waelder, O. and Stoyan, D. (1996) On variograms in point process statistics. \emph{Biometrical Journal} \bold{38}, 895--905. } \seealso{ Mark correlation function \code{\link{markcorr}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks.
} \examples{ # Longleaf Pine data # marks represent tree diameter data(longleaf) # Subset of this large pattern swcorner <- owin(c(0,100),c(0,100)) sub <- longleaf[ , swcorner] # mark variogram mv <- markvario(sub) plot(mv) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/envelope.envelope.Rd0000644000176200001440000000656114141452520017545 0ustar liggesusers\name{envelope.envelope} \alias{envelope.envelope} \title{ Recompute Envelopes } \description{ Given a simulation envelope (object of class \code{"envelope"}), compute another envelope from the same simulation data using different parameters. } \usage{ \method{envelope}{envelope}(Y, fun = NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) } \arguments{ \item{Y}{ A simulation envelope (object of class \code{"envelope"}). } \item{fun}{ Optional. Summary function to be applied to the simulated point patterns. } \item{\dots,transform,global,VARIANCE}{ Parameters controlling the type of envelope that is re-computed. See \code{\link{envelope}}. } } \details{ This function can be used to re-compute a simulation envelope from previously simulated data, using different parameter settings for the envelope: for example, a different significance level, or a global envelope instead of a pointwise envelope. The function \code{\link{envelope}} is generic. This is the method for the class \code{"envelope"}. The argument \code{Y} should be a simulation envelope (object of class \code{"envelope"}) produced by any of the methods for \code{\link{envelope}}. Additionally, \code{Y} must contain either \itemize{ \item the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savepatterns=TRUE}); \item the summary functions of the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savefuns=TRUE}). } If the argument \code{fun} is given, it should be a summary function that can be applied to the simulated point patterns that were used to create \code{Y}. The envelope of the summary function \code{fun} for these point patterns will be computed using the parameters specified in \code{\dots}. If \code{fun} is not given, then: \itemize{ \item If \code{Y} contains the summary functions that were used to compute the original envelope, then the new envelope will be computed from these original summary functions. \item Otherwise, if \code{Y} contains the simulated point patterns, then the \eqn{K} function \code{\link{Kest}} will be applied to each of these simulated point patterns, and the new envelope will be based on the \eqn{K} functions. } The new envelope will be computed using the parameters specified in \code{\dots}. See \code{\link{envelope}} for a full list of envelope parameters. Frequently-used parameters include \code{nrank} and \code{nsim} (to change the number of simulations used and the significance level of the envelope), \code{global} (to change from pointwise to global envelopes) and \code{VARIANCE} (to compute the envelopes from the sample moments instead of the ranks). } \value{ An envelope (object of class \code{"envelope"}).
} \seealso{ \code{\link{envelope}} } \examples{ E <- envelope(cells, Kest, nsim=19, savefuns=TRUE, savepatterns=TRUE) E2 <- envelope(E, nrank=2) Eg <- envelope(E, global=TRUE) EG <- envelope(E, Gest) EL <- envelope(E, transform=expression(sqrt(./pi))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/rmhexpand.Rd0000644000176200001440000001273214141452520016077 0ustar liggesusers\name{rmhexpand} \alias{rmhexpand} \title{ Specify Simulation Window or Expansion Rule } \description{ Specify a spatial domain in which point process simulations will be performed. Alternatively, specify a rule which will be used to determine the simulation window. } \usage{ rmhexpand(x = NULL, ..., area = NULL, length = NULL, distance = NULL) } \arguments{ \item{x}{ Any kind of data determining the simulation window or the expansion rule. A window (object of class \code{"owin"}) specifying the simulation window, a numerical value specifying an expansion factor or expansion distance, a list containing one numerical value, an object of class \code{"rmhexpand"}, or \code{NULL}. } \item{\dots}{ Ignored. } \item{area}{ Area expansion factor. Incompatible with other arguments. } \item{length}{ Length expansion factor. Incompatible with other arguments. } \item{distance}{ Expansion distance (buffer width). Incompatible with other arguments. } } \details{ In the Metropolis-Hastings algorithm \code{\link{rmh}} for simulating spatial point processes, simulations are usually carried out on a spatial domain that is larger than the original window of the point process model, then subsequently clipped to the original window. The command \code{rmhexpand} can be used to specify the simulation window, or to specify a rule which will later be used to determine the simulation window from data. These arguments are mutually incompatible: at most one of them should be given. If the first argument \code{x} is given, it may be any of the following: \itemize{ \item a window (object of class \code{"owin"}) specifying the simulation window. \item an object of class \code{"rmhexpand"} specifying the expansion rule. \item a single numerical value, without attributes. This will be interpreted as the value of the argument \code{area}. \item either \code{c(area=v)} or \code{list(area=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{area}. \item either \code{c(length=v)} or \code{list(length=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{length}. \item either \code{c(distance=v)} or \code{list(distance=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{distance}. \item \code{NULL}, meaning that the expansion rule is not yet determined. } If one of the arguments \code{area}, \code{length} or \code{distance} is given, then the simulation window is determined from the original data window as follows. \describe{ \item{area}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{area} should be a numerical value, greater than or equal to 1. It specifies the area expansion factor, i.e. the ratio of the area of the simulation window to the area of the original point process window's bounding box.
} \item{length}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{length} should be a numerical value, greater than or equal to 1. It specifies the length expansion factor, i.e. the ratio of the width (height) of the simulation window to the width (height) of the original point process window's bounding box. } \item{distance}{ The argument \code{distance} should be a numerical value, greater than or equal to 0. It specifies the width of a buffer region around the original data window. If the original data window is a rectangle, then this window is extended by a margin of width equal to \code{distance} around all sides of the original rectangle. The result is a rectangle. If the original data window is not a rectangle, then morphological dilation is applied using \code{\link{dilation.owin}} so that a margin or buffer of width equal to \code{distance} is created around all sides of the original window. The result is a non-rectangular window, typically of a different shape. } } } \section{Undetermined expansion}{ If \code{expand=NULL}, this is interpreted to mean that the expansion rule is \dQuote{not yet decided}. Expansion will be decided later, by the simulation algorithm \code{\link{rmh}}. If the model cannot be expanded (for example if the covariate data in the model are not available on a larger domain) then expansion will not occur. If the model can be expanded, then if the point process model has a finite interaction range \code{r}, the default is \code{rmhexpand(distance=2*r)}, and otherwise \code{rmhexpand(area=2)}. } \value{ An object of class \code{"rmhexpand"} specifying the expansion rule. There is a \code{print} method for this class. } \author{\adrian and \rolf } \seealso{ \code{\link{expand.owin}} to apply the rule to a window. \code{\link{will.expand}} to test whether expansion will occur. \code{\link{rmh}}, \code{\link{rmhcontrol}} for background details. } \examples{ rmhexpand() rmhexpand(2) rmhexpand(1) rmhexpand(length=1.5) rmhexpand(distance=0.1) rmhexpand(letterR) } \keyword{spatial} \keyword{datagen} spatstat.core/man/as.function.fv.Rd0000644000176200001440000000736714141452520016762 0ustar liggesusers\name{as.function.fv} \alias{as.function.fv} \title{ Convert Function Value Table to Function } \description{ Converts an object of class \code{"fv"} to an \R language function. } \usage{ \method{as.function}{fv}(x, ..., value=".y", extrapolate=FALSE) } \arguments{ \item{x}{ Object of class \code{"fv"} or \code{"rhohat"}. } \item{\dots}{ Ignored. } \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. } } \details{ A function value table (object of class \code{"fv"}) is a convenient way of storing and plotting several different estimates of the same function. Objects of this class are returned by many commands in \pkg{spatstat}, such as \code{\link[spatstat.core]{Kest}}, which returns an estimate of Ripley's \eqn{K}-function for a point pattern dataset. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.fv}. It converts an object \code{x} of class \code{"fv"} to an \R function \code{f}. 
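For instance, a minimal sketch of the conversion (the interpolation and extrapolation behaviour shown in the comments is described in detail below):
\preformatted{
  K <- Kest(cells)
  f <- as.function(K)
  f(0.05)  # interpolated value of the recommended estimate
  g <- as.function(K, extrapolate=TRUE)
  g(1)     # value at the nearest endpoint of the domain
}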
If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). To apply different rules to the left and right extremes, use \code{extrapolate=c(TRUE,FALSE)} and so on. Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the original argument of the function table). If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}. The command \code{as.function.fv} is a method for the generic command \code{\link{as.function}}. } \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table. } \author{ \adrian and \rolf } \seealso{ \code{\link[spatstat.core]{as.function.rhohat}}, \code{\link{fv}}, \code{\link{fv.object}}, \code{\link{fvnames}}, \code{\link{plot.fv}}, \code{\link[spatstat.core]{Kest}} } \examples{ K <- Kest(cells) f <- as.function(K) f f(0.1) g <- as.function(K, value=c("iso", "trans")) g g(0.1, "trans") } \keyword{spatial} \keyword{methods} spatstat.core/man/pool.fv.Rd0000644000176200001440000000317214141452520015472 0ustar liggesusers\name{pool.fv} \alias{pool.fv} \title{Pool Several Functions} \description{ Combine several summary functions into a single function. } \usage{ \method{pool}{fv}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"fv"}. } \item{weights}{ Optional numeric vector of weights for the functions. } \item{relabel}{ Logical value indicating whether the columns of the resulting function should be labelled to show that they were obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fv"} of summary functions. It is used to combine several estimates of the same function into a single function. Each of the arguments \code{\dots} must be an object of class \code{"fv"}. They must be compatible, in that they are estimates of the same function, and were computed using the same options.
The sample mean and sample variance of the corresponding estimates will be computed. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pool}}, \code{\link{pool.anylist}}, \code{\link{pool.rat}} } \examples{ K <- lapply(waterstriders, Kest, correction="iso") Kall <- pool(K[[1]], K[[2]], K[[3]]) Kall <- pool(as.anylist(K)) plot(Kall, cbind(pooliso, pooltheo) ~ r, shade=c("loiso", "hiiso"), main="Pooled K function of waterstriders") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/intensity.dppm.Rd0000644000176200001440000000133214141452520017070 0ustar liggesusers\name{intensity.dppm} \alias{intensity.dppm} \alias{intensity.detpointprocfamily} \title{Intensity of Determinantal Point Process Model} \description{Extracts the intensity of a determinantal point process model.} \usage{ \method{intensity}{detpointprocfamily}(X, \dots) \method{intensity}{dppm}(X, \dots) } \arguments{ \item{X}{ A determinantal point process model (object of class \code{"detpointprocfamily"} or \code{"dppm"}). } \item{\dots}{Ignored.} } \value{ A numeric value (if the model is stationary), a pixel image (if the model is non-stationary) or \code{NA} if the intensity is unknown for the model. } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/predict.slrm.Rd0000644000176200001440000000536714141452520016525 0ustar liggesusers\name{predict.slrm} \Rdversion{1.1} \alias{predict.slrm} \title{ Predicted or Fitted Values from Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel, or the fitted point process intensity, or the values of the linear predictor in each pixel. } \usage{ \method{predict}{slrm}(object, ..., type = "intensity", newdata=NULL, window=NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Optional arguments passed to \code{\link{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } \item{newdata}{ Optional. List containing new covariate values for the prediction. See Details. } \item{window}{ Optional. New window in which to predict. An object of class \code{"owin"}. } } \details{ This is a method for \code{\link[stats]{predict}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The argument \code{type} determines which quantity is computed. If \code{type="intensity"}, the value of the point process intensity is computed at each pixel. If \code{type="probabilities"}, the probability of the presence of a random point in each pixel is computed. If \code{type="link"}, the value of the linear predictor is computed at each pixel. If \code{newdata = NULL} (the default), the algorithm computes fitted values of the model (based on the data that was originally used to fit the model \code{object}). If \code{newdata} is given, the algorithm computes predicted values of the model, using the new values of the covariates provided by \code{newdata}. The argument \code{newdata} should be a list; names of entries in the list should correspond to variables appearing in the model formula of the \code{object}. Each list entry may be a pixel image or a single numeric value.
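For instance, a minimal sketch (an illustration only) of extracting each of the three quantities from a fitted model:
\preformatted{
  fit <- slrm(japanesepines ~ x + y)
  p  <- predict(fit, type="probabilities") # presence probability in each pixel
  la <- predict(fit, type="intensity")     # fitted intensity in each pixel
  lp <- predict(fit, type="link")          # linear predictor in each pixel
}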
} \value{ A pixel image (object of class \code{"im"}) containing the predicted values for each pixel. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(predict(fit)) data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fitc <- slrm(X ~ Z) pc <- predict(fitc) Znew <- distmap(copper$Lines)[copper$SouthWindow] pcnew <- predict(fitc, newdata=list(Z=Znew)) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/nncorr.Rd0000644000176200001440000002000314141452520015400 0ustar liggesusers\name{nncorr} \alias{nncorr} \alias{nnmean} \alias{nnvario} \title{Nearest-Neighbour Correlation Indices of Marked Point Pattern} \description{ Computes nearest-neighbour correlation indices of a marked point pattern, including the nearest-neighbour mark product index (default case of \code{nncorr}), the nearest-neighbour mark index (\code{nnmean}), and the nearest-neighbour variogram index (\code{nnvario}). } \usage{ nncorr(X, f = function(m1, m2) { m1 * m2 }, k = 1, \dots, use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL, na.action="warn") nnmean(X, k=1, na.action="warn") nnvario(X, k=1, na.action="warn") } \arguments{ \item{X}{ The observed point pattern. An object of class \code{"ppp"}. } \item{f}{ Function \eqn{f} used in the definition of the nearest neighbour correlation. There is a sensible default that depends on the type of marks of \code{X}. } \item{k}{ Integer. The \code{k}-th nearest neighbour of each point will be used. } \item{\dots}{ Extra arguments passed to \code{f}. } \item{use,method}{ Arguments passed to the standard correlation function \code{\link{cor}}. } \item{denominator}{ Internal use only. } \item{na.action}{ Character string (passed to \code{\link{is.marked.ppp}}) specifying what to do if the marks contain \code{NA} values. } } \details{ The nearest neighbour correlation index \eqn{\bar n_f}{nbar} of a marked point process \eqn{X} is a number measuring the dependence between the mark of a typical point and the mark of its nearest neighbour. The command \code{nncorr} computes the nearest neighbour correlation index based on any test function \code{f} provided by the user. The default behaviour of \code{nncorr} is to compute the nearest neighbour mark product index. The commands \code{nnmean} and \code{nnvario} are convenient abbreviations for other special choices of \code{f}. In the default case, \code{nncorr(X)} computes three different versions of the nearest-neighbour correlation index: the unnormalised, normalised, and classical correlations. \describe{ \item{unnormalised:}{ The \bold{unnormalised} nearest neighbour correlation (Stoyan and Stoyan, 1994, section 14.7) is defined as \deqn{\bar n_f = E[f(M, M^\ast)]}{nbar[f] = E[f(M, M*)]} where \eqn{E[]} denotes mean value, \eqn{M} is the mark attached to a typical point of the point process, and \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour (i.e. the nearest other point of the point process). Here \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. 
Common choices of \eqn{f} are: for continuous real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2).} For example, in the second case, the unnormalised nearest neighbour correlation \eqn{\bar n_f}{nbar[f]} equals the proportion of points in the pattern which have the same mark as their nearest neighbour. Note that \eqn{\bar n_f}{nbar[f]} is not a ``correlation'' in the usual statistical sense. It can take values greater than 1. } \item{normalised:}{ We can define a \bold{normalised} nearest neighbour correlation by \deqn{\bar m_f = \frac{E[f(M,M^\ast)]}{E[f(M,M')]}}{mbar[f] = E[f(M,M*)]/E[f(M,M')]} where again \eqn{M} is the mark attached to a typical point, \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour, and \eqn{M'} is an independent copy of \eqn{M} with the same distribution. This normalisation is also not a ``correlation'' in the usual statistical sense, but is normalised so that the value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{\bar m_f = 1}{mbar[f] = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. } \item{classical:}{ Finally if the marks of \code{X} are real numbers, we can also compute the \bold{classical} correlation, that is, the correlation coefficient of the two random variables \eqn{M} and \eqn{M^\ast}{M*}. The classical correlation has a value between \eqn{-1} and \eqn{1}. Values close to \eqn{-1} or \eqn{1} indicate strong dependence between the marks. } } In the default case where \code{f} is not given, \code{nncorr(X)} computes \itemize{ \item If the marks of \code{X} are real numbers, the unnormalised and normalised versions of the nearest-neighbour product index \eqn{E[M \, M^\ast]}{E[M * M*]}, and the classical correlation between \eqn{M} and \eqn{M^\ast}{M*}. \item If the marks of \code{X} are factor valued, the unnormalised and normalised versions of the nearest-neighbour equality index \eqn{P[M = M^\ast]}{P[M = M*]}. } The wrapper functions \code{nnmean} and \code{nnvario} compute the correlation indices for two special choices of the function \eqn{f(m_1,m_2)}{f(m1,m2)}. They are defined only when the marks are numeric. \itemize{ \item \code{nnmean} computes the correlation indices for \eqn{f(m_1,m_2) = m_1}{f(m1,m2) = m1}. The unnormalised index is simply the mean value of the mark of the neighbour of a typical point, \eqn{E[M^\ast]}{E[M*]}, while the normalised index is \eqn{E[M^\ast]/E[M]}{E[M*]/E[M]}, the ratio of the mean mark of the neighbour of a typical point to the mean mark of a typical point. \item \code{nnvario} computes the correlation indices for \eqn{f(m_1,m_2) = (1/2) (m_1-m_2)^2}{f(m1,m2) = (1/2) * (m1-m2)^2}. } The argument \code{X} must be a point pattern (object of class \code{"ppp"}) and must be a marked point pattern. (The marks may be a data frame, containing several columns of mark variables; each column is treated separately.) If the argument \code{f} is given, it must be a function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative. 
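For example, a minimal sketch of supplying a custom test function for numeric marks (here the halved squared mark difference, which reproduces \code{nnvario}):
\preformatted{
  nncorr(longleaf, f=function(m1, m2) { (1/2) * (m1 - m2)^2 })
  # equivalent to nnvario(longleaf)
}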
The arguments \code{use} and \code{method} control the calculation of the classical correlation using \code{\link{cor}}, as explained in the help file for \code{\link{cor}}. Other arguments may be passed to \code{f} through the \code{...} argument. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated using the \sQuote{border method} edge correction. } \value{ Labelled vector of length 2 or 3 containing the unnormalised and normalised nearest neighbour correlations, and the classical correlation if appropriate. Alternatively a matrix with 2 or 3 rows, containing this information for each mark variable. } \examples{ nnmean(finpines) nnvario(finpines) nncorr(finpines) # heights of neighbouring trees are slightly negatively correlated nncorr(amacrine) # neighbouring cells are usually of different type } \references{ Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/clusterkernel.Rd0000644000176200001440000000231614141452520016770 0ustar liggesusers\name{clusterkernel} \alias{clusterkernel} \alias{clusterkernel.character} \alias{clusterkernel.kppm} \title{ Extract Cluster Offspring Kernel } \description{ Given a cluster point process model, this command returns the probability density of the cluster offspring. } \usage{ clusterkernel(model, \dots) \method{clusterkernel}{kppm}(model, \dots) \method{clusterkernel}{character}(model, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } } \details{ Given a specification of a cluster point process model, this command returns a \code{function(x,y)} giving the two-dimensional probability density of the cluster offspring points assuming a cluster parent located at the origin. } \value{ A function in the \R language with arguments \code{x,y,\dots}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{clusterfield}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "MatClust") f <- clusterkernel(fit) f(0.1, 0.2) } \keyword{spatial} spatstat.core/man/coef.ppm.Rd0000644000176200001440000000370614141452520015621 0ustar liggesusers\name{coef.ppm} \alias{coef.ppm} \title{ Coefficients of Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{ppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } } \value{ A vector containing the fitted coefficients. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This function extracts the vector of coefficients of the fitted model.
This is the estimate of the parameter vector \eqn{\theta}{theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ lambda(u,x) = exp(theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. Use \code{\link{print.ppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{print.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ data(cells) poi <- ppm(cells, ~1, Poisson()) coef(poi) # This is the log of the fitted intensity of the Poisson process stra <- ppm(cells, ~1, Strauss(r=0.07)) coef(stra) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/with.msr.Rd0000644000176200001440000000536314141452520015666 0ustar liggesusers\name{with.msr} \alias{with.msr} \title{Evaluate Expression Involving Components of a Measure} \description{ An expression involving the names of components of a measure is evaluated. } \usage{ \method{with}{msr}(data, expr, \dots) } \arguments{ \item{data}{ A measure (object of class \code{"msr"}). } \item{expr}{ An expression to be evaluated. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[base]{with}} for the class \code{"msr"}. The argument \code{data} should be an object of class \code{"msr"} representing a measure (a function which assigns a value to each subset of two-dimensional space). This function can be used to extract the components of the measure, or to perform more complicated manipulations of the components. The argument \code{expr} should be an un-evaluated expression in the \R language. The expression may involve any of the variable names listed below with their corresponding meanings. \tabular{ll}{ \code{qlocations} \tab (point pattern) all quadrature locations \cr \code{qweights} \tab (numeric) all quadrature weights \cr \code{density} \tab (numeric) density value at each quadrature point \cr \code{discrete} \tab (numeric) discrete mass at each quadrature point \cr \code{continuous} \tab (numeric) increment of continuous component \cr \code{increment} \tab (numeric) increment of measure \cr \code{is.atom} \tab (logical) whether quadrature point is an atom \cr \code{atoms} \tab (point pattern) locations of atoms \cr \code{atommass} \tab (numeric) masses of atoms } The measure is the sum of discrete and continuous components. The discrete component assigns non-zero mass to several points called atoms. The continuous component has a density which should be integrated over a region to determine the value for that region. An object of class \code{"msr"} approximates the continuous component by a sum over quadrature points. The quadrature points are chosen so that they include the atoms of the measure. In the list above, we have \code{increment = continuous + discrete}, \code{continuous = density * qweights}, \code{is.atom = (discrete > 0)}, \code{atoms = qlocations[is.atom]} and \code{atommass = discrete[is.atom]}. } \value{ The result of evaluating the expression could be an object of any kind. } \author{ \spatstatAuthors.
} \seealso{ \code{\link{msr}}, \code{\link{split.msr}}, \code{\link{measureContinuous}}, \code{\link{measurePositive}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") with(rp, atoms) with(rp, qlocations \%mark\% continuous) } \keyword{spatial} \keyword{manip} spatstat.core/man/clusterset.Rd0000644000176200001440000001131014141452520016277 0ustar liggesusers\name{clusterset} \alias{clusterset} \title{ Allard-Fraley Estimator of Cluster Feature } \description{ Detect high-density features in a spatial point pattern using the (unrestricted) Allard-Fraley estimator. } \usage{ clusterset(X, what=c("marks", "domain"), \dots, verbose=TRUE, fast=FALSE, exact=!fast) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}). } \item{what}{ Character string or character vector specifying the type of result. See Details. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical. If \code{FALSE} (the default), the Dirichlet tile areas will be computed exactly using polygonal geometry, so that the optimal choice of tiles will be computed exactly. If \code{TRUE}, the Dirichlet tile areas will be approximated using pixel counting, so the optimal choice will be approximate. } \item{exact}{ Logical. If \code{TRUE}, the Allard-Fraley estimator of the domain will be computed exactly using polygonal geometry. If \code{FALSE}, the Allard-Fraley estimator of the domain will be approximated by a binary pixel mask. The default is \code{!fast}, so that the estimator is computed exactly unless \code{fast=TRUE} is given. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution if \code{exact=FALSE}. } } \details{ Allard and Fraley (1997) developed a technique for recognising features of high density in a spatial point pattern in the presence of random clutter. This algorithm computes the \emph{unrestricted} Allard-Fraley estimator. The Dirichlet (Voronoi) tessellation of the point pattern \code{X} is computed. The smallest \code{m} Dirichlet cells are selected, where the number \code{m} is determined by a maximum likelihood criterion. \itemize{ \item If \code{fast=FALSE} (the default), the areas of the tiles of the Dirichlet tessellation will be computed exactly using polygonal geometry. This ensures that the optimal selection of tiles is computed exactly. \item If \code{fast=TRUE}, the Dirichlet tile areas will be approximated by counting pixels. This is faster, and is usually correct (depending on the pixel resolution, which is controlled by the arguments \code{\dots}). } The type of result depends on the character vector \code{what}. \itemize{ \item If \code{what="marks"} the result is the point pattern \code{X} with a vector of marks labelling each point with a value \code{yes} or \code{no} depending on whether the corresponding Dirichlet cell is selected by the Allard-Fraley estimator. In other words each point of \code{X} is labelled as either a cluster point or a non-cluster point. \item If \code{what="domain"}, the result is the Allard-Fraley estimator of the cluster feature set, which is the union of all the selected Dirichlet cells, represented as a window (object of class \code{"owin"}). \item If \code{what=c("marks", "domain")} the result is a list containing both of the results described above. } Computation of the Allard-Fraley set estimator depends on the argument \code{exact}.
\itemize{ \item If \code{exact=TRUE} (the default), the Allard-Fraley set estimator will be computed exactly using polygonal geometry. The result is a polygonal window. \item If \code{exact=FALSE}, the Allard-Fraley set estimator will be approximated by a binary pixel mask. This is faster than the exact computation. The result is a binary mask. } } \value{ If \code{what="marks"}, a multitype point pattern (object of class \code{"ppp"}). If \code{what="domain"}, a window (object of class \code{"owin"}). If \code{what=c("marks", "domain")} (the default), a list consisting of a multitype point pattern and a window. } \references{ Allard, D. and Fraley, C. (1997) Nonparametric maximum likelihood estimation of features in spatial point processes using Voronoi tessellation. \emph{Journal of the American Statistical Association} \bold{92}, 1485--1493. } \author{ \adrian and \rolf } \seealso{ \code{\link{nnclean}}, \code{\link{sharpen}} } \examples{ opa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X, 'm')") plot(clusterset(X, "marks", fast=TRUE), add=TRUE, chars=c(1, 3), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, "domain", exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(opa) } \keyword{spatial} \keyword{classif} spatstat.core/man/methods.kppm.Rd0000644000176200001440000000310014141452520016507 0ustar liggesusers\name{methods.kppm} \alias{methods.kppm} %DoNotExport \alias{coef.kppm} \alias{formula.kppm} \alias{print.kppm} \alias{terms.kppm} \alias{labels.kppm} \title{ Methods for Cluster Point Process Models } \description{ These are methods for the class \code{"kppm"}. } \usage{ \method{coef}{kppm}(object, \dots) \method{formula}{kppm}(x, \dots) \method{print}{kppm}(x, ...) \method{terms}{kppm}(x, \dots) \method{labels}{kppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"kppm"}, representing a fitted cluster point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted cluster point process model. It is obtained from \code{\link{kppm}}. The method \code{coef.kppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the clustering parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link{as.ppm.kppm}}. } \examples{ data(redwood) fit <- kppm(redwood ~ x, "MatClust") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat.core/man/clusterfield.Rd0000644000176200001440000000674114141452520016601 0ustar liggesusers\name{clusterfield} \alias{clusterfield} \alias{clusterfield.character} \alias{clusterfield.function} \alias{clusterfield.kppm} \title{Field of clusters} \description{ Calculate the superposition of cluster kernels at the location of a point pattern. 
} \usage{ clusterfield(model, locations = NULL, \dots) \method{clusterfield}{character}(model, locations = NULL, \dots) \method{clusterfield}{function}(model, locations = NULL, \dots, mu = NULL) \method{clusterfield}{kppm}(model, locations = NULL, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster model (object of class \code{"kppm"}), a character string specifying the type of cluster model, or a function defining the cluster kernel. See Details. } \item{locations}{ A point pattern giving the locations of the kernels. Defaults to the centroid of the observation window for the \code{"kppm"} method and to the center of a unit square otherwise. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} or the cluster kernel. See Details. } \item{mu}{ Mean number of offspring per cluster. A single number or a pixel image. } } \details{ The actual calculations are performed by \code{\link{density.ppp}} and \code{\dots} arguments are passed thereto for control over the pixel resolution etc. (These arguments are then passed on to \code{\link{pixellate.ppp}} and \code{\link{as.mask}}.) For the function method, the given kernel function should accept vectors of x and y coordinates as its first two arguments. Any additional arguments may be passed through the \code{\dots}. The function method also accepts the optional parameter \code{mu} (defaulting to 1) specifying the mean number of points per cluster (as a numeric) or the inhomogeneous reference cluster intensity (as an \code{"im"} object or a \code{function(x,y)}). The interpretation of \code{mu} is as explained in the simulation functions referenced in the See Also section below. For the character method, \code{model} must be one of: \code{model="Thomas"} for the Thomas process, \code{model="MatClust"} for the \Matern cluster process, \code{model="Cauchy"} for the Neyman-Scott cluster process with Cauchy kernel, or \code{model="VarGamma"} for the Neyman-Scott cluster process with Variance Gamma kernel. For all these models the parameter \code{scale} is required and passed through \code{\dots}, as is the parameter \code{nu} when \code{model="VarGamma"}. This method calls \code{clusterfield.function} so the parameter \code{mu} may also be passed through \code{\dots} and will be interpreted as explained above. The kppm method extracts the relevant information from the fitted model (including \code{mu}) and calls \code{clusterfield.function}. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{density.ppp}} and \code{\link{kppm}}. Simulation algorithms for cluster models: \code{\link{rCauchy}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rVarGamma}}. } \examples{
# method for fitted model
fit <- kppm(redwood~1, "Thomas")
clusterfield(fit, eps = 0.01)
# method for functions
kernel <- function(x,y,scal) {
  r <- sqrt(x^2 + y^2)
  ifelse(r > 0, dgamma(r, shape=5, scale=scal)/(2 * pi * r), 0)
}
X <- runifpoint(10)
clusterfield(kernel, X, scal=0.05)
} \author{\adrian, \rolf and \ege.} \keyword{spatial} spatstat.core/man/mppm.Rd0000644000176200001440000002506014141452520015060 0ustar liggesusers\name{mppm} \alias{mppm} \title{Fit Point Process Model to Several Point Patterns} \description{ Fits a Gibbs point process model to several point patterns simultaneously.
} \usage{ mppm(formula, data, interaction=Poisson(), ..., iformula=NULL, random=NULL, weights=NULL, use.gam = FALSE, reltol.pql=1e-3, gcontrol=list()) } \arguments{ \item{formula}{ A formula describing the systematic part of the model. Variables in the formula are names of columns in \code{data}. } \item{data}{ A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}) containing the point pattern responses and the explanatory variables. } \item{interaction}{ Interpoint interaction(s) appearing in the model. Either an object of class \code{"interact"} describing the point process interaction structure, or a hyperframe (with the same number of rows as \code{data}) whose entries are objects of class \code{"interact"}. } \item{\dots}{Arguments passed to \code{\link{ppm}} controlling the fitting procedure. } \item{iformula}{ Optional. A formula (with no left hand side) describing the interaction to be applied to each case. Each variable name in the formula should either be the name of a column in the hyperframe \code{interaction}, or the name of a column in the hyperframe \code{data} that is a vector or factor. } \item{random}{ Optional. A formula (with no left hand side) describing a random effect. Variable names in the formula may be any of the column names of \code{data} and \code{interaction}. The formula must be recognisable to \code{\link{lme}}. } \item{weights}{ Optional. Numeric vector of case weights for each row of \code{data}. } \item{use.gam}{Logical flag indicating whether to fit the model using \code{\link[mgcv]{gam}} or \code{\link[stats]{glm}}. } \item{reltol.pql}{ Relative tolerance for successive steps in the penalised quasi-likelihood algorithm, used when the model includes random effects. The algorithm terminates when the root mean square of the relative change in coefficients is less than \code{reltol.pql}. } \item{gcontrol}{ List of arguments to control the fitting algorithm. Arguments are passed to \code{\link[stats]{glm.control}} or \code{\link[mgcv]{gam.control}} or \code{\link[nlme]{lmeControl}} depending on the kind of model being fitted. If the model has random effects, the arguments are passed to \code{\link[nlme]{lmeControl}}. Otherwise, if \code{use.gam=TRUE} the arguments are passed to \code{\link[mgcv]{gam.control}}, and if \code{use.gam=FALSE} (the default) they are passed to \code{\link[stats]{glm.control}}. } } \details{ This function fits a common point process model to a dataset containing several different point patterns. It extends the capabilities of the function \code{\link{ppm}} to deal with data such as \itemize{ \item replicated observations of spatial point patterns \item two groups of spatial point patterns \item a designed experiment in which the response from each unit is a point pattern. } The syntax of this function is similar to that of standard \R model-fitting functions like \code{\link{lm}} and \code{\link{glm}}. The first argument \code{formula} is an \R formula describing the systematic part of the model. The second argument \code{data} contains the responses and the explanatory variables. Other arguments determine the stochastic structure of the model. Schematically, the data are regarded as the results of a designed experiment involving \eqn{n} experimental units. Each unit has a \sQuote{response}, and optionally some \sQuote{explanatory variables} (covariates) describing the experimental conditions for that unit. In this context, \emph{the response from each unit is a point pattern}. 
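As a minimal sketch of this setup (the column names \code{Y} and \code{group} are arbitrary, and the patterns are simulated here purely for illustration), data for three experimental units in two treatment groups could be assembled and fitted as follows: \preformatted{
  # three point pattern responses, one per experimental unit
  H <- hyperframe(Y = solist(runifpoint(50), runifpoint(60), runifpoint(40)),
                  group = factor(c("a", "a", "b")))
  # common log-linear trend with a treatment effect
  fit <- mppm(Y ~ group, data = H)
}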
The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a spatial covariate. A \sQuote{spatial} covariate is a quantity that depends on spatial location, for example, the soil acidity or altitude at each location. For the purposes of \code{mppm}, a spatial covariate must be stored as a pixel image (object of class \code{"im"}) which gives the values of the covariate at a fine grid of locations. The argument \code{data} is a hyperframe (a generalisation of a data frame, see \code{\link{hyperframe}}). This is like a data frame except that the entries can be objects of any class. The hyperframe has one row for each experimental unit, and one column for each variable (response or explanatory variable). The \code{formula} should be an \R formula. The left hand side of \code{formula} determines the \sQuote{response} variable. This should be a single name, which should correspond to a column in \code{data}. The right hand side of \code{formula} determines the spatial trend of the model. It specifies the linear predictor, and effectively represents the \bold{logarithm} of the spatial trend. Variables in the formula must be the names of columns of \code{data}, or one of the reserved names \describe{ \item{x,y}{Cartesian coordinates of location} \item{marks}{Mark attached to point} \item{id}{which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } } The column of responses in \code{data} must consist of point patterns (objects of class \code{"ppp"}). The individual point pattern responses can be defined in different spatial windows. If some of the point patterns are marked, then they must all be marked, and must have the same type of marks. The scope of models that can be fitted to each pattern is the same as the scope of \code{\link{ppm}}, that is, Gibbs point processes with interaction terms that belong to a specified list, including for example the Poisson process, Strauss process, Geyer's saturation model, and piecewise constant pairwise interaction models. Additionally, it is possible to include random effects as explained in the section on Random Effects below. The stochastic part of the model is determined by the arguments \code{interaction} and (optionally) \code{iformula}. \itemize{ \item In the simplest case, \code{interaction} is an object of class \code{"interact"}, determining the interpoint interaction structure of the point process model, for all experimental units. \item Alternatively, \code{interaction} may be a hyperframe, whose entries are objects of class \code{"interact"}. It should have the same number of rows as \code{data}. \itemize{ \item If \code{interaction} consists of only one column, then the entry in row \code{i} is taken to be the interpoint interaction for the \code{i}th experimental unit (corresponding to the \code{i}th row of \code{data}). \item If \code{interaction} has more than one column, then the argument \code{iformula} is also required. Each row of \code{interaction} determines several interpoint interaction structures that might be applied to the corresponding row of \code{data}. The choice of interaction is determined by \code{iformula}; this should be an \R formula, without a left hand side. For example if \code{interaction} has two columns called \code{A} and \code{B} then \code{iformula = ~B} indicates that the interpoint interactions are taken from the second column. 
} } Variables in \code{iformula} typically refer to column names of \code{interaction}. They can also be names of columns in \code{data}, but only for columns of numeric, logical or factor values. For example \code{iformula = ~B * group} (where \code{group} is a column of \code{data} that contains a factor) causes the model with interpoint interaction \code{B} to be fitted with different interaction parameters for each level of \code{group}. } \section{Random Effects}{ It is also possible to include random effects in the trend term. The argument \code{random} is a formula, with no left-hand side, that specifies the structure of the random effects. The formula should be recognisable to \code{\link{lme}} (see the description of the argument \code{random} for \code{\link{lme}}). The names in the formula \code{random} may be any of the covariates supplied by \code{data}. Additionally the formula may involve the name \code{id}, which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } \value{ An object of class \code{"mppm"} representing the fitted model. There are methods for \code{print}, \code{summary}, \code{coef}, \code{AIC}, \code{anova}, \code{fitted}, \code{fixef}, \code{logLik}, \code{plot}, \code{predict}, \code{ranef}, \code{residuals}, \code{summary}, \code{terms} and \code{vcov} for this class. The default methods for \code{\link[stats]{update}} and \code{\link[stats]{formula}} also work on this class. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Bell, M. and Grunwald, G. (2004) Mixed models for the analysis of replicated spatial point patterns. \emph{Biostatistics} \bold{5}, 633--648. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{ppm}}, \code{\link{print.mppm}}, \code{\link{summary.mppm}}, \code{\link{coef.mppm}} } \examples{
# Waterstriders data
H <- hyperframe(Y = waterstriders)
mppm(Y ~ 1, data=H)
mppm(Y ~ 1, data=H, Strauss(7))
mppm(Y ~ id, data=H)
mppm(Y ~ x, data=H)
# Synthetic data from known model
n <- 10
H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1), M=factor(letters[1 + (1:n) \%\% 3]))
H$Z <- setcov(square(1))
H$U <- with(H, as.im(U, as.rectangle(Z)))
H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z))))
fit <- mppm(Y ~Z + U + V, data=H)
} \keyword{spatial} \keyword{models} spatstat.core/man/rectcontact.Rd0000644000176200001440000000302314141452520016413 0ustar liggesusers\name{rectcontact} \alias{rectcontact} \title{ Contact Distribution Function using Rectangular Structuring Element } \description{ Computes an estimate of the contact distribution function of a set, using a rectangular structuring element. } \usage{ rectcontact(X, \dots, asp = 1, npasses=4, eps = NULL, r = NULL, breaks = NULL, correction = c("rs", "km")) } \arguments{ \item{X}{ Logical-valued image. The \code{TRUE} values in the image determine the spatial region whose contact distribution function should be estimated. } \item{\dots}{ Ignored.
} \item{asp}{ Aspect ratio for the rectangular metric. A single positive number. See \code{\link{rectdistmap}} for explanation. } \item{npasses}{ Number of passes to perform in the distance algorithm. A positive integer. See \code{\link{rectdistmap}} for explanation. } \item{eps}{ Pixel size, if the image should be converted to a finer grid. } \item{r}{ Optional vector of distance values. Do Not Use This. } \item{breaks}{ Do Not Use This. } \item{correction}{ Character vector specifying the edge correction. } } \details{ This function computes an estimate of the contact distribution function of the spatial region formed by the \code{TRUE} pixels of the image \code{X}. It is analogous to \code{\link{Hest}}, except that distances are measured in the rectangular metric computed by \code{\link{rectdistmap}} with aspect ratio \code{asp}. The available edge corrections are the reduced-sample (\code{"rs"}) and Kaplan-Meier (\code{"km"}) estimators, as in \code{\link{Hest}}. } \value{ Object of class \code{"fv"}. } \author{ \adrian. } \seealso{ \code{\link{Hest}} } \examples{
## make an image which is TRUE/FALSE inside/outside the letter R
V <- letterR
Frame(V) <- grow.rectangle(Frame(V), 0.5)
Z <- as.im(V, value=TRUE, na.replace=FALSE)
## analyse
plot(rectcontact(Z))
} \keyword{spatial} \keyword{nonparametric} spatstat.core/man/F3est.Rd0000644000176200001440000001270714141452520015077 0ustar liggesusers\name{F3est} \Rdversion{1.1} \alias{F3est} \title{ Empty Space Function of a Three-Dimensional Point Pattern } \description{ Estimates the empty space function \eqn{F_3(r)}{F3(r)} from a three-dimensional point pattern. } \usage{ F3est(X, ..., rmax = NULL, nrval = 128, vside = NULL, correction = c("rs", "km", "cs"), sphere = c("fudge", "ideal", "digital")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{vside}{ Optional. Side length of the voxels in the discrete approximation. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{sphere}{ Optional. Character string specifying how to calculate the theoretical value of \eqn{F_3(r)}{F3(r)} for a Poisson process. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the empty space function is \deqn{ F_3(r) = P(d(0,\Phi) \le r) }{ F3(r) = P(d(0,Phi) <= r) } where \eqn{d(0,\Phi)}{d(0,Phi)} denotes the distance from a fixed origin \eqn{0} to the nearest point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The empty space function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. The box containing the point pattern is discretised into cubic voxels of side length \code{vside}. The distance function \eqn{d(u,\Phi)}{d(u,Phi)} is computed for every voxel centre point \eqn{u} using a three-dimensional version of the distance transform algorithm (Borgefors, 1986). The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{F_3(r)}{F3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"cs"}:}{ the three-dimensional generalisation of the Chiu-Stoyan or Hanisch estimator (Chiu and Stoyan, 1998). } } Alternatively \code{correction="all"} selects all options.
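For example, a hedged sketch of a typical call (the values of \code{vside} and \code{correction} here are chosen purely for illustration; smaller voxels give better accuracy at greater computational cost): \preformatted{
  X <- rpoispp3(30)    # simulated Poisson pattern in the unit box
  # coarse voxels and a single edge correction, for speed
  Z <- F3est(X, vside = 0.05, correction = "km")
}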
The result includes a column \code{theo} giving the theoretical value of \eqn{F_3(r)}{F3(r)} for a uniform Poisson process (Complete Spatial Randomness). This value depends on the volume of the sphere of radius \code{r} measured in the discretised distance metric. The argument \code{sphere} determines how this will be calculated. \itemize{ \item If \code{sphere="ideal"} the calculation will use the volume of an ideal sphere of radius \eqn{r} namely \eqn{(4/3) \pi r^3}{(4/3) * pi * r^3}. This is not recommended because the theoretical values of \eqn{F_3(r)}{F3(r)} are inaccurate. \item If \code{sphere="fudge"} then the volume of the ideal sphere will be multiplied by 0.78, which gives the approximate volume of the sphere in the discretised distance metric. \item If \code{sphere="digital"} then the volume of the sphere in the discretised distance metric is computed exactly using another distance transform. This takes longer to compute, but is exact. } } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42} (1993) 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Borgefors, G. (1986) Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34}, 344--371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A small value of \code{vside} and a large value of \code{nrval} are required for reasonable accuracy. The default value of \code{vside} ensures that the total number of voxels is \code{2^22} or about 4 million. To change the default number of voxels, see \code{\link{spatstat.options}("nvoxel")}. } \seealso{ \code{\link{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{G3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} for other summary functions of a three-dimensional point pattern. \code{\link{Fest}} to estimate the empty space function of point patterns in two dimensions. } \examples{ \testonly{op <- spatstat.options(nvoxel=2^18)} X <- rpoispp3(42) Z <- F3est(X) if(interactive()) plot(Z) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/runifpointx.Rd0000644000176200001440000000245714141452520016501 0ustar liggesusers\name{runifpointx} \alias{runifpointx} \title{ Generate N Uniform Random Points in Any Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in any number of spatial dimensions. } \usage{ runifpointx(n, domain, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern (an object of class \code{"ppx"}). 
If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns. } \details{ This function generates a pattern of \code{n} independent random points, uniformly distributed in the multi-dimensional box \code{domain}. } \seealso{ \code{\link{rpoisppx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- runifpointx(50, w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/kernel.factor.Rd0000644000176200001440000000265214141452520016646 0ustar liggesusers\name{kernel.factor} \alias{kernel.factor} \title{Scale factor for density kernel} \description{ Returns a scale factor for the kernels used in density estimation for numerical data. } \usage{ kernel.factor(kernel = "gaussian") } \arguments{ \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. This function computes a scale constant for the kernel. For the Gaussian kernel, this constant is equal to 1. Otherwise, the constant \eqn{c} is such that the kernel with standard deviation \eqn{1} is supported on the interval \eqn{[-c,c]}. For more information about these kernels, see \code{\link[stats]{density.default}}. } \value{ A single number. } \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.moment}}, \code{\link{kernel.squint}} } \examples{ kernel.factor("rect") # bandwidth for Epanechnikov kernel with half-width h=1 h <- 1 bw <- h/kernel.factor("epa") } \author{\adrian and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat.core/man/cauchy.estK.Rd0000644000176200001440000001302214141452520016263 0ustar liggesusers\name{cauchy.estK} \alias{cauchy.estK} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast. } \usage{ cauchy.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. 
The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical \eqn{K} function of the Cauchy process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen.
Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{vargamma.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estK(redwood) u plot(u) } \keyword{spatial} \keyword{models} spatstat.core/man/pcfcross.inhom.Rd0000644000176200001440000001147014141452520017042 0ustar liggesusers\name{pcfcross.inhom} \alias{pcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-Type) } \description{ Estimates the inhomogeneous cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross.inhom(X, i, j, lambdaI = NULL, lambdaJ = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity function of the points of type \code{j}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{ij}(r)}{g[i,j](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by kernel smoothing. } } \details{ The inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} is a summary of the dependence between two types of points in a multitype spatial point process that does not have a uniform density of points. 
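If estimates of the type-specific intensities are already available, they may be supplied explicitly as \code{lambdaI} and \code{lambdaJ}; a hedged sketch (the bandwidth \code{sigma=0.15} is an arbitrary choice, for illustration only) is: \preformatted{
  # kernel estimates of the intensity of each type
  lam <- density(split(amacrine), sigma = 0.15)
  g <- pcfcross.inhom(amacrine, "on", "off",
                      lambdaI = lam$on, lambdaJ = lam$off)
}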
The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points, of types \eqn{i} and \eqn{j} respectively, at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda_i(x) \lambda_j(y) g_{ij}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda[j](y) * g[i,j](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda_j(y)}{p(r) = lambda[i](x) * lambda[j](y)} so \eqn{g_{ij}(r) = 1}{g[i,j](r) = 1}. The command \code{pcfcross.inhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdaJ} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{ij}(r)}{g[i,j](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfcross}}, \code{\link{pcfdot.inhom}} } \examples{
data(amacrine)
plot(pcfcross.inhom(amacrine, "on", "off", stoyan=0.1), legendpos="bottom")
} \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/vcov.kppm.Rd0000644000176200001440000000557114141452520016037 0ustar liggesusers\name{vcov.kppm} \alias{vcov.kppm} \title{Variance-Covariance Matrix for a Fitted Cluster Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted cluster point process model. } \usage{ \method{vcov}{kppm}(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 0.01, verbose = TRUE) } \arguments{ \item{object}{ A fitted cluster point process model (an object of class \code{"kppm"}). } \item{\dots}{ Ignored. } \item{what}{ Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } \item{fast}{ Logical specifying whether tapering (using sparse matrices from \pkg{Matrix}) should be used to speed up calculations. Warning: This is expected to underestimate the true asymptotic variances/covariances. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. Only used when \code{fast=TRUE}. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function when fast option (\code{fast=TRUE}) is used. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Only used when \code{fast=TRUE}. Ignored if \code{rmax} is provided. } \item{verbose}{ Logical value indicating whether to print progress reports during very long calculations.
} } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical (regression) parameters in the cluster point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. The result is an \code{n * n} matrix where \code{n = length(coef(model))}. To calculate a confidence interval for a regression parameter, use \code{\link[stats]{confint}} as shown in the examples. } \value{ A square matrix. } \references{ Waagepetersen, R. (2007) Estimating functions for inhomogeneous spatial point processes with incomplete covariate data. \emph{Biometrika} \bold{95}, 351--363. } \author{ Abdollah Jalilian and Rasmus Waagepetersen. Ported to \pkg{spatstat} by \adrian and \ege. } \seealso{ \code{\link{kppm}}, \code{\link{vcov}}, \code{\link{vcov.ppm}} } \examples{ fit <- kppm(redwood ~ x + y) vcov(fit) vcov(fit, what="corr") # confidence interval confint(fit) # cross-check the confidence interval by hand: sd <- sqrt(diag(vcov(fit))) t(coef(fit) + 1.96 * outer(sd, c(lower=-1, upper=1))) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/lurking.Rd0000644000176200001440000003041514141452520015562 0ustar liggesusers\name{lurking} \alias{lurking} \alias{lurking.ppp} \alias{lurking.ppm} \title{Lurking Variable Plot} \description{ Plot spatial point process residuals against a covariate } \usage{ lurking(object, \dots) \method{lurking}{ppm}(object, covariate, type="eem", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) \method{lurking}{ppp}(object, covariate, type="eem", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. Alternatively, \code{object} may be a point pattern (object of class \code{"ppp"}). } \item{covariate}{ The covariate against which residuals should be plotted. Either a numeric vector, a pixel image, or an \code{expression}. See \emph{Details} below. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. } \item{cumulative}{ Logical flag indicating whether to plot a cumulative sum of marks (\code{cumulative=TRUE}) or the derivative of this sum, a marginal density of the smoothed residual field (\code{cumulative=FALSE}). } \item{\dots}{ Arguments passed to \code{\link{plot.default}} and \code{\link{lines}} to control the plot behaviour. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{plot.sd}{ Logical value indicating whether error bounds should be added to plot. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. 
} \item{clipwindow}{ If not \code{NULL} this argument indicates that residuals shall only be computed inside a subregion of the window containing the original point pattern data. Then \code{clipwindow} should be a window object of class \code{"owin"}. } \item{rv}{ Usually absent. If this argument is present, the point process residuals will not be calculated from the fitted model \code{object}, but will instead be taken directly from \code{rv}. } \item{envelope}{ Logical value indicating whether to compute simulation envelopes for the plot. Alternatively \code{envelope} may be a list of point patterns to use for computing the simulation envelopes, or an object of class \code{"envelope"} containing simulated point patterns. } \item{nsim}{ Number of simulated point patterns to be generated to produce the simulation envelope, if \code{envelope=TRUE}. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{typename}{ Usually absent. If this argument is present, it should be a string, and will be used (in the axis labels of plots) to describe the type of residuals. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{check}{ Logical flag indicating whether the integrity of the data structure in \code{object} should be checked. } \item{verbose}{ Logical value indicating whether to print progress reports during Monte Carlo simulation. } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{splineargs}{ A list of arguments passed to \code{smooth.spline} for the estimation of the derivatives in the case \code{cumulative=FALSE}. } \item{internal}{ Internal use only. } } \value{ The (invisible) return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. This object is a list containing two dataframes \code{empirical} and \code{theoretical}. The first dataframe \code{empirical} contains columns \code{covariate} and \code{value} giving the coordinates of the lurking variable plot. The second dataframe \code{theoretical} contains columns \code{covariate}, \code{mean} and \code{sd} giving the coordinates of the plot of the theoretical mean and standard deviation. } \details{ This function generates a \sQuote{lurking variable} plot for a fitted point process model. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model, in particular, to reveal that the point pattern depends on the covariate. The function \code{lurking} is generic, with methods for \code{ppm} and \code{ppp} documented here, and possibly other methods. The argument \code{object} would usually be a fitted point process model (object of class \code{"ppm"}) produced by the model-fitting algorithm \code{\link{ppm}}). If \code{object} is a point pattern (object of class \code{"ppp"}) then the model is taken to be the uniform Poisson process (Complete Spatial Randomness) fitted to this point pattern. 
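For example, a hedged sketch of a typical call (the model formula and the covariate image \code{D} are arbitrary choices, constructed here purely for illustration): \preformatted{
  fit <- ppm(nztrees ~ x)
  # pixel image covariate: distance to a corner of the window
  D <- distmap(ppp(0, 0, window = Window(nztrees)))
  lurking(fit, D, type = "raw", covname = "distance")
}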
First the residuals from the fitted model (Baddeley et al, 2005) are computed at each quadrature point, or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991) are computed at each data point. The argument \code{type} selects the type of residual or weight. See \code{\link{diagnose.ppm}} for options and explanation. A lurking variable plot for point processes (Baddeley et al, 2005) displays either the cumulative sum of residuals/weights (if \code{cumulative = TRUE}) or a kernel-weighted average of the residuals/weights (if \code{cumulative = FALSE}) plotted against the covariate. The empirical plot (solid lines) is shown together with its expected value assuming the model is true (dashed lines) and optionally also the pointwise two-standard-deviation limits (grey shading). To be more precise, let \eqn{Z(u)} denote the value of the covariate at a spatial location \eqn{u}. \itemize{ \item If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z}, where \eqn{H(z)} is the sum of the residuals over all quadrature points where the covariate takes a value less than or equal to \eqn{z}, or the sum of the exponential energy weights over all data points where the covariate takes a value less than or equal to \eqn{z}. \item If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z}, where \eqn{h(z)} is the derivative of \eqn{H(z)}, computed approximately by spline smoothing. } For the point process residuals \eqn{E(H(z)) = 0}, while for the exponential energy weights \eqn{E(H(z)) = } area of the subset of the window satisfying \eqn{Z(u) \le z}{Z(u) <= z}. If the empirical and theoretical curves deviate substantially from one another, the interpretation is that the fitted model does not correctly account for dependence on the covariate. The correct form (of the spatial trend part of the model) may be suggested by the shape of the plot. If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{H(z)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. The argument \code{covariate} is either a numeric vector, a pixel image, or an R language expression. If it is a numeric vector, it is assumed to contain the values of the covariate for each of the quadrature points in the fitted model. The quadrature points can be extracted by \code{\link{quad.ppm}(object)}. If \code{covariate} is a pixel image, it is assumed to contain the values of the covariate at each location in the window. The values of this image at the quadrature points will be extracted. Alternatively, if \code{covariate} is an \code{expression}, it will be evaluated in the same environment as the model formula used in fitting the model \code{object}. It must yield a vector of the same length as the number of quadrature points.
The expression may contain the terms \code{x} and \code{y} representing the Cartesian coordinates, and may also contain other variables that were available when the model was fitted. Certain variable names are reserved words; see \code{\link{ppm}}. Note that lurking variable plots for the \eqn{x} and \eqn{y} coordinates are also generated by \code{\link{diagnose.ppm}}, amongst other types of diagnostic plots. This function is more general in that it enables the user to plot the residuals against any chosen covariate that may have been present. For advanced use, even the values of the residuals/weights can be altered. If the argument \code{rv} is present, the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from the object \code{rv}. If \code{type = "eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector with length equal to the number of data points in the original point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, \code{rv} should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. } \seealso{ \code{\link{residuals.ppm}}, \code{\link{diagnose.ppm}}, \code{\link{qqplot.ppm}}, \code{\link{eem}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2006) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100. } \examples{
(a <- lurking(nztrees, expression(x), type="raw"))
fit <- ppm(nztrees ~x, Poisson(), nd=128)
(b <- lurking(fit, expression(x), type="raw"))
lurking(fit, expression(x), type="raw", cumulative=FALSE)
} \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.core/man/with.ssf.Rd0000644000176200001440000000301614141452520015651 0ustar liggesusers\name{with.ssf} \alias{with.ssf} \alias{apply.ssf} \title{ Evaluate Expression in a Spatially Sampled Function } \description{ Given a spatially sampled function, evaluate an expression involving the function values. } \usage{ apply.ssf(X, \dots) \method{with}{ssf}(data, \dots) } \arguments{ \item{X, data}{ A spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link{with.default}} or \code{\link{apply}} specifying what to compute. } } \details{ An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. It contains a data frame which provides the function values at the sample points. In \code{with.ssf}, the expression specified by \code{\dots} will be evaluated in this data frame. In \code{apply.ssf}, the data frame will be subjected to the \code{\link{apply}} operator using the additional arguments \code{\dots}. If the result of evaluation is a data frame with one row for each data point, or a numeric vector with one entry for each data point, then the result will be an object of class \code{"ssf"} containing this information. Otherwise, the result will be a numeric vector.
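For example, a hedged sketch of \code{apply.ssf} (the sampled values chosen here are arbitrary): \preformatted{
  b <- ssf(cells, data.frame(d1 = nndist(cells), d2 = nndist(cells, k = 2)))
  # row-wise mean of the sampled values, returned as an "ssf" object
  m <- apply.ssf(b, 1, mean)
}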
} \value{ An object of class \code{"ssf"} or a numeric vector. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, data.frame(d=nndist(cells), i=1:npoints(cells))) with(a, i/d) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.core/man/eval.fasp.Rd0000644000176200001440000000570414141452520015771 0ustar liggesusers\name{eval.fasp} \alias{eval.fasp} \title{Evaluate Expression Involving Function Arrays} \description{ Evaluates any expression involving one or more function arrays (\code{fasp} objects) and returns another function array. } \usage{ eval.fasp(expr, envir, dotonly=TRUE) } \arguments{ \item{expr}{ An expression involving the names of objects of class \code{"fasp"}. } \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fasp"} objects to be used in the expression. } \item{dotonly}{Logical. Passed to \code{\link{eval.fv}}.} } \details{ This is a wrapper to make it easier to perform pointwise calculations with the arrays of summary functions used in spatial statistics. A function array (object of class \code{"fasp"}) can be regarded as a matrix whose entries are functions. Objects of this kind are returned by the command \code{\link[spatstat.core]{alltypes}}. Suppose \code{X} is an object of class \code{"fasp"}. Then \code{eval.fasp(X+3)} effectively adds 3 to the value of every function in the array \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fasp"} which are compatible (for example the arrays must have the same dimensions). Then \code{eval.fasp(X + Y)} will add the corresponding functions in each cell of the arrays \code{X} and \code{Y}, and return the resulting array of functions. Suppose \code{X} is an object of class \code{"fasp"} and \code{f} is an object of class \code{"fv"}. Then \code{eval.fasp(X + f)} will add the function \code{f} to the functions in each cell of the array \code{X}, and return the resulting array of functions. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fasp"} or \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fasp} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fasp"}. The expression is then evaluated for each cell of the array using \code{\link{eval.fv}}. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fasp"} in the expression. All such objects must be compatible. } \value{ Another object of class \code{"fasp"}. } \seealso{ \code{\link{fasp.object}}, \code{\link[spatstat.core]{Kest}} } \examples{ K <- alltypes(amacrine, "K") # expressions involving a fasp object eval.fasp(K + 3) L <- eval.fasp(sqrt(K/pi)) # expression involving two fasp objects D <- eval.fasp(K - L) # subtracting the unmarked K function from the cross-type K functions K0 <- Kest(unmark(amacrine)) DK <- eval.fasp(K - K0) ## Use of 'envir' S <- eval.fasp(1-G, list(G=alltypes(amacrine, 'G'))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.core/man/pcf.Rd0000644000176200001440000000714114141452520014657 0ustar liggesusers\name{pcf} \alias{pcf} \title{Pair Correlation Function} \description{ Estimate the pair correlation function. 
} \usage{ pcf(X, \dots) } \arguments{ \item{X}{ Either the observed data point pattern, or an estimate of its \eqn{K} function, or an array of multitype \eqn{K} functions (see Details). } \item{\dots}{ Other arguments passed to the appropriate method. } } \value{ Either a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function, or a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} either directly from a point pattern, or indirectly from an estimate of \eqn{K(r)} or one of its variants. This function is generic, with methods for the classes \code{"ppp"}, \code{"fv"} and \code{"fasp"}. If \code{X} is a point pattern (object of class \code{"ppp"}) then the pair correlation function is estimated using a traditional kernel smoothing method (Stoyan and Stoyan, 1994). See \code{\link{pcf.ppp}} for details. If \code{X} is a function value table (object of class \code{"fv"}), then it is assumed to contain estimates of the \eqn{K} function or one of its variants (typically obtained from \code{\link{Kest}} or \code{\link{Kinhom}}). This routine computes an estimate of \eqn{g(r)} using smoothing splines to approximate the derivative. See \code{\link{pcf.fv}} for details. If \code{X} is a function value array (object of class \code{"fasp"}), then it is assumed to contain estimates of several \eqn{K} functions (typically obtained from \code{\link{Kmulti}} or \code{\link{alltypes}}). This routine computes an estimate of \eqn{g(r)} for each cell in the array, using smoothing splines to approximate the derivatives. See \code{\link{pcf.fasp}} for details. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcf.fv}}, \code{\link{pcf.fasp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}} } \examples{ # ppp object X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p) # fv object K <- Kest(X) p2 <- pcf(K, spar=0.8, method="b") plot(p2) # multitype pattern; fasp object amaK <- alltypes(amacrine, "K") amap <- pcf(amaK, spar=1, method="b") plot(amap) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rHardcore.Rd0000644000176200001440000000660614141452520016025 0ustar liggesusers\name{rHardcore} \alias{rHardcore} \title{Perfect Simulation of the Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Hardcore process, using a perfect simulation algorithm. } \usage{ rHardcore(beta, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{R}{ hard core distance (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Hardcore process is a model for strong spatial inhibition. Two points of the process are forbidden to lie closer than \code{R} units apart. The Hardcore process is the special case of the Strauss process (see \code{\link{rStrauss}}) with interaction parameter \eqn{\gamma}{gamma} equal to zero. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. \Moller, J. and Waagepetersen, R. (2003). 
\emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian, based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rHardcore(0.05, 1.5, square(50)) } \seealso{ \code{\link{rmh}}, \code{\link{Hardcore}}, \code{\link{rStrauss}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat.core/man/dppMatern.Rd0000644000176200001440000000243614141452520016043 0ustar liggesusers\name{dppMatern} \alias{dppMatern} \title{Whittle-Matern Determinantal Point Process Model} \description{ Function generating an instance of the Whittle-\Matern determinantal point process model. } \usage{dppMatern(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details. } } \details{ The Whittle-\Matern DPP is defined in Lavancier, \Moller and Rubak (2015). The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian, \rolf and \ege } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppMatern(lambda=100, alpha=.02, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppPowerExp}} } spatstat.core/man/pool.envelope.Rd0000644000176200001440000000544414141452520016700 0ustar liggesusers\name{pool.envelope} \alias{pool.envelope} \title{ Pool Data from Several Envelopes } \description{ Pool the simulation data from several simulation envelopes (objects of class \code{"envelope"}) and compute a new envelope. } \usage{ \method{pool}{envelope}(..., savefuns=FALSE, savepatterns=FALSE) } \arguments{ \item{\dots}{ Objects of class \code{"envelope"}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"envelope"} of simulation envelopes. It is used to combine the simulation data from several simulation envelopes and to compute an envelope based on the combined data. Each of the arguments \code{\dots} must be an object of class \code{"envelope"}. These envelopes must be compatible, in that they are envelopes for the same function, and were computed using the same options. \itemize{ \item In normal use, each envelope object will have been created by running the command \code{\link{envelope}} with the argument \code{savefuns=TRUE}. This ensures that each object contains the simulated data (summary function values for the simulated point patterns) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new envelope is computed from the combined set of simulations. \item Alternatively, if each envelope object was created by running \code{\link{envelope}} with \code{VARIANCE=TRUE}, then the saved functions are not required.
The sample means and sample variances from each envelope will be pooled. A new envelope is computed from the pooled mean and variance. } Warnings or errors will be issued if the envelope objects \code{\dots} appear to be incompatible. Apart from these basic checks, the code is not smart enough to decide whether it is sensible to pool the data. To modify the envelope parameters or the type of envelope that is computed, first pool the envelope data using \code{pool.envelope}, then use \code{\link{envelope.envelope}} to modify the envelope parameters. } \value{ An object of class \code{"envelope"}. } \seealso{ \code{\link{envelope}}, \code{\link{envelope.envelope}}, \code{\link{pool}}, \code{\link{pool.fasp}} } \examples{ E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE) E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE) pool(E1, E2) V1 <- envelope(E1, VARIANCE=TRUE) V2 <- envelope(E2, VARIANCE=TRUE) pool(V1, V2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/hierpair.family.Rd0000644000176200001440000000140414141452520017166 0ustar liggesusers\name{hierpair.family} \alias{hierpair.family} \title{Hierarchical Pairwise Interaction Process Family} \description{ An object describing the family of all hierarchical pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the hierarchical pairwise interaction family of point process models. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Hierarchical Strauss interaction: \code{\link{HierStrauss}}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/methods.slrm.Rd0000644000176200001440000000361214141452520016525 0ustar liggesusers\name{methods.slrm} \alias{methods.slrm} %DoNotExport \alias{formula.slrm} \alias{update.slrm} \alias{print.slrm} \alias{summary.slrm} \alias{terms.slrm} \alias{labels.slrm} \alias{deviance.slrm} \title{ Methods for Spatial Logistic Regression Models } \description{ These are methods for the class \code{"slrm"}. } \usage{ \method{formula}{slrm}(x, \dots) \method{print}{slrm}(x, ...) \method{summary}{slrm}(object, ...) \method{terms}{slrm}(x, \dots) \method{labels}{slrm}(object, \dots) \method{deviance}{slrm}(object, \dots) \method{update}{slrm}(object, \dots, evaluate = TRUE, env = parent.frame()) } \arguments{ \item{x,object}{ An object of class \code{"slrm"}, representing a fitted spatial logistic regression model. } \item{\dots}{ Arguments passed to other methods. } \item{evaluate}{ Logical value. If \code{TRUE}, evaluate the updated call to \code{slrm}, so that the model is refitted; if \code{FALSE}, simply return the updated call. } \item{env}{ Optional environment in which the model should be updated. } } \details{ These functions are methods for the generic commands \code{\link[stats]{formula}}, \code{\link[stats]{update}}, \code{\link{print}}, \code{\link{summary}}, \code{\link[stats]{terms}}, \code{\link{labels}} and \code{\link[stats]{deviance}} for the class \code{"slrm"}. An object of class \code{"slrm"} represents a fitted spatial logistic regression model. It is obtained from \code{\link{slrm}}. } \value{ See the help files for the corresponding generic functions. 
} \author{ \adrian } \seealso{ \code{\link{slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{simulate.slrm}}, \code{\link{vcov.slrm}}, \code{\link{coef.slrm}}. } \examples{ fit <- slrm(redwood ~ x) coef(fit) formula(fit) tf <- terms(fit) labels(fit) deviance(fit) } \keyword{spatial} \keyword{methods} spatstat.core/man/runifpoint3.Rd0000644000176200001440000000233514141452520016367 0ustar liggesusers\name{runifpoint3} \alias{runifpoint3} \title{ Generate N Uniform Random Points in Three Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in three dimensions. } \usage{ runifpoint3(n, domain = box3(), nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in three dimensions (an object of class \code{"pp3"}). If \code{nsim > 1}, a list of such point patterns. } \details{ This function generates \code{n} independent random points, uniformly distributed in the three-dimensional box \code{domain}. } \seealso{ \code{\link{rpoispp3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- runifpoint3(50) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/markcrosscorr.Rd0000644000176200001440000000730114141452520016777 0ustar liggesusers\name{markcrosscorr} \alias{markcrosscorr} \title{ Mark Cross-Correlation Function } \description{ Given a spatial point pattern with several columns of marks, this function computes the mark correlation function between each pair of columns of marks. } \usage{ markcrosscorr(X, r = NULL, correction = c("isotropic", "Ripley", "translate"), method = "density", \dots, normalise = TRUE, Xname = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{Xname}{ Optional character string name for the dataset \code{X}. } } \details{ First, all columns of marks are converted to numerical values. A factor with \eqn{m} possible levels is converted to \eqn{m} columns of dummy (indicator) values. 
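For example, a factor mark with levels \code{"a"} and \code{"b"} becomes two columns of indicator values. A minimal sketch of this kind of conversion using base \R (an illustration only, not the internal code of \code{markcrosscorr}): \preformatted{
  m <- factor(c("a", "b", "a"))
  # one dummy (indicator) column per factor level
  model.matrix(~ m - 1)
}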
Next, each pair of columns is considered, and the mark cross-correlation is defined as \deqn{ k_{mm}(r) = \frac{E_{0u}[M_i(0) M_j(u)]}{E[M_i M_j]} }{ k[mm](r) = E[0u](M(i,0) * M(j,u))/E(Mi * Mj) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}. In the numerator, \eqn{M_i(0)}{M(i,0)} and \eqn{M_j(u)}{M(j,u)} are the marks attached to locations \eqn{0} and \eqn{u} in the \eqn{i}th and \eqn{j}th columns of marks respectively. In the denominator, \eqn{M_i}{Mi} and \eqn{M_j}{Mj} are independent random values drawn from the \eqn{i}th and \eqn{j}th columns of marks, respectively, and \eqn{E} is the usual expectation. Note that \eqn{k_{mm}(r)}{k[mm](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_{mm}(r) \equiv 1}{k[mm](r) = 1}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern. The cross-correlations are estimated in the same manner as for \code{\link{markcorr}}. } \value{ A function array (object of class \code{"fasp"}) containing the mark cross-correlation functions for each possible pair of columns of marks. } \author{ \adrian, \rolf and \ege } \seealso{ \code{\link{markcorr}} } \examples{ # The dataset 'betacells' has two columns of marks: # 'type' (factor) # 'area' (numeric) if(interactive()) plot(betacells) plot(markcrosscorr(betacells)) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/dppapproxpcf.Rd0000644000176200001440000000202614141452520016612 0ustar liggesusers\name{dppapproxpcf} \alias{dppapproxpcf} \title{Approximate Pair Correlation Function of Determinantal Point Process Model} \description{ Returns an approximation to the theoretical pair correlation function of a determinantal point process model, as a function of one argument \eqn{x}. } \usage{dppapproxpcf(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric value specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}.} \item{W}{Optional window -- undocumented at the moment.} } \details{ This function is usually NOT needed for anything. It only exists for investigative purposes. } \value{ A function in the \R language, with one numeric argument \code{x}, that returns the value of the approximate pair correlation at distances \code{x}. } \author{ \spatstatAuthors. } \examples{ f <- dppapproxpcf(dppMatern(lambda = 100, alpha=.028, nu=1, d=2)) plot(f, xlim = c(0,0.1)) } spatstat.core/man/fv.Rd0000644000176200001440000001627614141452520014530 0ustar liggesusers\name{fv} \alias{fv} \title{ Create a Function Value Table } \description{ Advanced Use Only. This low-level function creates an object of class \code{"fv"} from raw numerical data. } \usage{ fv(x, argu = "r", ylab = NULL, valu, fmla = NULL, alim = NULL, labl = names(x), desc = NULL, unitname = NULL, fname = NULL, yexp = ylab) } \arguments{ \item{x}{ A data frame with at least 2 columns containing the values of the function argument and the corresponding values of (one or more versions of) the function. } \item{argu}{ String.
The name of the column of \code{x} that contains the values of the function argument. } \item{ylab}{ Either \code{NULL}, or an \R language expression representing the mathematical name of the function. See Details. } \item{valu}{ String. The name of the column of \code{x} that should be taken as containing the function values, in cases where a single column is required. } \item{fmla}{ Either \code{NULL}, or a \code{formula} specifying the default plotting behaviour. See Details. } \item{alim}{ Optional. The default range of values of the function argument for which the function will be plotted. Numeric vector of length 2. } \item{labl}{ Optional. Plot labels for the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{desc}{ Optional. Descriptions of the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{unitname}{ Optional. Name of the unit (usually a unit of length) in which the function argument is expressed. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{fname}{ Optional. The name of the function itself. A character string. } \item{yexp}{ Optional. Alternative form of \code{ylab} more suitable for annotating an axis of the plot. See Details. } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. Other users please see \code{\link{fv.object}}. The low-level function \code{fv} is used to create an object of class \code{"fv"} from raw numerical data. The data frame \code{x} contains the numerical data. It should have one column (typically but not necessarily named \code{"r"}) giving the values of the function argument for which the function has been evaluated; and at least one other column, containing the corresponding values of the function. Typically there is more than one column of function values. These columns typically give the values of different versions or estimates of the same function, for example, different estimates of the \eqn{K} function obtained using different edge corrections. However they may also contain the values of related functions such as the derivative or hazard rate. \code{argu} specifies the name of the column of \code{x} that contains the values of the function argument (typically \code{argu="r"} but this is not compulsory). \code{valu} specifies the name of another column that contains the \sQuote{recommended} estimate of the function. It will be used to provide function values in those situations where a single column of data is required. For example, \code{\link{envelope}} computes its simulation envelopes using the recommended value of the summary function. \code{fmla} specifies the default plotting behaviour. It should be a formula, or a string that can be converted to a formula. Variables in the formula are names of columns of \code{x}. See \code{\link{plot.fv}} for the interpretation of this formula. \code{alim} specifies the recommended range of the function argument. This is used in situations where statistical theory or statistical practice indicates that the computed estimates of the function are not trustworthy outside a certain range of values of the function argument. By default, \code{\link{plot.fv}} will restrict the plot to this range. \code{fname} is a string giving the name of the function itself. For example, the \eqn{K} function would have \code{fname="K"}. 
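To fix ideas, here is a minimal sketch, a condensed version of the Example at the foot of this help file, showing how \code{argu}, \code{valu}, \code{fmla} and \code{fname} fit together: \preformatted{
  df <- data.frame(r=seq(0, 5, by=0.1))
  df$a <- pi * df$r^2
  # 'argu' and 'valu' name columns of df; 'fmla' sets the default plot
  X <- fv(df, argu="r", valu="a", fmla=(. ~ r), alim=c(0,4), fname="A")
}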
\code{ylab} is a mathematical expression for the function value, used when labelling an axis of the plot, or when printing a description of the function. It should be an \R language object. For example the \eqn{K} function's mathematical name \eqn{K(r)} is rendered by \code{ylab=quote(K(r))}. If \code{yexp} is present, then \code{ylab} will be used only for printing, and \code{yexp} will be used for annotating axes in a plot. (Otherwise \code{yexp} defaults to \code{ylab}). For example the cross-type \eqn{K} function \eqn{K_{1,2}(r)}{K[1,2](r)} is rendered by something like \code{ylab=quote(Kcross[1,2](r))} and \code{yexp=quote(Kcross[list(1,2)](r))} to get the most satisfactory behaviour. (A useful tip: use \code{\link{substitute}} instead of \code{\link{quote}} to insert values of variables into an expression, e.g. \code{substitute(Kcross[i,j](r), list(i=42,j=97))} yields the same as \code{quote(Kcross[42, 97](r))}.) \code{labl} is a character vector specifying plot labels for each column of \code{x}. These labels will appear on the plot axes (in non-default plots), legends and printed output. Entries in \code{labl} may contain the string \code{"\%s"} which will be replaced by \code{fname}. For example the border-corrected estimate of the \eqn{K} function has label \code{"\%s[bord](r)"} which becomes \code{"K[bord](r)"}. \code{desc} is a character vector containing intelligible explanations of each column of \code{x}. Entries in \code{desc} may contain the string \code{"\%s"} which will be replaced by \code{ylab}. For example the border correction estimate of the \eqn{K} function has description \code{"border correction estimate of \%s"}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}. } \seealso{ See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. Use \code{\link{range.fv}} to compute the range of \eqn{y} values for a function, and \code{\link{with.fv}} for more complicated calculations. The functions \code{fvnames}, \code{fvnames<-} allow the user to use standard abbreviations to refer to columns of an \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{tweak.fv.entry} and \code{rebadge.fv}. } \author{\adrian and \rolf } \examples{ df <- data.frame(r=seq(0,5,by=0.1)) df <- transform(df, a=pi*r^2, b=3*r^2) X <- fv(df, "r", quote(A(r)), "a", cbind(a, b) ~ r, alim=c(0,4), labl=c("r", "\%s[true](r)", "\%s[approx](r)"), desc=c("radius of circle", "true area \%s", "rough area \%s"), fname="A") X } \keyword{spatial} \keyword{classes} spatstat.core/man/Gcross.Rd0000644000176200001440000002207314141452520015350 0ustar liggesusers\name{Gcross} \alias{Gcross} \title{ Multitype Nearest Neighbour Distance Function (i-to-j) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest point of type \eqn{j}. } \usage{ Gcross(X, i, j, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type distance distribution function \eqn{G_{ij}(r)}{Gij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. 
} \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing seven numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{ij}(r)}{Gij(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{ij}(r)}{Gij(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{ij}(r)}{Gij(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{ij}(r)}{Gij(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gcross} and its companions \code{\link{Gdot}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``cross-type'' (type \eqn{i} to type \eqn{j}) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{ij}(r)}{Gij(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest point of type \eqn{j}. An estimate of \eqn{G_{ij}(r)}{Gij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern.
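For example, a minimal sketch of this kind of exploratory analysis (a condensed version of the Examples at the foot of this help file): \preformatted{
  G <- Gcross(amacrine, "off", "on")
  plot(G)   # compare the 'km' estimate with 'theo'
}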
If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{G_{ij}(r)}{Gij(r)} would equal \eqn{F_j(r)}{Fj(r)}, the empty space function of the type \eqn{j} points. For a multitype Poisson point process where the type \eqn{i} points have intensity \eqn{\lambda_i}{lambda[i]}, we have \deqn{G_{ij}(r) = 1 - e^{ - \lambda_j \pi r^2} }{% Gij(r) = 1 - exp( - lambda[j] * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{ij}}{Gij} curves may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{G_{ij}(r)}{Gij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{ij}(r)}{Gij(r)}. This estimate should be used with caution as \eqn{G_{ij}(r)}{Gij(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{ij}}{Gij}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{ij}}{Gij} as if it were an unbiased estimator of \eqn{G_{ij}}{Gij}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. 
} \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{ij}}{Gij} does not necessarily have a density. The reduced sample estimator of \eqn{G_{ij}}{Gij} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{ij}}{Gij} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gdot}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G01 <- Gcross(amacrine) # equivalent to: # G01 <- Gcross(amacrine, "off", "on") plot(G01) # empty space function of `on' points if(interactive()) { F1 <- Fest(split(amacrine)$on, r = G01$r) lines(F1$r, F1$km, lty=3) } # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gcross(pp, "0", "1") # note: "0" not 0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rpoisppx.Rd0000644000176200001440000000307314141452520015773 0ustar liggesusers\name{rpoisppx} \alias{rpoisppx} \title{ Generate Poisson Point Pattern in Any Dimensions } \description{ Generate a random multi-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoisppx(lambda, domain, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern (an object of class \code{"ppx"}). If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns. } \details{ This function generates a realisation of the homogeneous Poisson process in multiple dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the multi-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"boxx"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. } \seealso{ \code{\link{runifpointx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- rpoisppx(10, w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/residuals.slrm.Rd0000644000176200001440000000565314141452520017064 0ustar liggesusers\name{residuals.slrm} \alias{residuals.slrm} \title{ Residuals for Fitted Spatial Logistic Regression Model } \description{ Given a spatial logistic regression model fitted to a point pattern, compute the residuals for each pixel. } \usage{ \method{residuals}{slrm}(object, type=c("raw", "deviance", "pearson", "working", "response", "partial", "score"), \dots) } \arguments{ \item{object}{ The fitted spatial logistic regression model (an object of class \code{"slrm"}) for which residuals should be calculated. } \item{type}{ String (partially matched) indicating the type of residuals to be calculated.
} \item{\dots}{ Ignored. } } \value{ A pixel image (if the residual values are scalar), or a list of pixel images (if the residual values are vectors). } \details{ This function computes several kinds of residuals for the fit of a spatial logistic regression model to a spatial point pattern dataset. The argument \code{object} must be a fitted spatial logistic regression model (object of class \code{"slrm"}). Such objects are created by the fitting algorithm \code{\link{slrm}}. The residuals are computed for each pixel that was used to fit the original model. The residuals are returned as a pixel image (if the residual values are scalar), or a list of pixel images (if the residual values are vectors). The type of residual is chosen by the argument \code{type}. For a given pixel, suppose \eqn{p} is the fitted probability of presence of a point, and \eqn{y} is the presence indicator (equal to 1 if the pixel contains any data points, and equal to 0 otherwise). Then \itemize{ \item \code{type="raw"} or \code{type="response"} specifies the response residual \deqn{r = y - p} \item \code{type="pearson"} is the Pearson residual \deqn{ r_P = \frac{y - p}{\sqrt{p (1-p)}} }{ rP = (y-p)/sqrt(p * (1-p)) } \item \code{type="deviance"} is the deviance residual \deqn{ r_D = (-1)^{y+1} \sqrt{-2(y \log p + (1-y) \log(1-p))} }{ rD = (-1)^(y+1) (-2(y log p + (1-y) log(1-p)))^(1/2) } \item \code{type="score"} specifies the score residuals \deqn{ r_S = (y-p) x }{ rS = (y-p) x } where \eqn{x} is the vector of canonical covariate values for the pixel. \item \code{type="working"} specifies the working residuals as defined in \code{\link[stats]{residuals.glm}} \item \code{type="partial"} specifies the partial residuals as defined in \code{\link[stats]{residuals.glm}} } } \seealso{ \code{\link[stats]{residuals.glm}}, \code{\link{residuals.ppm}} } \examples{ d <- if(interactive()) 128 else 32 H <- unmark(humberside) fit <- slrm(H ~ x + y, dimyx=d) plot(residuals(fit)) plot(residuals(fit, type="score")) } \author{ \adrian } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/rose.Rd0000644000176200001440000001261214141452520015056 0ustar liggesusers\name{rose} \alias{rose} \alias{rose.default} \alias{rose.histogram} \alias{rose.density} \alias{rose.fv} \title{Rose Diagram} \description{ Plots a rose diagram (rose of directions), the analogue of a histogram or density plot for angular data. } \usage{ rose(x, \dots) \method{rose}{default}(x, breaks = NULL, \dots, weights=NULL, nclass = NULL, unit = c("degree", "radian"), start=0, clockwise=FALSE, main) \method{rose}{histogram}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{density}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{fv}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) } \arguments{ \item{x}{ Data to be plotted. A numeric vector containing angles, or a \code{histogram} object containing a histogram of angular values, or a \code{density} object containing a smooth density estimate for angular data, or an \code{fv} object giving a function of an angular argument. } \item{breaks, nclass}{ Arguments passed to \code{\link[graphics]{hist}} to determine the histogram breakpoints.
} \item{\dots}{ Additional arguments passed to \code{\link[graphics]{polygon}} controlling the appearance of the plot (or passed from \code{rose.default} to \code{\link[graphics]{hist}} to control the calculation of the histogram). } \item{unit}{ The unit in which the angles are expressed. } \item{start}{ The starting direction for measurement of angles, that is, the spatial direction which corresponds to a measured angle of zero. Either a character string giving a compass direction (\code{"N"} for north, \code{"S"} for south, \code{"E"} for east, or \code{"W"} for west) or a number giving the angle from the horizontal (East) axis to the starting direction. For example, if \code{unit="degree"} and \code{clockwise=FALSE}, then \code{start=90} and \code{start="N"} are equivalent. The default is to measure angles anti-clockwise from the horizontal axis (East direction). } \item{clockwise}{ Logical value indicating whether angles increase in the clockwise direction (\code{clockwise=TRUE}) or in the anti-clockwise (counter-clockwise) direction (\code{clockwise=FALSE}, the default). } \item{weights}{ Optional vector of numeric weights associated with \code{x}. } \item{main}{ Optional main title for the plot. } \item{labels}{ Either a logical value indicating whether to plot labels next to the tick marks, or a vector of labels for the tick marks. } \item{at}{ Optional vector of angles at which tick marks should be plotted. Set \code{at=numeric(0)} to suppress tick marks. } \item{do.plot}{ Logical value indicating whether to really perform the plot. } } \details{ A rose diagram or rose of directions is the analogue of a histogram or bar chart for data which represent angles in two dimensions. The bars of the bar chart are replaced by circular sectors in the rose diagram. The function \code{rose} is generic, with a default method for numeric data, and methods for histograms and function tables. If \code{x} is a numeric vector, it must contain angular values in the range 0 to 360 (if \code{unit="degree"}) or in the range 0 to \code{2 * pi} (if \code{unit="radian"}). A histogram of the data will first be computed using \code{\link[graphics]{hist}}. Then the rose diagram of this histogram will be plotted by \code{rose.histogram}. If \code{x} is an object of class \code{"histogram"} produced by the function \code{\link[graphics]{hist}}, representing the histogram of angular data, then the rose diagram of the densities (rather than the counts) in this histogram object will be plotted. If \code{x} is an object of class \code{"density"} produced by \code{\link{circdensity}} or \code{\link[stats]{density.default}}, representing a kernel smoothed density estimate of angular data, then the rose diagram of the density estimate will be plotted. If \code{x} is a function value table (object of class \code{"fv"}) then the argument of the function will be interpreted as an angle, and the value of the function will be interpreted as the radius. By default, angles are interpreted using the mathematical convention where the zero angle is the horizontal \eqn{x} axis, and angles increase anti-clockwise. Other conventions can be specified using the arguments \code{start} and \code{clockwise}. Standard compass directions are obtained by setting \code{unit="degree"}, \code{start="N"} and \code{clockwise=TRUE}.
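For example, a kernel-smoothed rose diagram can be obtained by applying \code{\link{circdensity}} before plotting (a minimal sketch; compare the Examples below): \preformatted{
  ang <- runif(1000, max=360)
  # rose of a smooth density estimate rather than a histogram
  rose(circdensity(ang))
}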
} \value{A window (class \code{"owin"}) containing the plotted region.} \author{\adrian \rolf and \ege } \seealso{ \code{\link{fv}}, \code{\link[graphics]{hist}}, \code{\link{circdensity}}, \code{\link[stats]{density.default}}. } \examples{ ang <- runif(1000, max=360) rose(ang, col="grey") rose(ang, col="grey", start="N", clockwise=TRUE) } \keyword{spatial} \keyword{hplot} spatstat.core/man/ppm.object.Rd0000644000176200001440000001355214141452520016153 0ustar liggesusers\name{ppm.object} \alias{ppm.object} %DoNotExport \alias{methods.ppm} %DoNotExport \title{Class of Fitted Point Process Models} \description{ A class \code{ppm} to represent a fitted stochastic model for a point process. The output of \code{\link{ppm}}. } \details{ An object of class \code{ppm} represents a stochastic point process model that has been fitted to a point pattern dataset. Typically it is the output of the model fitter, \code{\link{ppm}}. The class \code{ppm} has methods for the following standard generic functions: \tabular{lll}{ generic \tab method \tab description \cr \code{print} \tab \code{\link{print.ppm}} \tab print details \cr \code{plot} \tab \code{\link{plot.ppm}} \tab plot fitted model \cr \code{predict} \tab \code{\link{predict.ppm}} \tab fitted intensity and conditional intensity \cr \code{fitted} \tab \code{\link{fitted.ppm}} \tab fitted intensity \cr \code{coef} \tab \code{\link{coef.ppm}} \tab fitted coefficients of model \cr \code{anova} \tab \code{\link{anova.ppm}} \tab Analysis of Deviance \cr \code{formula} \tab \code{\link{formula.ppm}} \tab Extract model formula \cr \code{terms} \tab \code{\link{terms.ppm}} \tab Terms in the model formula \cr \code{labels} \tab \code{labels.ppm} \tab Names of estimable terms in the model formula \cr \code{residuals} \tab \code{\link{residuals.ppm}} \tab Point process residuals \cr \code{simulate} \tab \code{\link{simulate.ppm}} \tab Simulate the fitted model \cr \code{update} \tab \code{\link{update.ppm}} \tab Change or refit the model \cr \code{vcov} \tab \code{\link{vcov.ppm}} \tab Variance/covariance matrix of parameter estimates \cr \code{model.frame} \tab \code{\link{model.frame.ppm}} \tab Model frame \cr \code{model.matrix} \tab \code{\link{model.matrix.ppm}} \tab Design matrix \cr \code{logLik} \tab \code{\link{logLik.ppm}} \tab log \emph{pseudo} likelihood \cr \code{extractAIC} \tab \code{\link{extractAIC.ppm}} \tab pseudolikelihood counterpart of AIC \cr \code{nobs} \tab \code{\link{nobs.ppm}} \tab number of observations } Objects of class \code{ppm} can also be handled by the following standard functions, without requiring a special method: \tabular{ll}{ name \tab description \cr \code{\link{confint}} \tab Confidence intervals for parameters \cr \code{\link{step}} \tab Stepwise model selection \cr \code{\link{drop1}} \tab One-step model improvement \cr \code{\link{add1}} \tab One-step model improvement } The class \code{ppm} also has methods for the following generic functions defined in the \pkg{spatstat} package: \tabular{lll}{ generic \tab method \tab description \cr \code{\link{as.interact}} \tab \code{\link{as.interact.ppm}} \tab Interpoint interaction structure \cr \code{\link{as.owin}} \tab \code{\link{as.owin.ppm}} \tab Observation window of data \cr \code{\link{berman.test}} \tab \code{\link{berman.test.ppm}} \tab Berman's test \cr \code{\link{envelope}} \tab \code{\link{envelope.ppm}} \tab Simulation envelopes \cr \code{\link{fitin}} \tab \code{\link{fitin.ppm}} \tab Fitted interaction \cr \code{\link{is.marked}} \tab 
\code{\link{is.marked.ppm}} \tab Determine whether the model is marked \cr \code{\link{is.multitype}} \tab \code{\link{is.multitype.ppm}} \tab Determine whether the model is multitype \cr \code{\link{is.poisson}} \tab \code{\link{is.poisson.ppm}} \tab Determine whether the model is Poisson \cr \code{\link{is.stationary}} \tab \code{\link{is.stationary.ppm}} \tab Determine whether the model is stationary \cr \code{\link{cdf.test}} \tab \code{\link{cdf.test.ppm}} \tab Spatial distribution test \cr \code{\link{quadrat.test}} \tab \code{\link{quadrat.test.ppm}} \tab Quadrat counting test \cr \code{\link{reach}} \tab \code{\link{reach.ppm}} \tab Interaction range of model \cr \code{\link{rmhmodel}} \tab \code{\link{rmhmodel.ppm}} \tab Model in a form that can be simulated \cr \code{\link{rmh}} \tab \code{\link{rmh.ppm}} \tab Perform simulation \cr \code{\link{unitname}} \tab \code{\link{unitname.ppm}} \tab Name of unit of length } Information about the data (to which the model was fitted) can be extracted using \code{\link{data.ppm}}, \code{\link{dummy.ppm}} and \code{\link{quad.ppm}}. } \section{Internal format}{ If you really need to get at the internals, a \code{ppm} object contains at least the following entries: \tabular{ll}{ \code{coef} \tab the fitted regular parameters (as returned by \code{glm}) \cr \code{trend} \tab the trend formula or \code{NULL} \cr \code{interaction} \tab the point process interaction family (an object of class \code{"interact"}) or \code{NULL} \cr \code{Q} \tab the quadrature scheme used \cr \code{maxlogpl} \tab the maximised value of log pseudolikelihood \cr \code{correction} \tab name of edge correction method used \cr } See \code{\link{ppm}} for explanation of these concepts. The irregular parameters (e.g. the interaction radius of the Strauss process) are encoded in the \code{interaction} entry. However see the Warnings. } \seealso{ \code{\link{ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{print.ppm}}, \code{\link{predict.ppm}}, \code{\link{plot.ppm}}. } \section{Warnings}{ The internal representation of \code{ppm} objects may change slightly between releases of the \pkg{spatstat} package. } \examples{ fit <- ppm(cells ~ x, Strauss(0.1), correction="periodic") fit coef(fit) # pred <- predict(fit) pred <- predict(fit, ngrid=20, type="trend") if(interactive()) { plot(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat.core/man/roc.Rd0000644000176200001440000000600514141452520014670 0ustar liggesusers\name{roc} \alias{roc} \alias{roc.ppp} \alias{roc.ppm} \alias{roc.kppm} \alias{roc.slrm} \title{ Receiver Operating Characteristic } \description{ Computes the Receiver Operating Characteristic curve for a point pattern or a fitted point process model. } \usage{ roc(X, \dots) \method{roc}{ppp}(X, covariate, \dots, high = TRUE) \method{roc}{ppm}(X, \dots) \method{roc}{kppm}(X, \dots) \method{roc}{slrm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. 
} \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the Receiver Operating Characteristic (ROC) curve. The area under the ROC is computed by \code{\link{auc}}. For a point pattern \code{X} and a covariate \code{Z}, the ROC is a plot showing the ability of the covariate to separate the spatial domain into areas of high and low density of points. For each possible threshold \eqn{z}, the algorithm calculates the fraction \eqn{a(z)} of area in the study region where the covariate takes a value greater than \eqn{z}, and the fraction \eqn{b(z)} of data points for which the covariate value is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against \eqn{a(z)} for all thresholds \eqn{z}. For a fitted point process model, the ROC shows the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model (Lobo et al, 2007). (For spatial logistic regression models (class \code{"slrm"}) replace \dQuote{intensity} by \dQuote{probability of presence} in the text above.) } \value{ Function value table (object of class \code{"fv"}) which can be plotted to show the ROC curve. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2), 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link{auc}} } \examples{ plot(roc(swedishpines, "x")) fit <- ppm(swedishpines ~ x+y) plot(roc(fit)) } \keyword{spatial} spatstat.core/man/split.msr.Rd0000644000176200001440000000476514141452520016053 0ustar liggesusers\name{split.msr} \alias{split.msr} \title{ Divide a Measure into Parts } \description{ Decomposes a measure into components, each component being a measure. } \usage{ \method{split}{msr}(x, f, drop = FALSE, \dots) } \arguments{ \item{x}{ Measure (object of class \code{"msr"}) to be decomposed. } \item{f}{ Factor or tessellation determining the decomposition. Argument passed to \code{\link{split.ppp}}. See Details. } \item{drop}{ Logical value indicating whether empty components should be retained in the list (\code{drop=FALSE}, the default) or deleted (\code{drop=TRUE}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. See \code{\link{msr}} for explanation. This function is a method for the generic \code{\link[base]{split}}. It divides the measure \code{x} into components, each of which is a measure. A measure \code{x} is represented in \pkg{spatstat} by a finite set of sample points with values attached to them. The function \code{split.msr} divides this pattern of sample points into several sub-patterns of points using \code{\link{split.ppp}}. For each sub-pattern, the values attached to these points are extracted from \code{x}, and these values and sample points determine a measure, which is a component or piece of the original \code{x}.
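For example, a minimal sketch (a condensed version of the Examples at the foot of this help file): \preformatted{
  a <- residuals(ppm(cells ~ x))
  # divide the residual measure into pieces, one per Dirichlet tile
  aa <- split(a, dirichlet(runifpoint(4)))
}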
The argument \code{f} can be missing, if the sample points of \code{x} are multitype points. In this case, \code{x} represents a measure associated with marked spatial locations, and the command \code{split(x)} separates \code{x} into a list of component measures, one for each possible mark. Otherwise the argument \code{f} is passed to \code{\link{split.ppp}}. It should be either a factor (of length equal to the number of sample points of \code{x}) or a tessellation (object of class \code{"tess"} representing a division of space into tiles) as documented under \code{\link{split.ppp}}. } \value{ A list, each of whose entries is a measure (object of class \code{"msr"}). } \author{ \spatstatAuthors } \seealso{ \code{\link{msr}}, \code{\link{[.msr}}, \code{\link{with.msr}} } \examples{ ## split by tessellation a <- residuals(ppm(cells ~ x)) aa <- split(a, dirichlet(runifpoint(4))) aa sapply(aa, integral) ## split by type of point b <- residuals(ppm(amacrine ~ marks + x)) bb <- split(b) bb } \keyword{spatial} \keyword{manip} spatstat.core/man/fitted.slrm.Rd0000644000176200001440000000222714141452520016342 0ustar liggesusers\name{fitted.slrm} \Rdversion{1.1} \alias{fitted.slrm} \title{ Fitted Probabilities for Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel. } \usage{ \method{fitted}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[stats:fitted.values]{fitted}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The algorithm computes the fitted probabilities of the presence of a random point in each pixel. } \value{ A pixel image (object of class \code{"im"}) containing the fitted probability for each pixel. } \seealso{ \code{\link{slrm}}, \code{\link[stats:fitted.values]{fitted}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(fitted(fit)) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/predict.mppm.Rd0000644000176200001440000001500214141452520016504 0ustar liggesusers\name{predict.mppm} \alias{predict.mppm} \title{Prediction for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model obtained by \code{\link{mppm}}, evaluate the spatial trend and/or the conditional intensity of the model. By default, predictions are evaluated over a grid of locations, yielding pixel images of the trend and conditional intensity. Alternatively predictions may be evaluated at specified locations with specified values of the covariates. } \usage{ \method{predict}{mppm}(object, ..., newdata = NULL, type = c("trend", "cif"), ngrid = 40, locations=NULL, verbose=FALSE) } \arguments{ \item{object}{The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{newdata}{ Optional. New values of the covariates, for which the predictions should be computed. See Details. } \item{type}{ Type of predicted values required. A character string or vector of character strings. Options are \code{"trend"} for the spatial trend (first-order term) and \code{"cif"} or \code{"lambda"} for the conditional intensity. Alternatively \code{type="all"} selects all options. 
} \item{ngrid}{ Dimensions of the grid of spatial locations at which prediction will be performed (if \code{locations=NULL}). An integer or a pair of integers. } \item{locations}{ Optional. The locations at which predictions should be performed. A list of point patterns, with one entry for each row of \code{newdata}. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ This function computes the spatial trend and the conditional intensity of a spatial point process model that has been fitted to several spatial point patterns. See Chapter 16 of Baddeley, Rubak and Turner (2015) for explanation and examples. Note that by ``spatial trend'' we mean the (exponentiated) first order potential and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}.] The conditional intensity \eqn{\lambda(u,X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location u, with respect to the response point pattern X. If \code{newdata=NULL}, predictions are computed for the original values of the covariates, to which the model was fitted. Otherwise \code{newdata} should be a hyperframe (see \code{\link{hyperframe}}) containing columns of covariates as required by the model. If \code{type} includes \code{"cif"}, then \code{newdata} must also include a column of spatial point pattern responses, in order to compute the conditional intensity. If \code{locations=NULL}, then predictions are performed at an \code{ngrid} by \code{ngrid} grid of locations in the window for each response point pattern. The result will be a hyperframe containing a column of images of the trend (if selected) and a column of images of the conditional intensity (if selected). The result can be plotted. If \code{locations} is given, then it should be a list of point patterns (objects of class \code{"ppp"}). Predictions are performed at these points, and the results are returned as mark values attached to the \code{locations}. The result is a hyperframe containing columns called \code{trend} and/or \code{cif}. The column called \code{trend} contains marked point patterns in which the point locations are the \code{locations} and the mark value is the predicted trend. The column called \code{cif} contains marked point patterns in which the point locations are the \code{locations} and the mark value is the predicted conditional intensity. } \section{Models that depend on row number}{ The point process model that is described by an \code{mppm} object may be a different point process for each row of the original hyperframe of data. This occurs if the model formula includes the variable \code{id} (representing row number) or if the model has a different interpoint interaction on each row. If the point process model is different on each row of the original data, then either \itemize{ \item \code{newdata} is missing. Predictions are computed for each row of the original data using the point process model that applies on each row. \item \code{newdata} must have the same number of rows as the original data. Each row of \code{newdata} is assumed to be a replacement for the corresponding row of the original data. The prediction for row \code{i} of \code{newdata} will be computed for the point process model that applies to row \code{i} of the original data. 
\item \code{newdata} must include a column called \code{id} specifying the row number, and therefore identifying which of the point process models should apply. The predictions for row \code{i} of \code{newdata} will be computed for the point process model that applies to row \code{k} of the original data, where \code{k = newdata$id[i]}. } } \value{ A hyperframe with columns named \code{trend} and/or \code{cif}. If \code{locations=NULL}, the entries of the hyperframe are pixel images. If \code{locations} is not null, the entries are marked point patterns constructed by attaching the predicted values to the \code{locations} point patterns. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \seealso{ \code{\link{mppm}}, \code{\link{fitted.mppm}}, \code{\link{hyperframe}} } \examples{ h <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ x, data=h, interaction=Strauss(7)) # prediction on a grid p <- predict(fit) plot(p$trend) # prediction at specified locations loc <- with(h, runifpoint(20, Window(Bugs))) p2 <- predict(fit, locations=loc) plot(p2$trend) } \keyword{spatial} \keyword{models} spatstat.core/man/methods.objsurf.Rd0000644000176200001440000000337314141452520017226 0ustar liggesusers\name{methods.objsurf} \Rdversion{1.1} \alias{methods.objsurf} %DoNotExport \alias{print.objsurf} \alias{plot.objsurf} \alias{persp.objsurf} \alias{image.objsurf} \alias{contour.objsurf} \alias{summary.objsurf} \alias{print.summary.objsurf} \title{ Methods for Objective Function Surfaces } \description{ Methods for printing and plotting an objective function surface. } \usage{ \method{print}{objsurf}(x, ...) \method{plot}{objsurf}(x, ...) \method{image}{objsurf}(x, ...) \method{contour}{objsurf}(x, ...) \method{persp}{objsurf}(x, ...) \method{summary}{objsurf}(object, ...) \method{print}{summary.objsurf}(x, ...) } \arguments{ \item{x,object}{ Object of class \code{"objsurf"} representing an objective function surface. } \item{\dots}{ Additional arguments passed to plot methods. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{persp}} and \code{\link{summary}} for the class \code{"objsurf"}. } \value{ For \code{print.objsurf}, \code{print.summary.objsurf}, \code{plot.objsurf} and \code{image.objsurf} the value is \code{NULL}. For \code{contour.objsurf} and \code{persp.objsurf} the value is described in the help for \code{\link{contour.default}} and \code{\link{persp.default}} respectively. For \code{summary.objsurf} the result is a list of class \code{summary.objsurf} containing summary information. This list is printed in sensible format by \code{print.summary.objsurf}. } \author{ \adrian and \ege.
} \seealso{ \code{\link{objsurf}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) os summary(os) plot(os) contour(os, add=TRUE) persp(os) } \keyword{spatial} \keyword{hplot} spatstat.core/man/rcellnumber.Rd0000644000176200001440000000312714141452520016421 0ustar liggesusers\name{rcellnumber} \alias{rcellnumber} \title{ Generate Random Numbers of Points for Cell Process } \description{ Generates random integers for the Baddeley-Silverman counterexample. } \usage{ rcellnumber(n, N = 10, mu=1) } \arguments{ \item{n}{ Number of random integers to be generated. } \item{N}{ Distributional parameter: the largest possible value (when \code{mu <= 1}). An integer greater than 1. } \item{mu}{ Mean of the distribution (equals the variance). Any positive real number. } } \details{ If \code{mu = 1} (the default), this function generates random integers which have mean and variance equal to 1, but which do not have a Poisson distribution. The random integers take the values \eqn{0}, \eqn{1} and \eqn{N} with probabilities \eqn{1/N}, \eqn{(N-2)/(N-1)} and \eqn{1/(N(N-1))} respectively. See Baddeley and Silverman (1984). If \code{mu} is another positive number, the random integers will have mean and variance equal to \code{mu}. They are obtained by generating the one-dimensional counterpart of the cell process and counting the number of points in the interval from \code{0} to \code{mu}. The maximum possible value of each random integer is \code{N * ceiling(mu)}. } \value{ An integer vector of length \code{n}. } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089--1094. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rcell}} } \examples{ rcellnumber(30, 3) } \keyword{datagen} spatstat.core/man/bw.CvL.Rd0000644000176200001440000000621314141452520015201 0ustar liggesusers\name{bw.CvL} \alias{bw.CvL} \title{ Cronie and van Lieshout's Criterion for Bandwidth Selection for Kernel Density } \description{ Uses Cronie and van Lieshout's criterion based on Campbell's formula to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.CvL(X, \dots, srange = NULL, ns = 16, sigma = NULL, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{warn}{ Logical. If \code{TRUE}, a warning is issued if the optimal value of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}.
The bandwidth \eqn{\sigma}{sigma} is chosen to minimise the discrepancy between the area of the observation window and the sum of reciprocal estimated intensity values at the points of the point process \deqn{ \mbox{CvL}(\sigma) = (|W| - \sum_i 1/\hat\lambda(x_i))^2 }{ CvL(sigma) = (|W| - sum[i] 1/lambda(x[i]))^2 } where the sum is taken over all the data points \eqn{x_i}{x[i]}, and where \eqn{\hat\lambda(x_i)}{lambda(x[i])} is the kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{sigma}. The value of \eqn{\mbox{CvL}(\sigma)}{CvL(sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.scott}}, \code{\link{bw.ppl}}, \code{\link{bw.frac}}. } \examples{ if(interactive()) { b <- bw.CvL(redwood) b plot(b, main="Cronie and van Lieshout bandwidth criterion for redwoods") plot(density(redwood, b)) plot(density(redwood, bw.CvL)) } \testonly{ b <- bw.CvL(redwood, srange=c(0.03, 0.07), ns=2) } } \references{ Cronie, O. and van Lieshout, M.N.M. (2018) A non-model-based approach to bandwidth selection for kernel estimators of spatial intensity functions. \emph{Biometrika} \bold{105}, 455--462. } \author{Ottmar Cronie \email{ottmar.cronie@umu.se} and Marie-Colette van Lieshout \email{Marie-Colette.van.Lieshout@cwi.nl}, adapted for \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/rmpoint.Rd0000644000176200001440000002547114141452520015605 0ustar liggesusers\name{rmpoint} \alias{rmpoint} \title{Generate N Random Multitype Points} \description{ Generate a random multitype point pattern with a fixed number of points, or a fixed number of points of each type. } \usage{ rmpoint(n, f=1, fmax=NULL, win=unit.square(), types, ptypes, \dots, giveup=1000, verbose=FALSE, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of marked points to generate. Either a single number specifying the total number of points, or a vector specifying the number of points of each type. } \item{f}{ The probability density of the multitype points, usually un-normalised. Either a constant, a vector, a function \code{f(x,y,m, ...)}, a pixel image, a list of functions \code{f(x,y,...)} or a list of pixel images. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image or list of pixel images. } \item{types}{ All the possible types for the multitype pattern. } \item{ptypes}{ Optional vector of probabilities for each type. } \item{\dots}{ Arguments passed to \code{f} if it is a function. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical.
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates random multitype point patterns consisting of a fixed number of points. Three different models are available: \describe{ \item{I. Random location and type:}{ If \code{n} is a single number and the argument \code{ptypes} is missing, then \code{n} independent, identically distributed random multitype points are generated. Their locations \code{(x[i],y[i])} and types \code{m[i]} have joint probability density proportional to \eqn{f(x,y,m)}. } \item{II. Random type, and random location given type:}{ If \code{n} is a single number and \code{ptypes} is given, then \code{n} independent, identically distributed random multitype points are generated. Their types \code{m[i]} have probability distribution \code{ptypes}. Given the types, the locations \code{(x[i],y[i])} have conditional probability density proportional to \eqn{f(x,y,m)}. } \item{III. Fixed types, and random location given type:}{ If \code{n} is a vector, then we generate \code{n[i]} independent, identically distributed random points of type \code{types[i]}. For points of type \eqn{m} the conditional probability density of location \eqn{(x,y)} is proportional to \eqn{f(x,y,m)}. } } Note that the density \code{f} is normalised in different ways in Model I and Models II and III. In Model I the normalised joint density is \eqn{g(x,y,m)=f(x,y,m)/Z} where \deqn{ Z = \sum_m \int\int f(x,y,m) {\rm d}x \, {\rm d}y }{ Z = sum[m] integral f(x,y,m) dx dy } while in Models II and III the normalised conditional density is \eqn{g(x,y\mid m) = f(x,y,m)/Z_m}{g(x,y|m) = f(x,y,m)/Z[m]} where \deqn{ Z_m = \int\int f(x,y,m) {\rm d}x \, {\rm d}y. }{ Z[m] = integral f(x,y,m) dx dy. } In Model I, the marginal distribution of types is \eqn{p_m = Z_m/Z}{p[m] = Z[m]/Z}. The unnormalised density \code{f} may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{f} is a single number, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is also uniform (all possible types have equal probability). } \item{vector:}{ If \code{f} is a numeric vector, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is proportional to the vector \code{f}. In Model II, the marginal distribution of types is \code{ptypes}, that is, the values in \code{f} are ignored. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } \item{function:}{ If \code{f} is a function, it will be called in the form \code{f(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. In Model I, the joint probability density of location and type is proportional to \code{f(x,y,m,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f(x,y,m,\dots)}. The function \code{f} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels \code{types}.)
The value \code{fmax} must be given and must be an upper bound on the values of \code{f(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{f} is a list of functions, then the functions will be called in the form \code{f[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. In Model I, the joint probability density of location and type is proportional to \code{f[[m]](x,y,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f[[m]](x,y,\dots)}. The function \code{f[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{fmax} must be given and must be an upper bound on the values of \code{f[[i]](x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } \item{pixel image:}{ If \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the unnormalised density at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{f} for the pixel nearest to \code{(x,y)}. In Model I, the marginal distribution of types is uniform. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{f} is a list of pixel images, then the image \code{f[[i]]} determines the density values of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } } The implementation uses the rejection method. For Model I, \code{\link{rmpoispp}} is called repeatedly until \code{n} points have been generated. It gives up after \code{giveup} calls if there are still fewer than \code{n} points. For Model II, the types are first generated according to \code{ptypes}, then the locations of the points of each type are generated using \code{\link{rpoint}}. For Model III, the locations of the points of each type are generated using \code{\link{rpoint}}. } \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}} } \examples{ abc <- c("a","b","c") ##### Model I rmpoint(25, types=abc) rmpoint(25, 1, types=abc) # 25 points, equal probability for each type, uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc) # same as above rmpoint(25, function(x,y,m) { x }, types=abc) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc) rmpoint(25, list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 25 points, UNEQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. 
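## A further minimal sketch of Model I with a pixel image: when f is a
## pixel image, all types share the same unnormalised spatial density,
## the window of the image is used as the simulation window, the
## marginal distribution of types is uniform, and 'types' must be given
## explicitly (illustrative only; the density image Z is arbitrary)
Z <- as.im(function(x,y) { exp(2 * x) }, owin())
rmpoint(25, Z, types=abc)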
##### Model II rmpoint(25, 1, types=abc, ptypes=rep(1,3)/3) rmpoint(25, 1, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, function(x,y,m) { x }, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc, ptypes=rep(1,3)) # 25 points, EQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. ###### Model III rmpoint(c(12, 8, 4), 1, types=abc) # 12 points of type "a", # 8 points of type "b", # 4 points of type "c", # each uniformly distributed rmpoint(c(12, 8, 4), function(x,y,m) { ifelse(m=="a", 1, x)}, types=abc) rmpoint(c(12, 8, 4), list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 12 points of type "a", uniformly distributed # 8 points of type "b", nonuniform # 4 points of type "c", nonuniform ######### ## Randomising an existing point pattern: # same numbers of points of each type, uniform random locations (Model III) rmpoint(table(marks(demopat)), 1, win=Window(demopat)) # same total number of points, distribution of types estimated from X, # uniform random locations (Model II) rmpoint(npoints(demopat), 1, types=levels(marks(demopat)), win=Window(demopat), ptypes=table(marks(demopat))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/runifpoint.Rd0000644000176200001440000000636214141452520016310 0ustar liggesusers\name{runifpoint} \alias{runifpoint} \title{Generate N Uniform Random Points} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points. } \usage{ runifpoint(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, \dots, nsim=1, drop=TRUE, ex=NULL) } \arguments{ \item{n}{ Number of points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{warn}{ Logical. Whether to issue a warning if \code{n} is very large. See Details. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{ex}{ Optional. A point pattern to use as the example. If \code{ex} is given and \code{n} and \code{win} are missing, then \code{n} and \code{win} will be calculated from the point pattern \code{ex}. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent random points, uniformly distributed in the window \code{win}. (For nonuniform distributions, see \code{\link{rpoint}}.) 
The algorithm depends on the type of window, as follows: \itemize{ \item If \code{win} is a rectangle then \eqn{n} independent random points, uniformly distributed in the rectangle, are generated by assigning uniform random values to their Cartesian coordinates. \item If \code{win} is a binary image mask, then a random sequence of pixels is selected (using \code{\link{sample}}) with equal probabilities. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. \item If \code{win} is a polygonal window, the algorithm uses the rejection method. It finds a rectangle enclosing the window, generates points in this rectangle, and tests whether they fall in the desired window. It gives up when \code{giveup * n} tests have been performed without yielding \code{n} successes. } The algorithm for binary image masks is faster than the rejection method but involves discretisation. If \code{warn=TRUE}, then a warning will be issued if \code{n} is very large. The threshold is \code{\link{spatstat.options}("huge.npoints")}. This warning has no consequences, but it helps to trap a number of common errors. } \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{rpoispp}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit square pp <- runifpoint(100) # irregular window data(letterR) # polygonal pp <- runifpoint(100, letterR) # binary image mask pp <- runifpoint(100, as.mask(letterR)) ## # randomising an existing point pattern runifpoint(npoints(cells), win=Window(cells)) runifpoint(ex=cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/blur.Rd0000644000176200001440000000730514141452520015055 0ustar liggesusers\name{blur} \alias{blur} \alias{Smooth.im} \title{Apply Gaussian Blur to a Pixel Image} \description{ Applies a Gaussian blur to a pixel image. } \usage{ blur(x, sigma = NULL, \dots, kernel="gaussian", normalise=FALSE, bleed = TRUE, varcov=NULL) \method{Smooth}{im}(X, sigma = NULL, \dots, kernel="gaussian", normalise=FALSE, bleed = TRUE, varcov=NULL) } \arguments{ \item{x,X}{The pixel image. An object of class \code{"im"}.} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Ignored. } \item{kernel}{ String (partially matched) specifying the smoothing kernel. Current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}. } \item{normalise}{ Logical flag indicating whether the output values should be divided by the corresponding blurred image of the window itself. See Details. } \item{bleed}{ Logical flag indicating whether to allow blur to extend outside the original domain of the image. See Details. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } } \details{ This command applies a Gaussian blur to the pixel image \code{x}. \code{Smooth.im} is a method for the generic \code{\link{Smooth}} for pixel images. It is currently identical to \code{blur}, apart from the name of the first argument. The blurring kernel is the isotropic Gaussian kernel with standard deviation \code{sigma}, or the anisotropic Gaussian kernel with variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. Also \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(sigma^2)}.
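For example, smoothing with standard deviation 1 in the \eqn{x} direction and 2 in the \eqn{y} direction could be specified in either of the following equivalent ways (an illustrative sketch, where \code{Z} stands for any pixel image): \preformatted{
    blur(Z, sigma=c(1, 2))
    blur(Z, varcov=diag(c(1, 2)^2))
  }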
If the pixel values of \code{x} include some \code{NA} values (meaning that the image domain does not completely fill the rectangular frame) then these \code{NA} values are first reset to zero. The algorithm then computes the convolution \eqn{x \ast G}{x * G} of the (zero-padded) pixel image \eqn{x} with the specified Gaussian kernel \eqn{G}. If \code{normalise=FALSE}, then this convolution \eqn{x\ast G}{x * G} is returned. If \code{normalise=TRUE}, then the convolution \eqn{x \ast G}{x * G} is normalised by dividing it by the convolution \eqn{w \ast G}{w * G} of the image domain \code{w} with the same Gaussian kernel. Normalisation ensures that the result can be interpreted as a weighted average of input pixel values, without edge effects due to the shape of the domain. If \code{bleed=FALSE}, then pixel values outside the original image domain are set to \code{NA}. Thus the output is a pixel image with the same domain as the input. If \code{bleed=TRUE}, then no such alteration is performed, and the result is a pixel image defined everywhere in the rectangular frame containing the input image. Computation is performed using the Fast Fourier Transform. } \value{ A pixel image with the same pixel array as the input image \code{x}. } \seealso{ \code{\link{interp.im}} for interpolating a pixel image to a finer resolution, \code{\link{density.ppp}} for blurring a point pattern, \code{\link{Smooth.ppp}} for interpolating marks attached to points. } \examples{ Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) par(mfrow=c(1,3)) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) par(mfrow=c(1,1)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} \keyword{manip} spatstat.core/man/rMosaicField.Rd0000644000176200001440000000262014141452520016445 0ustar liggesusers\name{rMosaicField} \alias{rMosaicField} \title{Mosaic Random Field} \description{ Generate a realisation of a random field which is piecewise constant on the tiles of a given tessellation. } \usage{ rMosaicField(X, rgen = function(n) { sample(0:1, n, replace = TRUE)}, ..., rgenargs=NULL) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the pixel resolution. } \item{rgen}{ Function that generates random values for the tiles of the tessellation. } \item{rgenargs}{ List containing extra arguments that should be passed to \code{rgen} (typically specifying parameters of the distribution of the values). } } \details{ This function generates a realisation of a random field which is piecewise constant on the tiles of the given tessellation \code{X}. The values in each tile are independent and identically distributed. } \value{ A pixel image (object of class \code{"im"}). } \author{\adrian and \rolf } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicSet}} } \examples{ X <- rpoislinetess(3) plot(rMosaicField(X, runif)) plot(rMosaicField(X, runif, dimyx=256)) plot(rMosaicField(X, rnorm, rgenargs=list(mean=10, sd=2))) plot(rMosaicField(dirichlet(runifpoint(30)), rnorm)) } \keyword{spatial} \keyword{datagen} spatstat.core/man/summary.kppm.Rd0000644000176200001440000000532714141452520016556 0ustar liggesusers\name{summary.kppm} \alias{summary.kppm} \alias{print.summary.kppm} \title{Summarizing a Fitted Cox or Cluster Point Process Model} \description{ \code{summary} method for class \code{"kppm"}. 
} \usage{ \method{summary}{kppm}(object, \dots, quick=FALSE) \method{print}{summary.kppm}(x, \dots) } \arguments{ \item{object}{ A fitted Cox or cluster point process model (object of class \code{"kppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.kppm"} as returned by \code{summary.kppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"kppm"}. An object of class \code{"kppm"} describes a fitted Cox or cluster point process model. See \code{\link{kppm}}. \code{summary.kppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.kppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.kppm} is invoked implicitly when the user calls \code{summary.kppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.kppm} returns an object of class \code{"summary.kppm"}, while \code{print.summary.kppm} returns \code{NULL}. The result of \code{summary.kppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{clusters}{the \code{clusters} argument to \code{\link{kppm}}} \item{modelname}{character string describing the model} \item{isPCP}{\code{TRUE} if the model is a Poisson cluster process, \code{FALSE} if it is a log-Gaussian Cox process} \item{lambda}{Estimated intensity: numeric value, or pixel image} \item{mu}{Mean cluster size: numeric value, pixel image, or \code{NULL}} \item{clustpar}{list of fitted parameters for the cluster model} \item{clustargs}{list of fixed parameters for the cluster model, if any} \item{callstring}{character string representing the original call to \code{\link{kppm}}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") summary(fit) coef(summary(fit)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/domain.Rd0000644000176200001440000000432214141452520015354 0ustar liggesusers\name{domain} \alias{domain.ppm} \alias{domain.kppm} \alias{domain.dppm} \alias{domain.slrm} \alias{domain.msr} \alias{domain.quadrattest} \alias{domain.rmhmodel} \title{ Extract the Domain of any Spatial Object } \description{ Given a spatial object such as a point pattern, in any number of dimensions, this function extracts the spatial domain in which the object is defined. } \usage{ \method{domain}{ppm}(X, \dots, from=c("points", "covariates")) \method{domain}{kppm}(X, \dots, from=c("points", "covariates")) \method{domain}{dppm}(X, \dots, from=c("points", "covariates")) \method{domain}{slrm}(X, \dots, from=c("points", "covariates")) \method{domain}{msr}(X, \dots) \method{domain}{quadrattest}(X, \dots) \method{domain}{rmhmodel}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a point pattern (in any number of dimensions), line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } \item{from}{Character string. 
See Details.} } \details{ The function \code{domain} is generic. For a spatial object \code{X} in any number of dimensions, \code{domain(X)} extracts the spatial domain in which \code{X} is defined. For a two-dimensional object \code{X}, typically \code{domain(X)} is the same as \code{Window(X)}. Exceptions occur for methods related to linear networks. The argument \code{from} applies when \code{X} is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{from="points"} (the default), \code{domain} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{domain} returns the window in which the spatial covariates of the model were provided. } \value{ A spatial object representing the domain of \code{X}. Typically a window (object of class \code{"owin"}), a three-dimensional box (\code{"box3"}), a multidimensional box (\code{"boxx"}) or a linear network (\code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{Window}}, \code{\link{Frame}} } \examples{ domain(cells) domain(bei.extra$elev) } \keyword{spatial} \keyword{manip} spatstat.core/man/kernel.squint.Rd0000644000176200001440000000274314141452520016714 0ustar liggesusers\name{kernel.squint} \alias{kernel.squint} \title{Integral of Squared Kernel} \description{ Computes the integral of the squared kernel, for the kernels used in density estimation for numerical data. } \usage{ kernel.squint(kernel = "gaussian", bw=1) } \arguments{ \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } \item{bw}{ Bandwidth (standard deviation) of the kernel. } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. This function computes the integral of the squared kernel, \deqn{ R = \int_{-\infty}^{\infty} k(x)^2 \, {\rm d}x }{ R = integral of k(x)^2 dx from x = -infinity to x = +infinity } where \eqn{k(x)} is the kernel with bandwidth \code{bw}. } \value{ A single number. } \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.moment}}, \code{\link{kernel.factor}} } \examples{ kernel.squint("gaussian", 3) # integral of squared Epanechnikov kernel with half-width h=1 h <- 1 bw <- h/kernel.factor("epa") kernel.squint("epa", bw) } \author{ \spatstatAuthors and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat.core/man/cov.im.Rd0000644000176200001440000000416614141452520015306 0ustar liggesusers\name{cov.im} \alias{cov.im} \alias{cor.im} \title{ Covariance and Correlation between Images } \description{ Compute the covariance or correlation between (the corresponding pixel values in) several images. } \usage{ cov.im(\dots, use = "everything", method = c("pearson", "kendall", "spearman")) } \arguments{ \item{\dots}{ Any number of arguments, each of which is a pixel image (object of class \code{"im"}). Alternatively, a single argument which is a list of pixel images. } \item{use}{ Argument passed to \code{\link[stats]{cov}} or \code{\link[stats]{cor}} determining how to handle \code{NA} values in the data. } \item{method}{ Argument passed to \code{\link[stats]{cov}} or \code{\link[stats]{cor}} determining the type of correlation that will be computed.
} } \details{ The arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. These functions compute the covariance or correlation between the corresponding pixel values in the images given. The pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Finally the correlation or covariance between the pixel values of each pair of images, at corresponding pixels, is computed. The result is a symmetric matrix with one row and column for each image. The \code{[i,j]} entry is the correlation or covariance between the \code{i}th and \code{j}th images in the argument list. The row names and column names of the matrix are copied from the argument names if they were given (i.e. if the arguments were given as \code{name=value}). Note that \code{\link[stats]{cor}} and \code{\link[stats]{cov}} are not generic, so you have to type \code{cor.im}, \code{cov.im}. } \value{ A symmetric matrix. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{cor}}, \code{\link[stats]{cov}} \code{\link{pairs.im}} } \examples{ cor.im(bei.extra) } \keyword{spatial} \keyword{univar} \keyword{nonparametric} spatstat.core/man/npfun.Rd0000644000176200001440000000143214141452520015232 0ustar liggesusers\name{npfun} \alias{npfun} \title{ Dummy Function Returns Number of Points } \description{ Returns a summary function which is constant with value equal to the number of points in the point pattern. } \usage{ npfun(X, ..., r) } \arguments{ \item{X}{ Point pattern. } \item{\dots}{ Ignored. } \item{r}{ Vector of values of the distance argument \eqn{r}. } } \details{ This function is normally not called by the user. Instead it is passed as an argument to the function \code{\link{psst}}. } \value{ Object of class \code{"fv"} representing a constant function. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ \code{\link{psst}} } \examples{ fit0 <- ppm(cells, ~1, nd=10) v <- psst(fit0, npfun) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Ord.Rd0000644000176200001440000000352614141452520014636 0ustar liggesusers\name{Ord} \alias{Ord} \title{Generic Ord Interaction model} \description{ Creates an instance of an Ord-type interaction point process model which can then be fitted to point pattern data. } \usage{ Ord(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. Ord (1977) proposed fitting this model to forestry data when \eqn{g(a)} has a simple ``threshold'' form. That model is implemented in our function \code{\link{OrdThresh}}. The present function \code{Ord} implements the case of a completely general Ord potential \eqn{g(a)} specified as an S language function \code{pot}. This is experimental. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. 
Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{OrdThresh}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/nnorient.Rd0000644000176200001440000000675514141452520015755 0ustar liggesusers\name{nnorient} \alias{nnorient} \title{ Nearest Neighbour Orientation Distribution } \description{ Computes the distribution of the orientation of the vectors from each point to its nearest neighbour. } \usage{ nnorient(X, \dots, cumulative = FALSE, correction, k = 1, unit = c("degree", "radian"), domain = NULL, ratio = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{k}{ Integer. The \eqn{k}th nearest neighbour will be used. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers each point in the pattern \code{X} and finds its nearest neighbour (or \eqn{k}th nearest neighour). The \emph{direction} of the arrow joining the data point to its neighbour is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the angles is calculated using \code{\link{circdensity}}. This is the function \eqn{\vartheta(\phi)}{theta(phi)} defined in Illian et al (2008), equation (4.5.3), page 253. If \code{cumulative=TRUE}, then the cumulative distribution function of these angles is calculated. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Illian, J., Penttinen, A., Stoyan, H. 
and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns}. Wiley. } \seealso{ \code{\link{pairorient}} } \examples{ rose(nnorient(redwood, adjust=0.6), col="grey") plot(CDF <- nnorient(redwood, cumulative=TRUE)) } \author{\adrian, \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Kcross.inhom.Rd0000644000176200001440000003120014141452520016455 0ustar liggesusers\name{Kcross.inhom} \alias{Kcross.inhom} \title{ Inhomogeneous Cross K Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross \eqn{K} function, which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kcross.inhom(X, i, j, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{j}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the cross K function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for advanced use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted.
} \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points of types \code{i} and \code{j} respectively. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. The inhomogeneous cross-type \eqn{K} function is described by \Moller and Waagepetersen (2003, pages 48-49 and 51-53). Briefly, given a multitype point process, suppose the sub-process of points of type \eqn{j} has intensity function \eqn{\lambda_j(u)}{lambda[j](u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda_j(\zeta)}{1/lambda[j](z)} at each point \eqn{\zeta}{z} of type \eqn{j}. Then the expected total mass per unit area is 1. The inhomogeneous ``cross-type'' \eqn{K} function \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. 
It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} should contain estimated values of the intensity of the sub-process of points of type \code{j}. It may be either a pixel image, a function, a numeric vector, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The optional argument \code{lambdaIJ} is for advanced use only. It is a matrix containing estimated values of the products of these two intensities for each pair of data points of types \code{i} and \code{j} respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kcross.inhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. \Moller, J. and Waagepetersen, R. (2003) \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton.
} \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kcross}}, \code{\link{Kinhom}}, \code{\link{Kdot.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): leave-one-out K <- Kcross.inhom(woods, "whiteoak", "maple", sigma=0.15) # method (3): fit parametric intensity model fit <- ppm(woods ~marks * polynom(x,y,2)) # alternative (a): use fitted model as 'lambda' argument K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaI=fit, lambdaJ=fit, update=FALSE) K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaX=fit, update=FALSE) # alternative (b): evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) K <- Kcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rmhstart.Rd0000644000176200001440000000701114141452520015747 0ustar liggesusers\name{rmhstart} \alias{rmhstart} \alias{rmhstart.default} \title{Determine Initial State for Metropolis-Hastings Simulation.} \description{ Builds a description of the initial state for the Metropolis-Hastings algorithm. } \usage{ rmhstart(start, \dots) \method{rmhstart}{default}(start=NULL, \dots, n.start=NULL, x.start=NULL) } \arguments{ \item{start}{An existing description of the initial state in some format. Incompatible with the arguments listed below. } \item{\dots}{There should be no other arguments.} \item{n.start}{ Number of initial points (to be randomly generated). Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. } } \value{ An object of class \code{"rmhstart"}, which is essentially a list of parameters describing the initial point pattern and (optionally) the initial state of the random number generator. There is a \code{print} method for this class, which prints a sensible description of the initial state. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm implemented in \code{\link{rmh}}. This function \code{rmhstart} creates a full description of the initial state of the Metropolis-Hastings algorithm, \emph{including possibly the initial state of the random number generator}, for use in a subsequent call to \code{\link{rmh}}. It also checks that the initial state is valid. The initial state should be specified \bold{either} by the first argument \code{start} \bold{or} by the other arguments \code{n.start}, \code{x.start} etc. 
If \code{start} is a list, then it should have components named \code{n.start} or \code{x.start}, with the same interpretation as described below. The arguments are: \describe{ \item{n.start}{ The number of \dQuote{initial} points to be randomly (uniformly) generated in the simulation window \code{w}. Incompatible with \code{x.start}. For a multitype point process, \code{n.start} may be a vector (of length equal to the number of types) giving the number of points of each type to be generated. If expansion of the simulation window is selected (see the argument \code{expand} to \code{\link{rmhcontrol}}), then the actual number of starting points in the simulation will be \code{n.start} multiplied by the expansion factor (ratio of the areas of the expanded window and original window). For faster convergence of the Metropolis-Hastings algorithm, the value of \code{n.start} should be roughly equal to (an educated guess at) the expected number of points for the point process inside the window. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{ppp}), or an object which can be coerced to this class by \code{\link{as.ppp}}, or a dataset containing vectors \code{x} and \code{y}. If \code{x.start} is specified, then expansion of the simulation window (the argument \code{expand} of \code{\link{rmhcontrol}}) is not permitted. } } The parameters \code{n.start} and \code{x.start} are \emph{incompatible}. } \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhmodel}} } \examples{ # 30 random points a <- rmhstart(n.start=30) a # a particular point pattern b <- rmhstart(x.start=cells) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/sharpen.Rd0000644000176200001440000000644514141452520015553 0ustar liggesusers\name{sharpen} \alias{sharpen} \alias{sharpen.ppp} \title{Data Sharpening of Point Pattern} \description{ Performs Choi-Hall data sharpening of a spatial point pattern. } \usage{ sharpen(X, \dots) \method{sharpen}{ppp}(X, sigma=NULL, \dots, varcov=NULL, edgecorrect=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } \item{edgecorrect}{ Logical value indicating whether to apply edge effect bias correction. } \item{\dots}{Arguments passed to \code{\link{density.ppp}} to control the pixel resolution of the result.} } \details{ Choi and Hall (2001) proposed a procedure for \emph{data sharpening} of spatial point patterns. This procedure is appropriate for earthquake epicentres and other point patterns which are believed to exhibit strong concentrations of points along a curve. Data sharpening causes such points to concentrate more tightly along the curve. If the original data points are \eqn{X_1, \ldots, X_n}{X[1],..., X[n]} then the sharpened points are \deqn{ \hat X_i = \frac{\sum_j X_j k(X_j-X_i)}{\sum_j k(X_j - X_i)} }{ X^[i] = (sum[j] X[j] * k(X[j] - X[i]))/(sum[j] k(X[j] - X[i])) } where \eqn{k} is a smoothing kernel in two dimensions. Thus, the new point \eqn{\hat X_i}{X^[i]} is a vector average of the nearby points \eqn{X_j}{X[j]}. The function \code{sharpen} is generic. It currently has only one method, for two-dimensional point patterns (objects of class \code{"ppp"}).
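The vector average defining \eqn{\hat X_i}{X^[i]} can also be computed directly. The following minimal sketch (illustrative only: it assumes a point pattern \code{X} and a numeric bandwidth \code{s}, uses an isotropic Gaussian kernel, and omits the edge effect correction discussed below) shows the calculation performed for each point: \preformatted{
    D <- pairdist(X)               # matrix of interpoint distances
    K <- exp(-D^2/(2 * s^2))       # Gaussian kernel weights k(X[j] - X[i])
    w <- K / rowSums(K)            # normalised weights for each point i
    Xsharp <- ppp(as.vector(w \%*\% X$x), as.vector(w \%*\% X$y),
                  window=Window(X))
  }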
If \code{sigma} is given, the smoothing kernel is the isotropic two-dimensional Gaussian density with standard deviation \code{sigma} in each axis. If \code{varcov} is given, the smoothing kernel is the Gaussian density with variance-covariance matrix \code{varcov}. The data sharpening procedure tends to cause the point pattern to contract away from the boundary of the window. That is, points \eqn{X_i}{X[i]} that lie quite close to the edge of the window of the point pattern tend to be displaced inward. If \code{edgecorrect=TRUE} then the algorithm is modified to correct this vector bias. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as the original pattern \code{X}, and with the same marks as \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ data(shapley) X <- unmark(shapley) \dontshow{ if(!(interactive())) X <- rthin(X, 0.05) } Y <- sharpen(X, sigma=0.5) Z <- sharpen(X, sigma=0.5, edgecorrect=TRUE) opa <- par(mar=rep(0.2, 4)) plot(solist(X, Y, Z), main= " ", main.panel=c("data", "sharpen", "sharpen, correct"), pch=".", equal.scales=TRUE, mar.panel=0.2) par(opa) } \references{ Choi, E. and Hall, P. (2001) Nonparametric analysis of earthquake point-process data. In M. de Gunst, C. Klaassen and A. van der Vaart (eds.) \emph{State of the art in probability and statistics: Festschrift for Willem R. van Zwet}, Institute of Mathematical Statistics, Beachwood, Ohio. Pages 324--344. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/psstG.Rd0000644000176200001440000001222214141452520015203 0ustar liggesusers\name{psstG} \Rdversion{1.1} \alias{psstG} \title{ Pseudoscore Diagnostic For Fitted Model against Saturation Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of saturation type. } \usage{ psstG(object, r = NULL, breaks = NULL, \dots, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend}, \code{interaction} and \code{rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters.
The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the Geyer saturation process (see \code{\link{Geyer}}) with saturation parameter 1. The family of alternatives includes models that are more regular than the fitted model, and others that are more clustered than the fitted model. For any point pattern \eqn{x}, and any \eqn{r > 0}, let \eqn{S(x,r)} be the number of points in \eqn{x} whose nearest neighbour (the nearest other point in \eqn{x}) is closer than \eqn{r} units. Then the pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r ) - \int_W \Delta S(u,x,r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x,r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Alternative functions: \code{\link{psstA}}, \code{\link{psst}}, \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ if(live <- interactive()) { X <- rStrauss(200,0.1,0.05) } else { pso <- spatstat.options(ndummy.min=16,npixel=32) X <- cells } plot(psstG(X)) plot(psstG(X, interaction=Strauss(0.05))) if(!live) spatstat.options(pso) } \keyword{spatial} \keyword{models} spatstat.core/man/dppBessel.Rd0000644000176200001440000000174714141452520016036 0ustar liggesusers\name{dppBessel} \alias{dppBessel} \title{Bessel Type Determinantal Point Process Model} \description{ Function generating an instance of the Bessel-type determinantal point process model. } \usage{dppBessel(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the model parameters. See Details. 
} } \details{ The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{sigma} as a non-negative numeric \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ Frederic Lavancier and Christophe Biscio. Modified by \ege, \adrian and \rolf } \examples{ m <- dppBessel(lambda=100, alpha=.05, sigma=0, d=2) } \seealso{ \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat.core/man/cdf.test.mppm.Rd0000644000176200001440000002014114141452520016564 0ustar liggesusers\name{cdf.test.mppm} \alias{cdf.test.mppm} \title{Spatial Distribution Test for Multiple Point Process Model} \description{ Performs a spatial distribution test of a point process model fitted to multiple spatial point patterns. The test compares the observed and predicted distributions of the values of a spatial covariate, using either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. } \usage{ \method{cdf.test}{mppm}(model, covariate, test=c("ks", "cvm", "ad"), ..., nsim=19, verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE) } \arguments{ \item{model}{ An object of class \code{"mppm"} representing a point process model fitted to multiple spatial point patterns. } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image, a list of functions, a list of pixel images, a hyperframe, a character string containing the name of one of the covariates in \code{model}, or one of the strings \code{"x"} or \code{"y"}. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link{cdf.test}} to control the test. } \item{nsim}{ Number of simulated realisations which should be generated, if a Monte Carlo test is required. } \item{verbose}{Logical flag indicating whether to print progress reports. } \item{interpolate}{ Logical flag indicating whether to interpolate between pixel values when \code{covariate} is a pixel image. See \emph{Details}. } \item{fast}{ Logical flag. If \code{TRUE}, values of the covariate are only sampled at the original quadrature points used to fit the model. If \code{FALSE}, values of the covariate are sampled at all pixels, which can be slower by three orders of magnitude. } \item{jitter}{ Logical flag. If \code{TRUE}, observed values of the covariate are perturbed by adding small random values, to avoid tied observations. } } \details{ This function is a method for the generic function \code{\link{cdf.test}} for the class \code{mppm}. This function performs a goodness-of-fit test of a point process model that has been fitted to multiple point patterns. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. These are exact tests if the model is Poisson; otherwise, for a Gibbs model, a Monte Carlo p-value is computed by generating simulated realisations of the model and applying the selected goodness-of-fit test to each simulation.
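For example, the following sketch (mirroring the Examples below, with an arbitrary intensity function) fits a model to two simulated patterns and tests it using the Anderson-Darling variant:
\preformatted{
  # two realisations of a nonuniform Poisson process
  lambda <- as.im(function(x,y) { 100 * exp(x) }, square(1))
  dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda)))
  # fit a log-linear intensity model and test it against the x coordinate
  fit <- mppm(X ~ x, dat)
  cdf.test(fit, "x", test="ad")
}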
The argument \code{model} should be a point process model fitted to multiple point patterns (object of class \code{"mppm"}). The argument \code{covariate} contains the values of a spatial function. It can be \itemize{ \item a \code{function(x,y)} \item a pixel image (object of class \code{"im"}) \item a list of \code{function(x,y)}, one for each point pattern \item a list of pixel images, one for each point pattern \item a hyperframe (see \code{\link{hyperframe}}) of which the first column will be taken as containing the covariate \item a character string giving the name of one of the covariates in \code{model} \item one of the character strings \code{"x"} or \code{"y"}, indicating the spatial coordinates. } If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}}. The argument \code{interpolate} determines how pixel values will be handled when \code{covariate} is a pixel image. The value of the covariate at a data point is obtained by looking up the value of the nearest pixel if \code{interpolate=FALSE}, or by linearly interpolating between the values of the four nearest pixels if \code{interpolate=TRUE}. Linear interpolation is slower, but is sometimes necessary to avoid tied values of the covariate arising when the pixel grid is coarse. If \code{model} is a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are theoretically exact. This test was apparently first described (in the context of spatial data, and for Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are biased. Instead they are used as the basis of a Monte Carlo test. First \code{nsim} simulated realisations of the model will be generated. Each simulated realisation consists of a list of simulated point patterns, one for each of the original data patterns. This can take a very long time. The model is then re-fitted to each simulation, and the refitted model is subjected to the goodness-of-fit test described above.
A Monte Carlo p-value is then computed by comparing the p-value of the original test with the p-values obtained from the simulations. } \value{ An object of class \code{"cdftest"} and \code{"htest"} containing the results of the test. See \code{\link{cdf.test}} for details. } \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link{mppm}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \examples{ # three i.i.d. realisations of nonuniform Poisson process lambda <- as.im(function(x,y) { 200 * exp(x) }, square(1)) dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda), rpoispp(lambda))) # fit uniform Poisson process fit0 <- mppm(X~1, dat) # fit correct nonuniform Poisson process fit1 <- mppm(X~x, dat) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") # Gibbs model fitGibbs <- update(fit0, interaction=Strauss(0.05)) ns <- if(interactive()) 19 else 2 cdf.test(fitGibbs, "x", nsim=ns) } \keyword{htest} \keyword{spatial} spatstat.core/man/pcfinhom.Rd0000644000176200001440000001621014141452520015707 0ustar liggesusers\name{pcfinhom} \alias{pcfinhom} \title{ Inhomogeneous Pair Correlation Function } \description{ Estimates the inhomogeneous pair correlation function of a point pattern using kernel methods. } \usage{ pcfinhom(X, lambda = NULL, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), renormalise = TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, reciplambda = NULL, sigma = NULL, varcov = NULL, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see \code{\link{bw.stoyan}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see \code{\link{bw.stoyan}}. } \item{correction}{ Character string or character vector specifying the choice of edge correction. See \code{\link{Kest}} for explanation and options. 
} \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{\link{pcf.ppp}}. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \details{ The inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} is a summary of the dependence between points in a spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda(x) * lambda(y) * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity function of the point process. For a Poisson point process with intensity function \eqn{\lambda}{lambda}, this probability is \eqn{p(r) = \lambda(x) \lambda(y)}{p(r) = lambda(x) * lambda(y)} so \eqn{g_{\rm inhom}(r) = 1}{ginhom(r) = 1}. The inhomogeneous pair correlation function is related to the inhomogeneous \eqn{K} function through \deqn{ g_{\rm inhom}(r) = \frac{K'_{\rm inhom}(r)}{2\pi r} }{ ginhom(r) = Kinhom'(r)/ ( 2 * pi * r) } where \eqn{K'_{\rm inhom}(r)}{Kinhom'(r)} is the derivative of \eqn{K_{\rm inhom}(r)}{Kinhom(r)}, the inhomogeneous \eqn{K} function. See \code{\link{Kinhom}} for information about \eqn{K_{\rm inhom}(r)}{Kinhom(r)}. The command \code{pcfinhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If \code{renormalise=TRUE} (the default), then the estimates are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W).
} } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{\rm inhom}(r)}{ginhom(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by Ripley isotropic correction } as required. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. } \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{Kinhom}} } \examples{ data(residualspaper) X <- residualspaper$Fig4b plot(pcfinhom(X, stoyan=0.2, sigma=0.1)) fit <- ppm(X, ~polynom(x,y,2)) plot(pcfinhom(X, lambda=fit, normpower=2)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/subspaceDistance.Rd0000644000176200001440000000263214141452520017367 0ustar liggesusers\name{subspaceDistance} \alias{subspaceDistance} \title{ Distance Between Linear Spaces } \description{ Evaluate the distance between two linear subspaces using the measure proposed by Li, Zha and Chiaromonte (2005). } \usage{ subspaceDistance(B0, B1) } \arguments{ \item{B0}{ Matrix whose columns are a basis for the first subspace. } \item{B1}{ Matrix whose columns are a basis for the second subspace. } } \details{ This algorithm calculates the maximum absolute value of the eigenvalues of \eqn{P1-P0} where \eqn{P0,P1} are the projection matrices onto the subspaces generated by \code{B0,B1}. This measure of distance was proposed by Li, Zha and Chiaromonte (2005). See also Xia (2007). } \value{ A single numeric value. } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. Li, B., Zha, H. and Chiaromonte, F. (2005) Contour regression: a general approach to dimension reduction. \emph{Annals of Statistics} \bold{33}, 1580--1616. Xia, Y. (2007) A constructive approach to the estimation of dimension reduction directions. \emph{Annals of Statistics} \bold{35}, 2654--2690. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{multivariate} \keyword{algebra} spatstat.core/man/Kmeasure.Rd0000644000176200001440000001570114141452520015664 0ustar liggesusers\name{Kmeasure} \alias{Kmeasure} \title{Reduced Second Moment Measure} \description{ Estimates the reduced second moment measure \eqn{\kappa}{Kappa} from a point pattern in a window of arbitrary shape. } \usage{ Kmeasure(X, sigma, edge=TRUE, \dots, varcov=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{\kappa}{Kappa} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ Standard deviation \eqn{\sigma}{sigma} of the Gaussian smoothing kernel. Incompatible with \code{varcov}. } \item{edge}{ Logical value indicating whether an edge correction should be applied. 
} \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution. } \item{varcov}{ Variance-covariance matrix of the Gaussian smoothing kernel. Incompatible with \code{sigma}. } } \value{ A real-valued pixel image (an object of class \code{"im"}, see \code{\link{im.object}}) whose pixel values are estimates of the density of the reduced second moment measure at each location. } \details{ Given a point pattern dataset, this command computes an estimate of the reduced second moment measure \eqn{\kappa}{Kappa} of the point process. The result is a pixel image whose pixel values are estimates of the density of the reduced second moment measure. The reduced second moment measure \eqn{\kappa}{Kappa} can be regarded as a generalisation of the more familiar \eqn{K}-function. An estimate of \eqn{\kappa}{Kappa} derived from a spatial point pattern dataset can be useful in exploratory data analysis. Its advantage over the \eqn{K}-function is that it is also sensitive to anisotropy and directional effects. In a nutshell, the command \code{Kmeasure} computes a smoothed version of the \emph{Fry plot}. As explained under \code{\link{fryplot}}, the Fry plot is a scatterplot of the vectors joining all pairs of points in the pattern. The reduced second moment measure is (essentially) defined as the average of the Fry plot over different realisations of the point process. The command \code{Kmeasure} effectively smooths the Fry plot of a dataset to obtain an estimate of the reduced second moment measure. In formal terms, the reduced second moment measure \eqn{\kappa}{Kappa} of a stationary point process \eqn{X} is a measure defined on the two-dimensional plane such that, for a `typical' point \eqn{x} of the process, the expected number of other points \eqn{y} of the process such that the vector \eqn{y - x} lies in a region \eqn{A}, equals \eqn{\lambda \kappa(A)}{lambda * Kappa(A)}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K}-function is a special case. The function value \eqn{K(t)} is the value of the reduced second moment measure for the disc of radius \eqn{t} centred at the origin; that is, \eqn{K(t) = \kappa(b(0,t))}{K(t) = Kappa(b(0,t))}. The command \code{Kmeasure} computes an estimate of \eqn{\kappa}{Kappa} from a point pattern dataset \code{X}, which is assumed to be a realisation of a stationary point process, observed inside a known, bounded window. Marks are ignored. The algorithm approximates the point pattern and its window by binary pixel images, introduces a Gaussian smoothing kernel and uses the Fast Fourier Transform \code{\link{fft}} to form a density estimate of \eqn{\kappa}{Kappa}. The calculation corresponds to the edge correction known as the ``translation correction''. The Gaussian smoothing kernel may be specified by either of the arguments \code{sigma} or \code{varcov}. If \code{sigma} is a single number, this specifies an isotropic Gaussian kernel with standard deviation \code{sigma} on each coordinate axis. If \code{sigma} is a vector of two numbers, this specifies a Gaussian kernel with standard deviation \code{sigma[1]} on the \eqn{x} axis, standard deviation \code{sigma[2]} on the \eqn{y} axis, and zero correlation between the \eqn{x} and \eqn{y} axes. If \code{varcov} is given, this specifies the variance-covariance matrix of the Gaussian kernel. There do not seem to be any well-established rules for selecting the smoothing kernel in this context. 
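For illustration, the following sketch shows the three documented ways of specifying the kernel (the bandwidth values are arbitrary):
\preformatted{
  K1 <- Kmeasure(cells, sigma=0.05)                  # isotropic
  K2 <- Kmeasure(cells, sigma=c(0.05, 0.08))         # axis-aligned anisotropic
  K3 <- Kmeasure(cells, varcov=matrix(c(4,1,1,4)/1000, 2, 2)) # general anisotropic
}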
The density estimate of \eqn{\kappa}{Kappa} is returned in the form of a real-valued pixel image. Pixel values are estimates of the normalised second moment density at the centre of the pixel. (The uniform Poisson process would have values identically equal to \eqn{1}.) The image \code{x} and \code{y} coordinates are on the same scale as vector displacements in the original point pattern window. The point \code{x=0, y=0} corresponds to the `typical point'. A peak in the image near \code{(0,0)} suggests clustering; a dip in the image near \code{(0,0)} suggests inhibition; peaks or dips at other positions suggest possible periodicity. If desired, the value of \eqn{\kappa(A)}{Kappa(A)} for a region \eqn{A} can be estimated by computing the integral of the pixel image over the domain \eqn{A}, i.e.\ summing the pixel values and multiplying by pixel area, using \code{\link{integral.im}}. One possible application is to compute anisotropic counterparts of the \eqn{K}-function (in which the disc of radius \eqn{t} is replaced by another shape). See Examples. } \section{Warning}{ Some writers use the term \emph{reduced second moment measure} when they mean the \eqn{K}-function. This has caused confusion. As originally defined, the reduced second moment measure is a measure, obtained by modifying the second moment measure, while the \eqn{K}-function is a function obtained by evaluating this measure for discs of increasing radius. In \pkg{spatstat}, the \eqn{K}-function is computed by \code{\link{Kest}} and the reduced second moment measure is computed by \code{Kmeasure}. } \references{ Stoyan, D., Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{fryplot}}, \code{\link{spatstat.options}}, \code{\link{integral.im}}, \code{\link{im.object}} } \examples{ plot(Kmeasure(cells, 0.05)) # shows pronounced dip around origin consistent with strong inhibition plot(Kmeasure(redwood, 0.03), col=grey(seq(1,0,length=32))) # shows peaks at several places, reflecting clustering and ?periodicity M <- Kmeasure(cells, 0.05) # evaluate measure on a sector W <- Window(M) ang <- as.im(atan2, W) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W) sector <- solutionset(ang > 0 & ang < 1 & rad < 0.6) integral.im(M[sector, drop=FALSE]) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/HierHard.Rd0000644000176200001440000001054714141452520015601 0ustar liggesusers\name{HierHard} \alias{HierHard} \title{The Hierarchical Hard Core Point Process Model} \description{ Creates an instance of the hierarchical hard core point process model which can then be fitted to point pattern data. } \usage{ HierHard(hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical hard core process with hard core distances \eqn{hradii[i,j]}.
} \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) hard core process with \eqn{m} types, with hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]}, is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern. If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical hard core process pairwise interaction is yielded by the function \code{HierHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{hradii} must be square, with entries which are either positive numbers, or zero or \code{NA}. A value of zero or \code{NA} indicates that no hard core interaction term should be included for this combination of types. Note that only the hard core distances are specified in \code{HierHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierHard()}. } \seealso{ \code{\link{MultiHard}} for the corresponding symmetrical interaction. \code{\link{HierStrauss}}, \code{\link{HierStraussHard}}. } \examples{ h <- matrix(c(4, NA, 10, 15), 2, 2) HierHard(h) # prints a sensible description of itself ppm(ants ~1, HierHard(h)) # fit the stationary hierarchical hard core process to ants data } \author{\adrian, \rolf and \ege.} \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058.
} \keyword{spatial} \keyword{models} spatstat.core/man/diagnose.ppm.Rd0000644000176200001440000004141214141452520016472 0ustar liggesusers\name{diagnose.ppm} \alias{diagnose.ppm} \alias{plot.diagppm} \title{ Diagnostic Plots for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce diagnostic plots based on residuals. } \usage{ diagnose.ppm(object, \dots, type="raw", which="all", sigma=NULL, rbord=reach(object), cumulative=TRUE, plot.it=TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) \method{plot}{diagppm}(x, \dots, which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{which}{ Character string or vector indicating the choice(s) of plots to be generated. Options are \code{"all"}, \code{"marks"}, \code{"smooth"}, \code{"x"}, \code{"y"} and \code{"sum"}. Multiple choices may be given but must be matched exactly. See Details. } \item{sigma}{ Bandwidth for kernel smoother in \code{"smooth"} option. } \item{rbord}{ Width of border to avoid edge effects. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. (An infinite value of \code{rbord} will be ignored.) } \item{cumulative}{ Logical flag indicating whether the lurking variable plots for the \eqn{x} and \eqn{y} coordinates will be the plots of cumulative sums of marks (\code{cumulative=TRUE}) or the plots of marginal integrals of the smoothed residual field (\code{cumulative=FALSE}). } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, the computed diagnostic quantities are returned without plotting them. } \item{plot.neg}{ String indicating how the density part of the residual measure should be plotted. } \item{plot.smooth}{ String indicating how the smoothed residual field should be plotted. } \item{compute.sd,plot.sd}{ Logical values indicating whether error bounds should be computed and added to the \code{"x"} and \code{"y"} plots. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{envelope,nsim,nrank}{ Arguments passed to \code{\link{lurking}} in order to plot simulation envelopes for the lurking variable plots. } \item{rv}{ Usually absent. Advanced use only. If this argument is present, the values of the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from \code{rv}. } \item{spacing}{ The spacing between plot panels (when a four-panel plot is generated) expressed as a fraction of the width of the window of the point pattern. 
} \item{outer}{ The distance from the outermost line of text to the nearest plot panel, expressed as a multiple of the spacing between plot panels. } \item{srange}{ Vector of length 2 that will be taken as giving the range of values of the smoothed residual field, when generating an image plot of this field. This is useful if you want to generate diagnostic plots for two different fitted models using the same colour map. } \item{monochrome}{ Flag indicating whether images should be displayed in greyscale (suitable for publication) or in colour (suitable for the screen). The default is to display in colour. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{splineargs}{ Argument passed to \code{\link{lurking}} to control the smoothing in the lurking variable plot. } \item{x}{The value returned from a previous call to \code{diagnose.ppm}. An object of class \code{"diagppm"}. } \item{typename}{String to be used as the name of the residuals.} \item{main}{Main title for the plot.} \item{\dots}{ Extra arguments, controlling either the resolution of the smoothed image (passed from \code{diagnose.ppm} to \code{\link{density.ppp}}) or the appearance of the plots (passed from \code{diagnose.ppm} to \code{plot.diagppm} and from \code{plot.diagppm} to \code{\link{plot.default}}). } \item{compute.cts}{Advanced use only.} } \value{ An object of class \code{"diagppm"} which contains the coordinates needed to reproduce the selected plots. This object can be plotted using \code{plot.diagppm} and printed using \code{print.diagppm}. } \details{ The function \code{diagnose.ppm} generates several diagnostic plots for a fitted point process model. The plots display the residuals from the fitted model (Baddeley et al, 2005) or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991). These plots can be used to assess goodness-of-fit, to identify outliers in the data, and to reveal departures from the fitted model. See also the companion function \code{\link{qqplot.ppm}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. The argument \code{type} selects the type of residual or weight that will be computed. Current options are: \describe{ \item{\code{"eem"}:}{ exponential energy marks (Stoyan and Grabarnik, 1991) computed by \code{\link{eem}}. These are positive weights attached to the data points (i.e. the points of the point pattern dataset to which the model was fitted). If the fitted model is correct, then the sum of these weights for all data points in a spatial region \eqn{B} has expected value equal to the area of \eqn{B}. See \code{\link{eem}} for further explanation. } \item{\code{"raw"}, \code{"inverse"} or \code{"pearson"}:}{ point process residuals (Baddeley et al, 2005) computed by the function \code{\link{residuals.ppm}}.
These are residuals attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals in a spatial region \eqn{B} has mean zero. The options are \itemize{ \item \code{"raw"}: the raw residuals; \item \code{"inverse"}: the `inverse-lambda' residuals, a counterpart of the exponential energy weights; \item \code{"pearson"}: the Pearson residuals. } See \code{\link{residuals.ppm}} for further explanation. } } The argument \code{which} selects the type of plot that is produced. Options are: \describe{ \item{\code{"marks"}:}{ plot the residual measure. For the exponential energy weights (\code{type="eem"}) this displays circles centred at the points of the data pattern, with radii proportional to the exponential energy weights. For the residuals (\code{type="raw"}, \code{type="inverse"} or \code{type="pearson"}) this again displays circles centred at the points of the data pattern with radii proportional to the (positive) residuals, while the plotting of the negative residuals depends on the argument \code{plot.neg}. If \code{plot.neg="image"} then the negative part of the residual measure, which is a density, is plotted as a colour image. If \code{plot.neg="discrete"} then the discretised negative residuals (obtained by approximately integrating the negative density using the quadrature scheme of the fitted model) are plotted as squares centred at the dummy points with side lengths proportional to the (negative) residuals. [To control the size of the circles and squares, use the argument \code{maxsize}.] } \item{\code{"smooth"}:}{ plot a kernel-smoothed version of the residual measure. Each data or dummy point is taken to have a `mass' equal to its residual or exponential energy weight. (Note that residuals can be negative). This point mass is then replaced by a bivariate isotropic Gaussian density with standard deviation \code{sigma}. The value of the smoothed residual field at any point in the window is the sum of these weighted densities. If the fitted model is correct, this smoothed field should be flat, and its height should be close to 0 (for the residuals) or 1 (for the exponential energy weights). The field is plotted either as an image, contour plot or perspective view of a surface, according to the argument \code{plot.smooth}. The range of values of the smoothed field is printed if the option \code{which="sum"} is also selected. } \item{\code{"x"}:}{ produce a `lurking variable' plot for the \eqn{x} coordinate. This is a plot of \eqn{h(x)} against \eqn{x} (solid lines) and of \eqn{E(h(x))} against \eqn{x} (dashed lines), where \eqn{h(x)} is defined below, and \eqn{E(h(x))} denotes the expectation of \eqn{h(x)} assuming the fitted model is true. \itemize{ \item if \code{cumulative=TRUE} then \eqn{h(x)} is the cumulative sum of the weights or residuals for all points which have \eqn{X} coordinate less than or equal to \eqn{x}. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } area of the subset of the window to the left of the line \eqn{X=x}. \item if \code{cumulative=FALSE} then \eqn{h(x)} is the marginal integral of the smoothed residual field (see the case \code{which="smooth"} described above) on the \eqn{x} axis. This is approximately the derivative of the plot for \code{cumulative=TRUE}. 
The value of \eqn{h(x)} is computed by summing the values of the smoothed residual field over all pixels with the given \eqn{x} coordinate. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } length of the intersection between the observation window and the line \eqn{X=x}. } If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{h(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. } \item{\code{"y"}:}{ produce a similar lurking variable plot for the \eqn{y} coordinate. } \item{\code{"sum"}:}{ print the sum of the weights or residuals for all points in the window (clipped by a margin \code{rbord} if required) and the area of the same window. If the fitted model is correct the sum of the exponential energy weights should equal the area of the window, while the sum of the residuals should equal zero. Also print the range of values of the smoothed field displayed in the \code{"smooth"} case. } \item{\code{"all"}:}{ All four of the diagnostic plots listed above are plotted together in a two-by-two display. Top left panel is \code{"marks"} plot. Bottom right panel is \code{"smooth"} plot. Bottom left panel is \code{"x"} plot. Top right panel is \code{"y"} plot, rotated 90 degrees. } } The argument \code{rbord} ensures there are no edge effects in the computation of the residuals. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. The value of \code{rbord} should be greater than or equal to the range of interaction permitted in the model. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2008). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. (However, see the section about Replicated Data). The argument \code{rv} would normally be used only by experts. It enables the user to substitute arbitrary values for the residuals or marks, overriding the usual calculations. If \code{rv} is present, then instead of calculating the residuals from the fitted model, the algorithm takes the residuals from the object \code{rv}, and plots them in the manner appropriate to the type of residual or mark selected by \code{type}. If \code{type="eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector of length equal to the number of points in the original data point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, it should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. The return value of \code{diagnose.ppm} is an object of class \code{"diagppm"}. The \code{plot} method for this class is documented here. There is also a \code{print} method. See the Examples. In \code{plot.diagppm}, if a four-panel diagnostic plot is produced (the default), then the extra arguments \code{xlab}, \code{ylab}, \code{rlab} determine the text labels for the \eqn{x} and \eqn{y} coordinates and the residuals, respectively.
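For example (a sketch reusing the model from the Examples below; the label strings are arbitrary):
\preformatted{
  fit <- ppm(cells ~x, Strauss(r=0.15))
  # relabel the axes and the residuals in the four-panel display
  diagnose.ppm(fit, xlab="horizontal coordinate",
                    ylab="vertical coordinate", rlab="raw residuals")
}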
The undocumented arguments \code{col.neg} and \code{col.smooth} control the colour maps used in the top left and bottom right panels respectively. See also the companion functions \code{\link{qqplot.ppm}}, which produces a Q-Q plot of the residuals, and \code{\link{lurking}}, which produces lurking variable plots for any spatial covariate. } \section{Replicated Data}{ Note that if \code{object} is a model that was obtained by first fitting a model to replicated point pattern data using \code{\link{mppm}} and then using \code{\link{subfits}} to extract a model for one of the individual point patterns, then the variance calculations are only implemented for the innovation variance (\code{oldstyle=TRUE}) and this is the default in such cases. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100. } \seealso{ \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{qqplot.ppm}}, \code{\link{lurking}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~x, Strauss(r=0.15)) diagnose.ppm(fit) \donttest{ diagnose.ppm(fit, type="pearson") } diagnose.ppm(fit, which="marks") diagnose.ppm(fit, type="raw", plot.neg="discrete") diagnose.ppm(fit, type="pearson", which="smooth") # save the diagnostics and plot them later u <- diagnose.ppm(fit, rbord=0.15, plot.it=FALSE) if(interactive()) { plot(u) plot(u, which="marks") } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.core/man/plot.ppm.Rd0000644000176200001440000001532214141452520015660 0ustar liggesusers\name{plot.ppm} \alias{plot.ppm} \title{Plot a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, create spatial trend and conditional intensity surfaces of the model, in a form suitable for plotting, and (optionally) plot these surfaces. } \usage{ \method{plot}{ppm}(x, ngrid = c(40,40), superimpose = TRUE, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it = TRUE, locations = NULL, covariates=NULL, \dots) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{ngrid}{ The dimensions for a grid on which to evaluate, for plotting, the spatial trend and conditional intensity. A vector of 1 or 2 integers. If it is of length 1, \code{ngrid} is replaced by \code{c(ngrid,ngrid)}. } \item{superimpose}{ logical flag; if \code{TRUE} (and if \code{plot.it=TRUE}) the original data point pattern will be superimposed on the plots. } \item{trend}{ logical flag; if \code{TRUE}, the spatial trend surface will be produced. } \item{cif}{ logical flag; if \code{TRUE}, the conditional intensity surface will be produced. } \item{se}{ logical flag; if \code{TRUE}, the estimated standard error of the spatial trend surface will be produced. } \item{pause}{ logical flag indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file.
(This flag is ignored if \code{plot.it=FALSE}). } \item{how}{ character string or character vector indicating the style or styles of plots to be performed. Ignored if \code{plot.it=FALSE}. } \item{plot.it}{ logical scalar; should a plot be produced immediately? } \item{locations}{ If present, this determines the locations of the pixels at which predictions are computed. It must be a binary pixel image (an object of class \code{"owin"} with type \code{"mask"}). (Incompatible with \code{ngrid}). } \item{covariates}{ Values of external covariates required by the fitted model. Passed to \code{\link{predict.ppm}}. } \item{\dots}{ extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } } \value{ An object of class \code{plotppm}. Such objects may be plotted by \code{\link{plot.plotppm}()}. This is a list with components named \code{trend} and \code{cif}, either of which may be missing. They will be missing if the corresponding component does not make sense for the model, or if the corresponding argument was set equal to \code{FALSE}. Both \code{trend} and \code{cif} are lists of images. If the model is an unmarked point process, then they are lists of length 1, so that \code{trend[[1]]} is an image of the spatial trend and \code{cif[[1]]} is an image of the conditional intensity. If the model is a marked point process, then \code{trend[[i]]} is an image of the spatial trend for the mark \code{m[i]}, and \code{cif[[i]]} is an image of the conditional intensity for the mark \code{m[i]}, where \code{m} is the vector of levels of the marks. } \details{ This is the \code{plot} method for the class \code{"ppm"} (see \code{\link{ppm.object}} for details of this class). It invokes \code{\link{predict.ppm}} to compute the spatial trend and conditional intensity of the fitted point process model. See \code{\link{predict.ppm}} for more explanation about spatial trend and conditional intensity. The default action is to create a rectangular grid of points in (the bounding box of) the observation window of the data point pattern, and evaluate the spatial trend and conditional intensity of the fitted spatial point process model \code{x} at these locations. If the argument \code{locations=} is supplied, then the spatial trend and conditional intensity are calculated at the grid of points specified by this argument. The argument \code{locations}, if present, should be a binary image mask (an object of class \code{"owin"} and type \code{"mask"}). This determines a rectangular grid of locations, or a subset of such a grid, at which predictions will be computed. Binary image masks are conveniently created using \code{\link{as.mask}}. The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The argument \code{covariates} has the same format and interpretation as in \code{\link{predict.ppm}}. It may be either a data frame (the number of whose rows must match the number of pixels in \code{locations} multiplied by the number of possible marks in the point pattern), or a list of images. If argument \code{locations} is not supplied, and \code{covariates} \bold{is} supplied, then it \bold{must} be a list of images. If the fitted model was a marked (multitype) point process, then predictions are made for each possible mark value in turn.
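For example, the following sketch evaluates the surfaces on a finer grid by passing a binary mask as \code{locations} (the resolution \code{dimyx=128} is an arbitrary choice):
\preformatted{
  m <- ppm(cells ~1, Strauss(0.05))
  # binary mask covering the observation window, at 128 x 128 resolution
  M <- as.mask(Window(cells), dimyx=128)
  pm <- plot(m, locations=M, how="image")
}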
If the fitted model had no spatial trend, then the default is to omit calculating this (flat) surface, unless \code{trend=TRUE} is set explicitly. If the fitted model was Poisson, so that there were no spatial interactions, then the conditional intensity and spatial trend are identical, and the default is to omit the conditional intensity, unless \code{cif=TRUE} is set explicitly. If \code{plot.it=TRUE} then \code{\link{plot.plotppm}()} is called upon to plot the class \code{plotppm} object which is produced. (That object is also returned, silently.) Plots are produced successively using \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}} (or only a selection of these three, if \code{how} is given). Extra graphical parameters controlling the display may be passed directly via the arguments \code{...} or indirectly reset using \code{\link{spatstat.options}}. } \seealso{ \code{\link{plot.plotppm}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{predict.ppm}}, \code{\link{print.ppm}}, \code{\link{persp}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{plot}}, \code{\link{spatstat.options}} } \section{Warnings}{ See warnings in \code{\link{predict.ppm}}. } \examples{ m <- ppm(cells ~1, Strauss(0.05)) pm <- plot(m) # The object ``pm'' will be plotted as well as saved # for future plotting. pm } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.core/man/improve.kppm.Rd0000644000176200001440000001140714141452520016536 0ustar liggesusers\name{improve.kppm} \alias{improve.kppm} \title{Improve Intensity Estimate of Fitted Cluster Point Process Model} \description{ Update the fitted intensity of a fitted cluster point process model. } \usage{ improve.kppm(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } \item{type}{ A character string indicating the method of estimation. Current options are \code{"clik1"}, \code{"wclik1"} and \code{"quasi"} for, respectively, first order composite (Poisson) likelihood, weighted first order composite likelihood and quasi-likelihood. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Ignored if \code{rmax} is provided. } \item{dimyx}{ Pixel array dimensions. See Details. } \item{maxIter}{ Integer. Maximum number of iterations of iterative weighted least squares (Fisher scoring). } \item{tolerance}{ Numeric. Tolerance value specifying when to stop iterative weighted least squares (Fisher scoring). } \item{fast}{ Logical value indicating whether tapering should be used to make the computations faster (requires the package \pkg{Matrix}). } \item{vcov}{ Logical value indicating whether to calculate the asymptotic variance/covariance matrix. } \item{fast.vcov}{ Logical value indicating whether tapering should be used for the variance/covariance matrix to make the computations faster (requires the package \pkg{Matrix}). Caution: This is expected to underestimate the true asymptotic variances/covariances.
}
\item{verbose}{
A logical indicating whether the details of computations should be
printed.
}
\item{save.internals}{
A logical indicating whether internal quantities should be saved in
the returned object (mostly for development purposes).
}
}
\value{
A fitted cluster point process model of class \code{"kppm"}.
}
\details{
This function re-estimates the intensity parameters in a fitted
\code{"kppm"} object. If \code{type="clik1"}, estimates are based on
the first order composite (Poisson) likelihood, which ignores
dependence between the points. Note that \code{type="clik1"} is
mainly included for testing purposes and is not recommended for the
typical user; instead the more efficient \code{\link{kppm}} with
\code{improve.type="none"} should be used.

When \code{type="quasi"} or \code{type="wclik1"}, the dependence
structure between the points is incorporated in the estimation
procedure, by using the estimated pair correlation function in the
estimating equation.

In all cases the estimating equation is based on dividing the
observation window into small subregions and counting the number of
points in each subregion. To do this, the observation window is first
converted into a digital mask by \code{\link{as.mask}}, where the
resolution is controlled by the argument \code{dimyx}. The
computational time grows with the cube of the number of subregions,
so fine grids may take a very long time to compute (or even run out
of memory).
}
\seealso{
\code{\link{ppm}},
\code{\link{kppm}}
}
\references{
Waagepetersen, R. (2007) An estimating function approach to inference
for inhomogeneous Neyman-Scott processes, \emph{Biometrics},
\bold{63}, 252--258.

Guan, Y. and Shen, Y. (2010) A weighted estimating equation approach
to inference for inhomogeneous spatial point processes,
\emph{Biometrika}, \bold{97}, 867--880.

Guan, Y., Jalilian, A. and Waagepetersen, R. (2015) Quasi-likelihood
for spatial point processes. \emph{Journal of the Royal Statistical
Society, Series B} \bold{77}, 677--697.
}
\examples{
  # fit a Thomas process, using the minimum contrast estimation method,
  # to model interaction between points of the pattern
  fit0 <- kppm(bei ~ elev + grad, data = bei.extra)

  # refit the log-linear intensity model with the quasi-likelihood method
  fit1 <- improve.kppm(fit0, type="quasi")

  # compare
  coef(fit0)
  coef(fit1)
}
\author{Abdollah Jalilian
\email{jalilian@razi.ac.ir}
% \url{http://www.razi.ac.ir/ajalilian/}
and Rasmus Waagepetersen
\email{rw@math.aau.dk}
adapted for \pkg{spatstat} by \adrian
and \ege
}
\keyword{spatial}
\keyword{fit model}
spatstat.core/man/plot.profilepl.Rd0000644000176200001440000000763314141452520017066 0ustar liggesusers\name{plot.profilepl}
\alias{plot.profilepl}
\title{
Plot Profile Likelihood
}
\description{
Plot the profile (pseudo)likelihood against the irregular parameters,
for a model that was fitted by maximum profile (pseudo)likelihood.
}
\usage{
\method{plot}{profilepl}(x, \dots, add = FALSE, main = NULL, tag = TRUE,
coeff = NULL, xvariable = NULL,
col = 1, lty = 1, lwd = 1,
col.opt = "green", lty.opt = 3, lwd.opt = 1)
}
\arguments{
\item{x}{
A point process model fitted by maximum profile (pseudo)likelihood.
Object of class \code{"profilepl"}, obtained from
\code{\link{profilepl}}.
}
\item{\dots}{
Additional plot arguments passed to
\code{\link[graphics]{plot.default}} and
\code{\link[graphics]{lines}}.
}
\item{add}{
Logical. If \code{TRUE}, the plot is drawn over the existing plot.
}
\item{main}{
Optional. Main title for the plot. A character string or character
vector.
} \item{tag}{ Logical value. If \code{TRUE} (the default), when the plot contains multiple curves corresponding to different values of a parameter, each curve will be labelled with the values of the irregular parameter. } \item{coeff}{ Optional. If this is given, it should be a character string matching the name of one of the fitted model coefficients. This coefficient will then be plotted on the vertical axis. } \item{xvariable}{ Optional. The name of the irregular parameter that should be plotted along the horizontal axis. The default is the first irregular parameter. } \item{col,lty,lwd}{ Graphical parameters (colour, line type, line width) for the curves on the plot. } \item{col.opt, lty.opt, lwd.opt}{ Graphical parameters for indicating the optimal parameter value. } } \details{ This is the \code{\link[graphics]{plot}} method for the class \code{"profilepl"} of fitted point process models obtained by maximising the profile likelihood or profile pseudolikelihood. The default behaviour is to plot the profile likelihood or profile pseudolikelihood on the vertical axis, against the value of the irregular parameter on the horizontal axis. If there are several irregular parameters, then one of them is plotted on the horizontal axis, and the plot consists of many different curves, corresponding to different values of the other parameters. The parameter to be plotted on the horizontal axis is specified by the argument \code{xvariable}; the default is to use the parameter that was listed first in the original call to \code{\link{profilepl}}. If \code{coeff} is given, it should be the name of one of the fitted model coefficients \code{names(coef(as.ppm(x)))}. The fitted value of that coefficient is plotted on the vertical axis. } \value{ Null. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \spatstatAuthors. } \seealso{ \code{\link{profilepl}} } \examples{ live <- interactive() nr <- if(live) 20 else 3 # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, length=nr)) ps <- profilepl(rr, Strauss, cells) plot(ps) # profile pseudolikelihood plot(ps, coeff="Interaction") # fitted interaction coefficient log(gamma) # two irregular parameters smax <- if(live) 3 else 2 rs <- expand.grid(r=seq(0.05,0.15, length=nr), sat=1:smax) pg <- profilepl(rs, Geyer, cells) plot(pg) # profile pseudolikelihood against r for each value of 'sat' plot(pg, coeff="Interaction") plot(pg, xvariable="sat", col=ifelse(r < 0.1, "red", "green")) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.core/man/HierStrauss.Rd0000644000176200001440000001071714141452520016366 0ustar liggesusers\name{HierStrauss} \alias{HierStrauss} \title{The Hierarchical Strauss Point Process Model} \description{ Creates an instance of the hierarchical Strauss point process model which can then be fitted to point pattern data. } \usage{ HierStrauss(radii, types=NULL, archy=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. 
See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. The nonstationary hierarchical Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss process pairwise interaction is yielded by the function \code{HierStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{HierStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStrauss()}. } \seealso{ \code{\link{MultiStrauss}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStraussHard}}. } \examples{ r <- matrix(10 * c(3,4,4,3), nrow=2,ncol=2) HierStrauss(r) # prints a sensible description of itself ppm(ants ~1, HierStrauss(r, , c("Messor", "Cataglyphis"))) # fit the stationary hierarchical Strauss process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. 
\emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. } \keyword{spatial} \keyword{models} spatstat.core/man/macros/0000755000176200001440000000000014141377573015117 5ustar liggesusersspatstat.core/man/macros/defns.Rd0000755000176200001440000000432014141377573016507 0ustar liggesusers%% macro definitions for spatstat man pages \newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}} \newcommand{\rolf}{Rolf Turner \email{r.turner@auckland.ac.nz}} \newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}} \newcommand{\spatstatAuthors}{\adrian, \rolf and \ege} % Names with accents \newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}} \newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}} \newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}} \newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}} \newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}} \newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}} \newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}} \newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}} \newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}} %% List of all Gibbs interactions \newcommand{\GibbsInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{Concom}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{HierHard}}, \code{\link{HierStrauss}}, \code{\link{HierStraussHard}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Saturated}}, \code{\link{SatPiece}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} %% List of interactions recognised by RMH code \newcommand{\rmhInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} spatstat.core/man/kppm.Rd0000644000176200001440000005000314142420412015045 0ustar liggesusers\name{kppm} \alias{kppm} \alias{kppm.formula} \alias{kppm.ppp} \alias{kppm.quad} \concept{point process model} \concept{Cox point process} \concept{cluster process} \concept{Neyman-Scott cluster process} \title{Fit Cluster or Cox Point Process Model} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern. 
} \usage{ kppm(X, \dots) \method{kppm}{formula}(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), \dots, data=NULL) \method{kppm}{ppp}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm", "adapcl"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), stabilize=TRUE, algorithm, statistic="K", statargs=list(), rmax = NULL, epsilon=0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) \method{kppm}{quad}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm", "adapcl"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), stabilize=TRUE, algorithm, statistic="K", statargs=list(), rmax = NULL, epsilon=0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"} or \code{"quad"}) to which the model should be fitted, or a \code{formula} in the \R language defining the model. See Details. } \item{trend}{ An \R formula, with no left hand side, specifying the form of the log intensity. } \item{clusters}{ Character string determining the cluster model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{data,covariates}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{subset}{ Optional. A subset of the spatial domain, to which the model-fitting should be restricted. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}), or an expression (possibly involving the names of entries in \code{data}) which can be evaluated to yield a window or pixel image. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, \code{"adapcl"} for adaptive second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{improve.type}{ Method for updating the initial estimate of the trend. Initially the trend is estimated as if the process is an inhomogeneous Poisson process. The default, \code{improve.type = "none"}, is to use this initial estimate. Otherwise, the trend estimate is updated by \code{\link{improve.kppm}}, using information about the pair correlation function. Options are \code{"clik1"} (first order composite likelihood, essentially equivalent to \code{"none"}), \code{"wclik1"} (weighted first order composite likelihood) and \code{"quasi"} (quasi likelihood). } \item{improve.args}{ Additional arguments passed to \code{\link{improve.kppm}} when \code{improve.type != "none"}. See Details. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihoods or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{stabilize}{ Logical value specifying whether to numerically stabilize the optimization algorithm, by specifying suitable default values of \code{control$fnscale} and \code{control$parscale}. 
}
\item{algorithm}{
Character string determining the mathematical algorithm to be used to
solve the fitting problem. If \code{method} is \code{"mincon"},
\code{"clik2"} or \code{"palm"}, this argument is passed to the
generic optimization function \code{\link[stats]{optim}} (renamed as
the argument \code{method} to \code{\link[stats]{optim}}) with
default \code{"Nelder-Mead"}. If \code{method="adapcl"}, the argument
is passed to the equation solver \code{\link[nleqslv]{nleqslv}}, with
default \code{"Broyden"}.
}
\item{statistic}{
Name of the summary statistic to be used for minimum contrast
estimation: either \code{"K"} or \code{"pcf"}.
}
\item{statargs}{
Optional list of arguments to be used when calculating the
\code{statistic}. See Details.
}
\item{rmax}{
Maximum value of interpoint distance to use in the composite
likelihood.
}
\item{epsilon}{
Tuning parameter for the adaptive composite likelihood method.
}
\item{covfunargs,use.gam,nd,eps}{
Arguments passed to \code{\link{ppm}} when fitting the intensity.
}
}
\details{
This function fits a clustered point process model to the point
pattern dataset \code{X}. The model may be either a
\emph{Neyman-Scott cluster process} or another \emph{Cox process}.
The type of model is determined by the argument \code{clusters}.
Currently the options are \code{clusters="Thomas"} for the Thomas
process, \code{clusters="MatClust"} for the \Matern cluster process,
\code{clusters="Cauchy"} for the Neyman-Scott cluster process with
Cauchy kernel, \code{clusters="VarGamma"} for the Neyman-Scott
cluster process with Variance Gamma kernel (requires an additional
argument \code{nu} to be passed through the dots; see
\code{\link{rVarGamma}} for details), and \code{clusters="LGCP"} for
the log-Gaussian Cox process (may require additional arguments passed
through \code{\dots}; see \code{\link{rLGCP}} for details on argument
names). The first four models are Neyman-Scott cluster processes.

The algorithm first estimates the intensity function of the point
process using \code{\link{ppm}}. The argument \code{X} may be a point
pattern (object of class \code{"ppp"}) or a quadrature scheme (object
of class \code{"quad"}). The intensity is specified by the
\code{trend} argument. If the trend formula is \code{~1} (the
default) then the model is \emph{homogeneous}. The algorithm begins
by estimating the intensity as the number of points divided by the
area of the window. Otherwise, the model is \emph{inhomogeneous}. The
algorithm begins by fitting a Poisson process with log intensity of
the form specified by the formula \code{trend}. (See
\code{\link{ppm}} for further explanation).

The argument \code{X} may also be a \code{formula} in the \R
language. The right hand side of the formula gives the \code{trend}
as described above. The left hand side of the formula gives the point
pattern dataset to which the model should be fitted.

If \code{improve.type="none"}, this is the final estimate of the
intensity. Otherwise, the intensity estimate is updated, as explained
in \code{\link{improve.kppm}}. Additional arguments to
\code{\link{improve.kppm}} are passed as a named list in
\code{improve.args}.

The cluster parameters of the model are then fitted either by minimum
contrast estimation, or by a composite likelihood method (maximum
composite likelihood, maximum Palm likelihood, or by solving the
adaptive composite likelihood estimating equation).
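For example, here is a minimal sketch (echoing the calls in the
Examples below) in which the same cluster model is fitted by two
different methods:
\preformatted{
  ## Thomas model fitted by minimum contrast (the default)
  fitM <- kppm(redwood ~ 1, "Thomas", method="mincon")
  ## the same model fitted by maximum Palm likelihood
  fitP <- kppm(redwood ~ 1, "Thomas", method="palm")
}
The individual fitting methods are described below.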
\describe{
\item{Minimum contrast:}{
If \code{method = "mincon"} (the default), clustering parameters of
the model will be fitted by minimum contrast estimation, that is, by
matching the theoretical \eqn{K}-function of the model to the
empirical \eqn{K}-function of the data, as explained in
\code{\link{mincontrast}}.

For a homogeneous model (\code{trend = ~1}) the empirical
\eqn{K}-function of the data is computed using \code{\link{Kest}},
and the parameters of the cluster model are estimated by the method
of minimum contrast.

For an inhomogeneous model, the inhomogeneous \eqn{K} function is
estimated by \code{\link{Kinhom}} using the fitted intensity. Then
the parameters of the cluster model are estimated by the method of
minimum contrast using the inhomogeneous \eqn{K} function. This
two-step estimation procedure is due to Waagepetersen (2007).

If \code{statistic="pcf"} then instead of using the
\eqn{K}-function, the algorithm will use the pair correlation
function \code{\link{pcf}} for homogeneous models and the
inhomogeneous pair correlation function \code{\link{pcfinhom}} for
inhomogeneous models. In this case, the smoothing parameters of the
pair correlation can be controlled using the argument
\code{statargs}, as shown in the Examples.

Additional arguments \code{\dots} will be passed to
\code{\link{clusterfit}} to control the minimum contrast fitting
algorithm. The optimisation is performed by the generic optimisation
algorithm \code{\link[stats]{optim}}.
}
\item{Second order composite likelihood:}{
If \code{method = "clik2"}, the clustering parameters of the model
will be fitted by maximising the second-order composite likelihood
(Guan, 2006). The log composite likelihood is
\deqn{
\sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta)
- \left( \sum_{i,j} w(d_{ij}) \right)
\log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv
}{
sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta))
- (sum[i,j] w(d[i,j]))
log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv)
}
where the sums are taken over all pairs of data points
\eqn{x_i, x_j}{x[i], x[j]} separated by a distance
\eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||}
less than \code{rmax}, and the double integral is taken over all
pairs of locations \eqn{u,v} in the spatial window of the data. Here
\eqn{\rho(d;\theta)}{rho(d;theta)} is the pair correlation function
of the model with cluster parameters \eqn{\theta}{theta}.

The function \eqn{w} in the composite likelihood is a weighting
function and may be chosen arbitrarily. It is specified by the
argument \code{weightfun}. If this is missing or \code{NULL} then the
default is a threshold weight function,
\eqn{w(d) = 1(d \le R)}{w(d) = 1(d <= R)}, where \eqn{R} is
\code{rmax/2}. The optimisation is performed by the generic
optimisation algorithm \code{\link[stats]{optim}}.
}
\item{Palm likelihood:}{
If \code{method = "palm"}, the clustering parameters of the model
will be fitted by maximising the Palm loglikelihood (Tanaka et al,
2008)
\deqn{
\sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta)
- \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u
}{
sum[i,j] w(x[i], x[j]) log(lambdaP(x[j] | x[i]; theta))
- integral[D] w(x[i], u) lambdaP(u | x[i]; theta) du
}
with the same notation as above. Here
\eqn{\lambda_P(u \mid v; \theta)}{lambdaP(u | v; theta)} is the Palm
intensity of the model at location \eqn{u} given there is a point at
\eqn{v}. The optimisation is performed by the generic optimisation
algorithm \code{\link[stats]{optim}}.
}
\item{Adaptive Composite likelihood:}{
If \code{method = "adapcl"}, the clustering parameters of the model
will be fitted by solving the adaptive second order composite
likelihood estimating equation (Lavancier et al, 2021). The
estimating function is
\deqn{
\sum_{u, v} w\left(\epsilon \frac{| g(0; \theta) - 1 |}{g(\|u-v\|; \theta)-1}\right)
\frac{\nabla_\theta g(\|u-v\|;\theta)}{g(\|u-v\|;\theta)}
- \int_D \int_D w\left(\epsilon \frac{| g(0; \theta) - 1 |}{g(\|u-v\|; \theta)-1}\right)
\nabla_\theta g(\|u-v\|; \theta) \rho(u) \rho(v)\, du\, dv
}{
sum[u,v] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1))
g'(||u-v||; theta) / g(||u-v||; theta)
- integral[D,D] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1))
g'(||u-v||; theta) rho(u) rho(v) du dv
}
where the sum is taken over all distinct pairs of points. Here
\eqn{g(d;\theta)}{g(d; theta)} is the pair correlation function with
parameters \eqn{\theta}{theta}. The partial derivative with respect
to \eqn{\theta}{theta} is \eqn{g'(d; \theta)}{g'(d; theta)}, and
\eqn{\rho(u)}{rho(u)} denotes the fitted intensity function of the
model.

The tuning parameter \eqn{\epsilon}{epsilon} is independent of the
data. It can be specified by the argument \code{epsilon} and has
default value \eqn{0.01}.

The function \eqn{w} in the estimating function is a weighting
function of bounded support \eqn{[-1,1]}. It is specified by the
argument \code{weightfun}. If this is missing or \code{NULL} then the
default is
\eqn{w(d) = 1(\|d\| \le 1) \exp(1/(d^2-1))}{w(d) = 1(||d|| <= 1) exp(1/(d^2-1))}.
The estimating equation is solved using the nonlinear equation solver
\code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. The
package \pkg{nleqslv} must be installed in order to use this option.
}
}
Fitting the LGCP model requires the \pkg{RandomFields} package,
except in the default case where the exponential covariance is
assumed.
}
\section{Optimization algorithm}{
The following details allow greater control over the fitting
procedure.

For the first three fitting methods (\code{method="mincon"},
\code{"clik2"} and \code{"palm"}), the optimisation is performed by
the generic optimisation algorithm \code{\link[stats]{optim}}. The
behaviour of this algorithm can be modified using the arguments
\code{control} and \code{algorithm}. Useful control arguments include
\code{trace}, \code{maxit} and \code{abstol} (documented in the help
for \code{\link[stats]{optim}}).

For \code{method="adapcl"}, the estimating equation is solved using
the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the
package \pkg{nleqslv}. Arguments available for controlling the solver
are documented in the help for \code{\link[nleqslv]{nleqslv}}; they
include \code{control}, \code{globStrat}, \code{startparm} for the
initial estimates and \code{algorithm} for the method. The package
\pkg{nleqslv} must be installed in order to use this option.
}
\section{Log-Gaussian Cox Models}{
To fit a log-Gaussian Cox model with non-exponential covariance,
specify \code{clusters="LGCP"} and use additional arguments to
specify the covariance structure. These additional arguments can be
given individually in the call to \code{kppm}, or they can be
collected together in a list called \code{covmodel}. For example, a
\Matern model with parameter \eqn{\nu=0.5} could be specified either
by \code{kppm(X, clusters="LGCP", model="matern", nu=0.5)} or by
\code{kppm(X, clusters="LGCP", covmodel=list(model="matern",
nu=0.5))}.

The argument \code{model} specifies the type of covariance model: the
default is \code{model="exp"} for an exponential covariance.
Alternatives include \code{"matern"}, \code{"cauchy"} and \code{"spheric"}. Model names correspond to functions beginning with \code{RM} in the \pkg{RandomFields} package: for example \code{model="matern"} corresponds to the function \code{RMmatern} in the \pkg{RandomFields} package. Additional arguments are passed to the relevant function in the \pkg{RandomFields} package: for example if \code{model="matern"} then the additional argument \code{nu} is required, and is passed to the function \code{RMmatern} in the \pkg{RandomFields} package. Note that it is not possible to use \emph{anisotropic} covariance models because the \code{kppm} technique assumes the pair correlation function is isotropic. } \value{ An object of class \code{"kppm"} representing the fitted model. There are methods for printing, plotting, predicting, simulating and updating objects of this class. } \section{Error and warning messages}{ See \code{\link{ppm.ppp}} for a list of common error messages and warnings originating from the first stage of model-fitting. } \seealso{ Methods for \code{kppm} objects: \code{\link{plot.kppm}}, \code{\link{fitted.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link[spatstat.core:methods.kppm]{methods.kppm}}, \code{\link{as.ppm.kppm}}, \code{\link{as.fv.kppm}}, \code{\link{Kmodel.kppm}}, \code{\link{pcfmodel.kppm}}. See also \code{\link{improve.kppm}} for improving the fit of a \code{kppm} object. Minimum contrast fitting algorithm: higher level interface \code{\link{clusterfit}}; low-level algorithm \code{\link{mincontrast}}. Alternative fitting algorithms: \code{\link{thomas.estK}}, \code{\link{matclust.estK}}, \code{\link{lgcp.estK}}, \code{\link{cauchy.estK}}, \code{\link{vargamma.estK}}, \code{\link{thomas.estpcf}}, \code{\link{matclust.estpcf}}, \code{\link{lgcp.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{vargamma.estpcf}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. For fitting Poisson or Gibbs point process models, see \code{\link{ppm}}. } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Guan, Y., Jalilian, A. and Waagepetersen, R. (2015) Quasi-likelihood for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 677-697. Jalilian, A., Guan, Y. and Waagepetersen, R. (2012) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Lavancier, F., Poinas, A., and Waagepetersen, R. (2021) Adaptive estimating function inference for nonstationary determinantal point processes. \emph{Scandinavian Journal of Statistics}, \bold{48} (1), 87--107. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
}
\examples{
  online <- interactive()
  if(!online) op <- spatstat.options(npixel=32, ndummy.min=16)

  # method for point patterns
  kppm(redwood, ~1, "Thomas")
  # method for formulas
  kppm(redwood ~ 1, "Thomas")

  # different models for clustering
  if(online) kppm(redwood ~ x, "MatClust")
  kppm(redwood ~ x, "MatClust", statistic="pcf", statargs=list(stoyan=0.2))
  kppm(redwood ~ x, clusters="Cauchy", statistic="K")
  kppm(redwood, clusters="VarGamma", nu = 0.5, statistic="pcf")

  # log-Gaussian Cox process (LGCP) models
  kppm(redwood ~ 1, "LGCP", statistic="pcf")
  if(require("RandomFields")) {
    # RandomFields package is needed for non-default choice of covariance model
    kppm(redwood ~ x, "LGCP", statistic="pcf",
         model="matern", nu=0.3, control=list(maxit=10))
  }

  # Different fitting techniques
  kppm(redwood ~ 1, "Thomas", method="c")
  kppm(redwood ~ 1, "Thomas", method="p")
  # quasi-likelihood improvement
  kppm(redwood ~ x, "Thomas", improve.type = "quasi")

  if(!online) spatstat.options(op)
}
\author{
\spatstatAuthors,
with contributions from Abdollah Jalilian and Rasmus Waagepetersen.
Adaptive composite likelihood method contributed by Chiara Fend and
modified by Adrian Baddeley.
}
\keyword{spatial}
\keyword{models}
spatstat.core/man/rLGCP.Rd0000644000176200001440000001162214141452520015015 0ustar liggesusers\name{rLGCP}
\alias{rLGCP}
\title{Simulate Log-Gaussian Cox Process}
\description{
Generate a random point pattern, a realisation of the log-Gaussian
Cox process.
}
\usage{
rLGCP(model="exp", mu = 0, param = NULL, \dots,
win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE)
}
\arguments{
\item{model}{
character string: the short name of a covariance model for the
Gaussian random field. After adding the prefix \code{"RM"}, the code
will search for a function of this name in the \pkg{RandomFields}
package.
}
\item{mu}{
mean function of the Gaussian random field. Either a single number, a
\code{function(x,y, ...)} or a pixel image (object of class
\code{"im"}).
}
\item{param}{
List of parameters for the covariance. Standard arguments are
\code{var} and \code{scale}.
}
\item{\dots}{
Additional parameters for the covariance, or arguments passed to
\code{\link{as.mask}} to determine the pixel resolution.
}
\item{win}{
Window in which to simulate the pattern. An object of class
\code{"owin"}.
}
\item{saveLambda}{
Logical. If \code{TRUE} (the default) then the simulated random
intensity will also be saved, and returned as an attribute of the
point pattern.
}
\item{nsim}{Number of simulated realisations to be generated.}
\item{drop}{
Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
result will be a point pattern, rather than a list containing a point
pattern.
}
}
\value{
A point pattern (object of class \code{"ppp"}) or a list of point
patterns.

Additionally, the simulated intensity function for each point pattern
is returned as an attribute \code{"Lambda"} of the point pattern, if
\code{saveLambda=TRUE}.
}
\details{
This function generates a realisation of a log-Gaussian Cox process
(LGCP). This is a Cox point process in which the logarithm of the
random intensity is a Gaussian random field with mean function
\eqn{\mu} and covariance function \eqn{c(r)}. Conditional on the
random intensity, the point process is a Poisson process with this
intensity.

The string \code{model} specifies the covariance function of the
Gaussian random field, and the parameters of the covariance are
determined by \code{param} and \code{\dots}.
To determine the covariance model, the string \code{model} is prefixed by \code{"RM"}, and a function of this name is sought in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the \Matern covariance is specified by \code{model="matern"}, corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. Standard variance parameters (for all functions beginning with \code{"RM"} in the \pkg{RandomFields} package) are \code{var} for the variance at distance zero, and \code{scale} for the scale parameter. Other parameters are specified in the help files for the individual functions beginning with \code{"RM"}. For example the help file for \code{RMmatern} states that \code{nu} is a parameter for this model. This algorithm uses the function \code{\link[RandomFields]{RFsimulate}} in the \pkg{RandomFields} package to generate values of a Gaussian random field, with the specified mean function \code{mu} and the covariance specified by the arguments \code{model} and \code{param}, on the points of a regular grid. The exponential of this random field is taken as the intensity of a Poisson point process, and a realisation of the Poisson process is then generated by the function \code{\link{rpoispp}} in the \pkg{spatstat} package. If the simulation window \code{win} is missing or \code{NULL}, then it defaults to \code{Window(mu)} if \code{mu} is a pixel image, and it defaults to the unit square otherwise. The LGCP model can be fitted to data using \code{\link{kppm}}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rGaussPoisson}}, \code{\link{rNeymanScott}}, \code{\link{lgcp.estK}}, \code{\link{kppm}} } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. } \examples{ if(require(RandomFields)) { # homogeneous LGCP with exponential covariance function X <- rLGCP("exp", 3, var=0.2, scale=.1) # inhomogeneous LGCP with Gaussian covariance function m <- as.im(function(x, y){5 - 1.5 * (x - 0.5)^2 + 2 * (y - 0.5)^2}, W=owin()) X <- rLGCP("gauss", m, var=0.15, scale =0.5) plot(attr(X, "Lambda")) points(X) # inhomogeneous LGCP with Matern covariance function X <- rLGCP("matern", function(x, y){ 1 - 0.4 * x}, var=2, scale=0.7, nu=0.5, win = owin(c(0, 10), c(0, 10))) plot(X) } } \author{Abdollah Jalilian and Rasmus Waagepetersen. Modified by \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat.core/man/hotbox.Rd0000644000176200001440000000550314141452520015412 0ustar liggesusers\name{hotbox} \alias{hotbox} \title{ Heat Kernel for a Two-Dimensional Rectangle } \description{ Calculate values of the heat kernel in a rectangle with insulated edges. } \usage{ hotbox(Xsource, Xquery, sigma, \dots, W=NULL, squared=FALSE, nmax=20) } \arguments{ \item{Xsource}{ Point pattern of sources of heat. Object of class \code{"ppp"} or convertible to a point pattern using \code{as.ppp(Xsource, W)}. } \item{Xquery}{ Locations where the heat kernel value is required. An object of class \code{"ppp"} specifying query location points, or an object of class \code{"im"} or \code{"owin"} specifying a grid of query points. } \item{sigma}{ Bandwidth for kernel. A single number. } \item{\dots}{ Extra arguments (passed to \code{\link[spatstat.geom]{as.mask}}) controlling the pixel resolution of the result, when \code{Xquery} is a window or an image. 
} \item{W}{ Window (object of class \code{"owin"}) used to define the spatial domain when \code{Xsource} is not of class \code{"ppp"}. } \item{squared}{ Logical value indicating whether to take the square of each heat kernel value, before summing over the source points. } \item{nmax}{ Number of terms to be used from the infinite-sum expression for the heat kernel. A single integer. } } \details{ This function computes the sum of heat kernels associated with each of the source points, evaluating them at each query location. The window for evaluation of the heat kernel must be a rectangle. The heat kernel in any region can be expressed as an infinite sum of terms associated with the eigenfunctions of the Laplacian. The heat kernel in a rectangle is the product of heat kernels for one-dimensional intervals on the horizontal and vertical axes. This function uses \code{\link[spatstat.geom]{hotrod}} to compute the one-dimensional heat kernels, truncating the infinite sum to the first \code{nmax} terms, and then calculates the two-dimensional heat kernel from each source point to each query location. If \code{squared=TRUE} these values are squared. Finally the values are summed over all source points to obtain a single value for each query location. } \value{ If \code{Xquery} is a point pattern, the result is a numeric vector with one entry for each query point. If \code{Xquery} is an image or window, the result is a pixel image. } \seealso{ \code{\link{densityHeat.ppp}} } \references{ Baddeley, A., Davies, T., Rakshit, S., Nair, G. and McSwiggan, G. (2021) Diffusion smoothing for spatial point patterns. \emph{Statistical Science}, in press. } \author{ Adrian Baddeley and Greg McSwiggan. } \examples{ X <- runifpoint(10) Y <- runifpoint(5) hotbox(X, Y, 0.1) plot(hotbox(X, Window(X), 0.1)) points(X, pch=16) } \keyword{math} spatstat.core/man/allstats.Rd0000644000176200001440000000604414141452520015737 0ustar liggesusers\name{allstats} \alias{allstats} \title{Calculate four standard summary functions of a point pattern.} \description{ Calculates the \eqn{F}, \eqn{G}, \eqn{J}, and \eqn{K} summary functions for an unmarked point pattern. Returns them as a function array (of class \code{"fasp"}, see \code{\link{fasp.object}}). } \usage{ allstats(pp, \dots, dataname=NULL, verb=FALSE) } \arguments{ \item{pp}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"}. It must not be marked. } \item{\dots}{ Optional arguments passed to the summary functions \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}}. } \item{dataname}{A character string giving an optional (alternative) name for the point pattern. } \item{verb}{A logical value meaning ``verbose''. If \code{TRUE}, progress reports are printed during calculation. } } \details{ This computes four standard summary statistics for a point pattern: the empty space function \eqn{F(r)}, nearest neighbour distance distribution function \eqn{G(r)}, van Lieshout-Baddeley function \eqn{J(r)} and Ripley's function \eqn{K(r)}. The real work is done by \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}} respectively. Consult the help files for these functions for further information about the statistical interpretation of \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K}. If \code{verb} is \code{TRUE}, then ``progress reports'' (just indications of completion) are printed out when the calculations are finished for each of the four function types. 
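For example, here is a minimal sketch (the choice of the built-in
\code{cells} pattern is purely illustrative):
\preformatted{
  ## compute F, G, J and K with progress reports, then plot all four
  a <- allstats(cells, verb=TRUE)
  plot(a)
}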
The overall title of the array of four functions (for plotting by \code{\link{plot.fasp}}) will be formed from the argument \code{dataname}. If this is not given, it defaults to the expression for \code{pp} given in the call to \code{allstats}. } \value{ A list of length 4 containing the \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K} functions respectively. The list can be plotted directly using \code{plot} (which dispatches to \code{\link{plot.solist}}). Each list entry retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} or \code{\link{Kest}}. Thus each entry in the list is a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J, and \code{cbind(trans,theo) ~ r} for K. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.solist}}, \code{\link{plot.fv}}, \code{\link{fv.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} } \examples{ data(swedishpines) a <- allstats(swedishpines,dataname="Swedish Pines") if(interactive()) { plot(a) plot(a, subset=list("r<=15","r<=15","r<=15","r<=50")) } } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/methods.rhohat.Rd0000644000176200001440000000651614141452520017043 0ustar liggesusers\name{methods.rhohat} \alias{methods.rhohat} %DoNotExport \alias{print.rhohat} \alias{plot.rhohat} \alias{predict.rhohat} \alias{simulate.rhohat} \title{ Methods for Intensity Functions of Spatial Covariate } \description{ These are methods for the class \code{"rhohat"}. } \usage{ \method{print}{rhohat}(x, ...) \method{plot}{rhohat}(x, ..., do.rug=TRUE) \method{predict}{rhohat}(object, ..., relative=FALSE, what=c("rho", "lo", "hi", "se")) \method{simulate}{rhohat}(object, nsim=1, ..., drop=TRUE) } \arguments{ \item{x,object}{ An object of class \code{"rhohat"} representing a smoothed estimate of the intensity function of a point process. } \item{\dots}{ Arguments passed to other methods. } \item{do.rug}{ Logical value indicating whether to plot the observed values of the covariate as a rug plot along the horizontal axis. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } \item{nsim}{ Number of simulations to be generated. } \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), a point pattern is returned. If \code{drop=FALSE}, a list of length 1 containing a point pattern is returned. } \item{what}{ Optional character string (partially matched) specifying which value should be calculated: either the function estimate (\code{what="rho"}, the default), the lower or upper end of the confidence interval (\code{what="lo"} or \code{what="hi"}) or the standard error (\code{what="se"}). } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link[graphics]{plot}}, \code{\link[stats]{predict}} and \code{\link[stats]{simulate}} for the class \code{"rhohat"}. An object of class \code{"rhohat"} is an estimate of the intensity of a point process, as a function of a given spatial covariate. See \code{\link{rhohat}}. The method \code{plot.rhohat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. 
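For example, here is a minimal sketch of the plot method (the pattern
and covariate are illustrative, echoing the Examples below):
\preformatted{
  ## estimate intensity as a function of the x coordinate,
  ## then plot it, suppressing the rug of observed covariate values
  X <- rpoispp(function(x,y) { exp(3 + 3*x) })
  rho <- rhohat(X, function(x,y) { x })
  plot(rho, do.rug=FALSE)
}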
The method \code{predict.rhohat} computes a pixel image of the
intensity \eqn{\rho(Z(u))}{rho(Z(u))} at each spatial location
\eqn{u}, where \eqn{Z} is the spatial covariate.

The method \code{simulate.rhohat} invokes \code{predict.rhohat} to
determine the predicted intensity, and then simulates a Poisson point
process with this intensity.
}
\value{
For \code{predict.rhohat} the value is a pixel image (object of class
\code{"im"} or \code{"linim"}). For \code{simulate.rhohat} the value
is a point pattern (object of class \code{"ppp"} or \code{"lpp"}).
For other functions, the value is \code{NULL}.
}
\author{
\adrian
}
\seealso{
\code{\link{rhohat}}
}
\examples{
  X <- rpoispp(function(x,y){exp(3+3*x)})
  rho <- rhohat(X, function(x,y){x})
  rho
  plot(rho)
  Y <- predict(rho)
  plot(Y)
  plot(simulate(rho), add=TRUE)
  #
  fit <- ppm(X, ~x)
  rho <- rhohat(fit, "y")
  opa <- par(mfrow=c(1,2))
  plot(predict(rho))
  plot(predict(rho, relative=TRUE))
  par(opa)
  plot(predict(rho, what="se"))
}
\keyword{spatial}
\keyword{methods}
spatstat.core/man/compatible.fv.Rd0000644000176200001440000000264514141452520016644 0ustar liggesusers\name{compatible.fv}
\alias{compatible.fv}
\title{Test Whether Function Objects Are Compatible}
\description{
Tests whether two or more function objects (class \code{"fv"}) are
compatible.
}
\usage{
\method{compatible}{fv}(A, B, \dots, samenames=TRUE)
}
\arguments{
\item{A,B,\dots}{Two or more function value objects (class
\code{"fv"}).}
\item{samenames}{
Logical value indicating whether to check for complete agreement
between the column names of the objects (\code{samenames=TRUE}, the
default) or just to check that the name of the function argument is
the same (\code{samenames=FALSE}).
}
}
\details{
An object of class \code{"fv"} is essentially a data frame containing
several different statistical estimates of the same function. Such
objects are returned by \code{\link[spatstat.core]{Kest}} and its
relatives.

This command tests whether such objects are compatible (so that, for
example, they could be added or subtracted). It is a method for the
generic command \code{\link{compatible}}.

The functions are compatible if they have been evaluated at the same
sequence of values of the argument \code{r}, and if the statistical
estimates have the same names.
}
\value{
Logical value: \code{TRUE} if the objects are compatible, and
\code{FALSE} if they are not.
}
\seealso{
\code{\link{eval.fv}}
}
\author{\adrian and \rolf }
\keyword{spatial}
\keyword{manip}
spatstat.core/man/profilepl.Rd0000644000176200001440000002013514141452520016101 0ustar liggesusers\name{profilepl}
\alias{profilepl}
\title{Fit Models by Profile Maximum Pseudolikelihood or AIC}
\description{
Fits point process models by maximising the profile likelihood,
profile pseudolikelihood or profile composite likelihood, or by
minimising the AIC.
}
\usage{
profilepl(s, f, \dots, aic=FALSE, rbord=NULL, verbose = TRUE, fast=TRUE)
}
\arguments{
\item{s}{
Data frame containing values of the irregular parameters over which
the criterion will be computed.
}
\item{f}{
Function (such as \code{\link{Strauss}}) that generates an interpoint
interaction object, given values of the irregular parameters.
}
\item{\dots}{
Data passed to \code{\link{ppm}} to fit the model.
}
\item{aic}{
Logical value indicating whether to find the parameter values which
minimise the AIC (\code{aic=TRUE}) or maximise the profile likelihood
(\code{aic=FALSE}, the default).
}
\item{rbord}{
Radius for border correction (same for all models). If omitted, this
will be computed from the interactions.
} \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical value indicating whether to use a faster, less accurate model-fitting technique when computing the profile pseudolikelihood. See Section on Speed and Accuracy. } } \details{ The model-fitting function \code{\link{ppm}} fits point process models to point pattern data. However, only the \sQuote{regular} parameters of the model can be fitted by \code{\link{ppm}}. The model may also depend on \sQuote{irregular} parameters that must be fixed in any call to \code{\link{ppm}}. This function \code{profilepl} is a wrapper which finds the values of the irregular parameters that give the best fit. If \code{aic=FALSE} (the default), the best fit is the model which maximises the likelihood (if the models are Poisson processes) or maximises the pseudolikelihood or logistic likelihood. If \code{aic=TRUE} then the best fit is the model which minimises the Akaike Information Criterion \code{\link{AIC.ppm}}. The argument \code{s} must be a data frame whose columns contain values of the irregular parameters over which the maximisation is to be performed. An irregular parameter may affect either the interpoint interaction or the spatial trend. \describe{ \item{interaction parameters:}{ in a call to \code{\link{ppm}}, the argument \code{interaction} determines the interaction between points. It is usually a call to a function such as \code{\link{Strauss}}. The arguments of this call are irregular parameters. For example, the interaction radius parameter \eqn{r} of the Strauss process, determined by the argument \code{r} to the function \code{\link{Strauss}}, is an irregular parameter. } \item{trend parameters:}{ in a call to \code{\link{ppm}}, the spatial trend may depend on covariates, which are supplied by the argument \code{covariates}. These covariates may be functions written by the user, of the form \code{function(x,y,...)}, and the extra arguments \code{\dots} are irregular parameters. } } The argument \code{f} determines the interaction for each model to be fitted. It would typically be one of the functions \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{OrdThresh}}, \code{\link{Softcore}}, \code{\link{Strauss}} or \code{\link{StraussHard}}. Alternatively it could be a function written by the user. Columns of \code{s} which match the names of arguments of \code{f} will be interpreted as interaction parameters. Other columns will be interpreted as trend parameters. The data frame \code{s} must provide values for each argument of \code{f}, except for the optional arguments, which are those arguments of \code{f} that have the default value \code{NA}. To find the best fit, each row of \code{s} will be taken in turn. Interaction parameters in this row will be passed to \code{f}, resulting in an interaction object. Then \code{\link{ppm}} will be applied to the data \code{...} using this interaction. Any trend parameters will be passed to \code{\link{ppm}} through the argument \code{covfunargs}. This results in a fitted point process model. The value of the log pseudolikelihood or AIC from this model is stored. After all rows of \code{s} have been processed in this way, the row giving the maximum value of log pseudolikelihood will be found. 
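For example, here is a minimal sketch (reproducing the first call in
the Examples below), in which the interaction radius \code{r} of the
Strauss process is profiled over a grid of candidate values:
\preformatted{
  rr <- data.frame(r=seq(0.05, 0.15, by=0.01))
  ps <- profilepl(rr, Strauss, cells)
  ps
}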
The object returned by \code{profilepl} contains the profile pseudolikelihood (or profile AIC) function, the best fitting model, and other data. It can be plotted (yielding a plot of the log pseudolikelihood or AIC values against the irregular parameters) or printed (yielding information about the best fitting values of the irregular parameters). In general, \code{f} may be any function that will return an interaction object (object of class \code{"interact"}) that can be used in a call to \code{\link{ppm}}. Each argument of \code{f} must be a single value. } \section{Speed and Accuracy}{ Computation of the profile pseudolikelihood can be time-consuming. We recommend starting with a small experiment in which \code{s} contains only a few rows of values. This will indicate roughly the optimal values of the parameters. Then a full calculation using more finely spaced values can identify the exact optimal values. It is normal that the procedure appears to slow down at the end. During the computation of the profile pseudolikelihood, the model-fitting procedure is accelerated by omitting some calculations that are not needed for computing the pseudolikelihood. When the optimal parameter values have been identified, they are used to fit the final model in its entirety. Fitting the final model can take longer than computing the profile pseudolikelihood. If \code{fast=TRUE} (the default), then additional shortcuts are taken in order to accelerate the computation of the profile log pseudolikelihood. These shortcuts mean that the values of the profile log pseudolikelihood in the result (\code{$prof}) may not be equal to the values that would be obtained if the model was fitted normally. Currently this happens only for the area interaction \code{\link{AreaInter}}. It may be wise to do a small experiment with \code{fast=TRUE} and then a definitive calculation with \code{fast=FALSE}. } \value{ An object of class \code{"profilepl"}. There are methods for \code{\link[graphics]{plot}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{simulate}}, \code{\link{as.ppm}}, \code{\link{fitin}} and \code{\link{parameters}} for objects of this class. The components of the object include \item{fit}{Best-fitting model} \item{param}{The data frame \code{s}} \item{iopt}{Row index of the best-fitting parameters in \code{s}} To extract the best fitting model you can also use \code{\link{as.ppm}}. } \seealso{ \code{\link{plot.profilepl}} } \examples{ # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, by=0.01)) \testonly{ rr <- data.frame(r=c(0.05,0.1,0.15)) } ps <- profilepl(rr, Strauss, cells) ps plot(ps) # two irregular parameters rs <- expand.grid(r=seq(0.05,0.15, by=0.01),sat=1:3) \testonly{ rs <- expand.grid(r=c(0.07,0.12),sat=1:2) } pg <- profilepl(rs, Geyer, cells) pg as.ppm(pg) ## more information summary(pg) # multitype pattern with a common interaction radius # RR <- data.frame(R=seq(0.03,0.05,by=0.01)) # MS <- function(R) { MultiStrauss(radii=diag(c(R,R))) } # pm <- profilepl(RR, MS, amacrine ~marks) } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. 
} \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/runifpointOnLines.Rd0000644000176200001440000000376214141452520017601 0ustar liggesusers\name{runifpointOnLines} \alias{runifpointOnLines} \title{Generate N Uniform Random Points On Line Segments} \description{ Given a line segment pattern, generate a random point pattern consisting of \code{n} points uniformly distributed on the line segments. } \usage{ runifpointOnLines(n, L, nsim=1, drop=TRUE) } \arguments{ \item{n}{Number of points to generate.} \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should lie. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This command generates a point pattern consisting of \code{n} independent random points, each point uniformly distributed on the line segment pattern. This means that, for each random point, \itemize{ \item the probability of falling on a particular segment is proportional to the length of the segment; and \item given that the point falls on a particular segment, it has uniform probability density along that segment. } If \code{n} is a single integer, the result is an unmarked point pattern containing \code{n} points. If \code{n} is a vector of integers, the result is a marked point pattern, with \code{m} different types of points, where \code{m = length(n)}, in which there are \code{n[j]} points of type \code{j}. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}) with the same window as \code{L}. If \code{nsim > 1}, a list of point patterns. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{pointsOnLines}}, \code{\link{runifpoint}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) Y <- runifpointOnLines(20, X) plot(X, main="") plot(Y, add=TRUE) Z <- runifpointOnLines(c(5,5), X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/fixef.mppm.Rd0000644000176200001440000000305214141452520016155 0ustar liggesusers\name{fixef.mppm} \alias{fixef.mppm} \title{ Extract Fixed Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the fixed effects of the model. A method for \code{fixef}. } \usage{ \method{fixef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{fixef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}). This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the fixed effects of the model. } \value{ A numeric vector of coefficients. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. 
} \seealso{ \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) fixef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) fixef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/Kdot.inhom.Rd0000644000176200001440000002764514141452520016124 0ustar liggesusers\name{Kdot.inhom} \alias{Kdot.inhom} \title{ Inhomogeneous Multitype K Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{K} function, which counts the expected number of points of any type within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kdot.inhom(X, i, lambdaI=NULL, lambdadot=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous dot-type \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity of the entire point process. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Ignored. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the dot \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIdot}{ Optional.
A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdadot} for each pair of points, the first point of type \code{i} and the second of any type. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdadot}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdadot} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kdot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. Briefly, given a multitype point process, consider the points without their types, and suppose this unmarked point process has intensity function \eqn{\lambda(u)}{lambda(u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda(\zeta)}{1/lambda(z)} at each point \eqn{\zeta}{z} of the process. Then the expected total mass per unit area is 1. The inhomogeneous ``dot-type'' \eqn{K} function \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}, discounting this point itself. If the process of type \eqn{i} points were independent of the points of other types, then \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j} for \eqn{j\neq i}{j != i}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. 
(Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly the argument \code{lambdadot} should contain estimated values of the intensity of the entire point process. It may be either a pixel image, a numeric vector of length equal to the number of points in \code{X}, a function, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdadot} will be ignored. (The two arguments \code{lambdaI}, \code{lambdadot} allow the user to specify two different methods for calculating the intensities of the two kinds of points, while \code{lambdaX} ensures that the same method is used for both kinds of points.) For advanced use only, the optional argument \code{lambdaIdot} is a matrix containing estimated values of the products of these two intensities for each pair of points, the first point of type \code{i} and the second of any type. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kdot.inhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semi-parametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. \Moller, J. and Waagepetersen, R. (2003) \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}.
It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kcross.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing woods <- woods[seq(1,npoints(woods), by=10)] ma <- split(woods)$maple lg <- unmark(woods) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") K <- Kdot.inhom(woods, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # Equivalent K <- Kdot.inhom(woods, "maple", sigma=0.15) # Fit model fit <- ppm(woods ~ marks * polynom(x,y,2)) K <- Kdot.inhom(woods, "maple", lambdaX=fit, update=FALSE) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kdot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/CDF.Rd0000644000176200001440000000245414141452520014505 0ustar liggesusers\name{CDF} \alias{CDF} \alias{CDF.density} \title{ Cumulative Distribution Function From Kernel Density Estimate } \description{ Given a kernel estimate of a probability density, compute the corresponding cumulative distribution function. } \usage{ CDF(f, \dots) \method{CDF}{density}(f, \dots, warn = TRUE) } \arguments{ \item{f}{ Density estimate (object of class \code{"density"}). } \item{\dots}{ Ignored. } \item{warn}{ Logical value indicating whether to issue a warning if the density estimate \code{f} had to be renormalised because it was computed in a restricted interval. } } \details{ \code{CDF} is generic, with a method for class \code{"density"}. This calculates the cumulative distribution function whose probability density has been estimated and stored in the object \code{f}. The object \code{f} must belong to the class \code{"density"}, and would typically have been obtained from a call to the function \code{\link[stats]{density}}. } \value{ A function, which can be applied to any numeric value or vector of values. } \author{ \spatstatAuthors } \seealso{ \code{\link[stats]{density}}, \code{\link{quantile.density}} } \examples{ b <- density(runif(10)) f <- CDF(b) f(0.5) plot(f) } \keyword{nonparametric} \keyword{univar} spatstat.core/man/plot.dppm.Rd0000644000176200001440000000351214141452520016022 0ustar liggesusers\name{plot.dppm} \alias{plot.dppm} \title{Plot a fitted determinantal point process} \description{ Plots a fitted determinantal point process model, displaying the fitted intensity and the fitted summary function. } \usage{ \method{plot}{dppm}(x, ..., what=c("intensity", "statistic")) } \arguments{ \item{x}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } } \details{ This is a method for the generic function \code{\link{plot}} for the class \code{"dppm"} of fitted determinantal point process models. The argument \code{x} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. 
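For example, a fitted model can be displayed as follows (a minimal sketch, mirroring the example below; the inexact fitting method \code{method="c"} is used purely for speed, not as a recommendation):
\preformatted{
  fit <- dppm(swedishpines ~ x + y, dppGauss, method="c")
  plot(fit)   # fitted intensity and fitted summary statistic
}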
The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"} and \code{"statistic"}. This command is capable of producing two different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} } } \value{ Null. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss, method="c") plot(fit) } \seealso{ \code{\link{dppm}}, \code{\link{plot.ppm}}, } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/Extract.fasp.Rd0000644000176200001440000000320014141452520016441 0ustar liggesusers\name{Extract.fasp} \alias{[.fasp} \title{Extract Subset of Function Array} \description{ Extract a subset of a function array (an object of class \code{"fasp"}). } \usage{ \method{[}{fasp}(x, I, J, drop=TRUE,\dots) } \arguments{ \item{x}{ A function array. An object of class \code{"fasp"}. } \item{I}{ any valid expression for a subset of the row indices of the array. } \item{J}{ any valid expression for a subset of the column indices of the array. } \item{drop}{ Logical. When the selected subset consists of only one cell of the array, if \code{drop=FALSE} the result is still returned as a \eqn{1 \times 1}{1 * 1} array of functions (class \code{"fasp"}) while if \code{drop=TRUE} it is returned as a function (class \code{"fv"}). } \item{\dots}{Ignored.} } \value{ A function array (of class \code{"fasp"}). Exceptionally, if the array has only one cell, and if \code{drop=TRUE}, then the result is a function value table (class \code{"fv"}). } \details{ A function array can be regarded as a matrix whose entries are functions. See \code{\link{fasp.object}} for an explanation of function arrays. This routine extracts a sub-array according to the usual conventions for matrix indexing. } \seealso{ \code{\link{fasp.object}} } \examples{ # Lansing woods data - multitype points with 6 types a <- alltypes(lansing, 'K') # extract first three marks only b <- a[1:3,1:3] if(interactive()) {plot(b)} # subset of array pertaining to hickories h <- a["hickory", ] if(interactive()) {plot(h)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/dg.test.Rd0000644000176200001440000001250414141452520015456 0ustar liggesusers\name{dg.test} \alias{dg.test} \title{ Dao-Genton Adjusted Goodness-Of-Fit Test } \description{ Performs the Dao and Genton (2014) adjusted goodness-of-fit test of spatial pattern. } \usage{ dg.test(X, \dots, exponent = 2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse = TRUE, leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. 
Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. If \code{alternative="greater"} the alternative hypothesis is that the true value of the summary function is higher than the theoretical value. } \item{reuse}{ Logical value indicating whether to re-use the first stage simulations at the second stage, as described by Dao and Genton (2014). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test, in the equivalent form described by Baddeley et al (2014). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.test}} in this case. } \value{ A hypothesis test (object of class \code{"htest"}) which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517.
Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis}, in press. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{bits.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}} } \examples{ ns <- if(interactive()) 19 else 4 dg.test(cells, nsim=ns) dg.test(cells, alternative="less", nsim=ns) dg.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} spatstat.core/man/nnclean.Rd0000644000176200001440000001036014141452520015522 0ustar liggesusers\name{nnclean} \alias{nnclean} \alias{nnclean.ppp} \alias{nnclean.pp3} \title{ Nearest Neighbour Clutter Removal } \description{ Detect features in a 2D or 3D spatial point pattern using nearest neighbour clutter removal. } \usage{ nnclean(X, k, ...) \method{nnclean}{ppp}(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) \method{nnclean}{pp3}(X, k, ..., convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}) or a three-dimensional point pattern (object of class \code{"pp3"}). } \item{k}{ Degree of neighbour: \code{k=1} means nearest neighbour, \code{k=2} means second nearest, etc. } \item{\dots}{ Arguments passed to \code{\link{hist.default}} to control the appearance of the histogram, if \code{plothist=TRUE}. } \item{edge.correct}{ Logical flag specifying whether periodic edge correction should be performed (only implemented in 2 dimensions). } \item{wrap}{ Numeric value specifying the relative size of the margin in which data will be replicated for the periodic edge correction (if \code{edge.correct=TRUE}). A fraction of window width and window height. } \item{convergence}{ Relative tolerance threshold for testing convergence of EM algorithm. } \item{maxit}{ Maximum number of iterations for EM algorithm. } \item{plothist}{ Logical flag specifying whether to plot a diagnostic histogram of the nearest neighbour distances and the fitted distribution. } \item{verbose}{ Logical flag specifying whether to print progress reports. } } \details{ Byers and Raftery (1998) developed a technique for recognising features in a spatial point pattern in the presence of random clutter. For each point in the pattern, the distance to the \eqn{k}th nearest neighbour is computed. Then the EM algorithm is used to fit a mixture distribution to the \eqn{k}th nearest neighbour distances. The mixture components represent the feature and the clutter. The mixture model can be used to classify each point as belonging to one or other component. The function \code{nnclean} is generic, with methods for two-dimensional point patterns (class \code{"ppp"}) and three-dimensional point patterns (class \code{"pp3"}) currently implemented. The result is a point pattern (2D or 3D) with two additional columns of marks: \describe{ \item{class}{ A factor, with levels \code{"noise"} and \code{"feature"}, indicating the maximum likelihood classification of each point. } \item{prob}{ Numeric vector giving the estimated probabilities that each point belongs to a feature.
} } The object also has extra information stored in attributes: \code{"theta"} contains the fitted parameters of the mixture model, \code{"info"} contains information about the fitting procedure, and \code{"hist"} contains the histogram structure returned from \code{\link{hist.default}} if \code{plothist = TRUE}. } \value{ An object of the same kind as \code{X}, obtained by attaching marks to the points of \code{X}. The object also has attributes, as described under Details. } \references{ Byers, S. and Raftery, A.E. (1998) Nearest-neighbour clutter removal for estimating features in spatial point processes. \emph{Journal of the American Statistical Association} \bold{93}, 577--584. } \author{ Original by Simon Byers and Adrian Raftery. Adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{nndist}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \examples{ # shapley galaxy cluster X <- nnclean(shapley, k=17, plothist=TRUE) plot(X, which.marks=1, chars=c(".", "+"), cols=1:2, main="Shapley data, cluster and noise") plot(X, which.marks=2, cols=function(x)hsv(0.2+0.8*(1-x),1,1), main="Shapley data, probability of cluster") Y <- split(X, un=TRUE) plot(Y, chars="+", cex=0.5) marks(X) <- marks(X)$prob plot(cut(X, breaks=3), chars=c(".", "+", "+"), cols=1:3) } \keyword{spatial} \keyword{classif} spatstat.core/man/rjitter.psp.Rd0000644000176200001440000000433214141452520016372 0ustar liggesusers\name{rjitter.psp} \alias{rjitter.psp} \title{Random Perturbation of Line Segment Pattern} \description{ Randomly perturbs a spatial pattern of line segments by applying independent random displacements to the segment endpoints. } \usage{ \method{rjitter}{psp}(X, radius, \dots, clip=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{X}{ A spatial pattern of line segments (object of class \code{"psp"}). } \item{radius}{ Scale of perturbations. A positive numerical value. Each segment endpoint will be displaced by a random distance, with maximum displacement equal to this value. } \item{\dots}{ Ignored. } \item{clip}{ Logical value specifying what to do if segments cross the boundary of the window. See Details. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a spatial pattern of line segments (class \code{"psp"}) rather than a list of length 1 containing this pattern. } } \details{ The function \code{\link[spatstat.geom]{rjitter}} is generic. This function is the method for the class \code{"psp"} of line segment patterns. Each of the endpoints of each segment in \code{X} will be subjected to an independent random displacement. The displacement vectors are uniformly distributed in a circle of radius \code{radius}. If \code{clip=TRUE} (the default), segment endpoints are permitted to move to locations slightly outside the window of \code{X}, and the resulting segments will be clipped to the window. If \code{clip=FALSE}, segment endpoints are conditioned to fall inside the window. If \code{nsim=1} and \code{drop=TRUE}, the result is another spatial pattern of line segments (object of class \code{"psp"}). Otherwise, the result is a list of \code{nsim} line segment patterns. } \value{ A spatial pattern of line segments (object of class \code{"psp"}) or a list of such patterns. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.geom]{rjitter}} for point patterns in two dimensions.
} \examples{ E <- edges(letterR) Window(E) <- owin(c(1.9, 4.1), c(0.5, 3.5)) plot(rjitter(E, 0.1)) } \keyword{spatial} \keyword{datagen} spatstat.core/man/subfits.Rd0000644000176200001440000000517314141452520015571 0ustar liggesusers\name{subfits} \alias{subfits} \alias{subfits.new} \alias{subfits.old} \title{Extract List of Individual Point Process Models} \description{ Takes a Gibbs point process model that has been fitted to several point patterns simultaneously, and produces a list of fitted point process models for the individual point patterns. } \usage{ subfits(object, what="models", verbose=FALSE) subfits.old(object, what="models", verbose=FALSE) subfits.new(object, what="models", verbose=FALSE) } \arguments{ \item{object}{ An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{what}{ What should be returned. Either \code{"models"} to return the fitted models, or \code{"interactions"} to return the fitted interactions only. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ \code{object} is assumed to have been generated by \code{\link{mppm}}. It represents a point process model that has been fitted to a list of several point patterns, with covariate data. For each of the \emph{individual} point pattern datasets, this function derives the corresponding fitted model for that dataset only (i.e. a point process model for the \eqn{i}th point pattern, that is consistent with \code{object}). If \code{what="models"}, the result is a list of point process models (a list of objects of class \code{"ppm"}), one model for each point pattern dataset in the original fit. If \code{what="interactions"}, the result is a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). Two different algorithms are provided, as \code{subfits.old} and \code{subfits.new}. Currently \code{subfits} is the same as the old algorithm \code{subfits.old} because the newer algorithm is too memory-hungry. } \value{ A list of point process models (a list of objects of class \code{"ppm"}) or a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). } \examples{ H <- hyperframe(Wat=waterstriders) fit <- mppm(Wat~x, data=H) subfits(fit) H$Wat[[3]] <- rthin(H$Wat[[3]], 0.1) fit2 <- mppm(Wat~x, data=H, random=~1|id) subfits(fit2) \testonly{a <- subfits.new(fit)} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{mppm}}, \code{\link{ppm}} } \keyword{spatial} \keyword{models} spatstat.core/man/rMaternI.Rd0000644000176200001440000000455214141452520015633 0ustar liggesusers\name{rMaternI} \alias{rMaternI} \title{Simulate Matern Model I} \description{ Generate a random point pattern, a simulated realisation of the \Matern Model I inhibition process model. } \usage{ rMaternI(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, \dots, nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Alternatively a higher-dimensional box of class \code{"box3"} or \code{"boxx"}. } \item{stationary}{ Logical. 
Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is normally an object of class \code{"ppp"}, but may be of class \code{"pp3"} or \code{"ppx"} depending on the window. } \details{ This algorithm generates one or more realisations of \Matern's Model I inhibition process inside the window \code{win}. The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. A proposal point is then deleted if it lies within \code{r} units' distance of another proposal point. Otherwise it is retained. The retained points constitute \Matern's Model I. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}} } \examples{ X <- rMaternI(20, 0.05) Y <- rMaternI(20, 0.05, stationary=FALSE) } \author{ \adrian , Ute Hahn, \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat.core/man/bits.test.Rd0000644000176200001440000001174614141452520016034 0ustar liggesusers\name{bits.test} \alias{bits.test} \title{ Balanced Independent Two-Stage Monte Carlo Test } \description{ Performs a Balanced Independent Two-Stage Monte Carlo test of goodness-of-fit for spatial pattern. } \usage{ bits.test(X, \dots, exponent = 2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of replicates in each stage of the test. A total of \code{nsim * (nsim + 1)} simulated point patterns will be generated, and the \eqn{p}-value will be a multiple of \code{1/(nsim+1)}. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. 
If \code{alternative="greater"} the alternative hypothesis is that the true value of the summary function is higher than the theoretical value. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Balanced Independent Two-Stage Monte Carlo test proposed by Baddeley et al (2017), an improvement of the Dao-Genton (2014) test. If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ A hypothesis test (object of class \code{"htest"}) which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis}, in press. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ Simulation envelopes: \code{\link{bits.envelope}}. Other tests: \code{\link{dg.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}}. } \examples{ ns <- if(interactive()) 19 else 4 bits.test(cells, nsim=ns) bits.test(cells, alternative="less", nsim=ns) bits.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} spatstat.core/man/residuals.kppm.Rd0000644000176200001440000000213114141452520017042 0ustar liggesusers\name{residuals.kppm} \alias{residuals.kppm} \title{ Residuals for Fitted Cox or Cluster Point Process Model } \description{ Given a Cox or cluster point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{kppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"kppm"}) for which residuals should be calculated.
} \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. } \seealso{ \code{\link{msr}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "Thomas") rr <- residuals(fit) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/is.stationary.Rd0000644000176200001440000000600614141452520016715 0ustar liggesusers\name{is.stationary} \alias{is.stationary} \alias{is.stationary.ppm} \alias{is.stationary.kppm} \alias{is.stationary.slrm} \alias{is.stationary.rmhmodel} \alias{is.stationary.dppm} \alias{is.stationary.detpointprocfamily} \alias{is.poisson} \alias{is.poisson.ppm} \alias{is.poisson.kppm} \alias{is.poisson.slrm} \alias{is.poisson.rmhmodel} \alias{is.poisson.interact} \title{ Recognise Stationary and Poisson Point Process Models } \description{ Given a point process model that has been fitted to data, determine whether the model is a stationary point process, and whether it is a Poisson point process. } \usage{ is.stationary(x) \method{is.stationary}{ppm}(x) \method{is.stationary}{kppm}(x) \method{is.stationary}{slrm}(x) \method{is.stationary}{rmhmodel}(x) \method{is.stationary}{dppm}(x) \method{is.stationary}{detpointprocfamily}(x) is.poisson(x) \method{is.poisson}{ppm}(x) \method{is.poisson}{kppm}(x) \method{is.poisson}{slrm}(x) \method{is.poisson}{rmhmodel}(x) \method{is.poisson}{interact}(x) } \arguments{ \item{x}{ A fitted spatial point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"dppm"} or \code{"slrm"}) or similar object. } } \details{ The argument \code{x} represents a fitted spatial point process model or a similar object. \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents a stationary point process, and \code{FALSE} if not. \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents a Poisson point process, and \code{FALSE} if not. The functions \code{is.stationary} and \code{is.poisson} are generic, with methods for the classes \code{"ppm"} (Gibbs point process models), \code{"kppm"} (cluster or Cox point process models), \code{"slrm"} (spatial logistic regression models) and \code{"rmhmodel"} (model specifications for the Metropolis-Hastings algorithm). Additionally \code{is.stationary} has a method for classes \code{"detpointprocfamily"} and \code{"dppm"} (both determinantal point processes) and \code{is.poisson} has a method for class \code{"interact"} (interaction structures for Gibbs models). \code{is.poisson.kppm} will return \code{FALSE}, unless the model \code{x} is degenerate: either \code{x} has zero intensity so that its realisations are empty with probability 1, or it is a log-Gaussian Cox process where the log intensity has zero variance. \code{is.poisson.slrm} will always return \code{TRUE}, by convention. } \value{ A logical value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{is.marked}} to determine whether a model is a marked point process. \code{\link{summary.ppm}} for detailed information. Model-fitting functions \code{\link{ppm}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{slrm}}. 
} \examples{ fit <- ppm(cells ~ x) is.stationary(fit) is.poisson(fit) fut <- kppm(redwood ~ 1, "MatClust") is.stationary(fut) is.poisson(fut) fot <- slrm(cells ~ x) is.stationary(fot) is.poisson(fot) } \keyword{spatial} \keyword{models} spatstat.core/man/is.multitype.ppm.Rd0000644000176200001440000000437114141452520017352 0ustar liggesusers\name{is.multitype.ppm} \alias{is.multitype.ppm} \title{Test Whether A Point Process Model is Multitype} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points that classify the points into several types. } \usage{ \method{is.multitype}{ppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a multitype point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a multitype point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (or, conversely, whether the fitted model ignores the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.multitype(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.multitype(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.multitype(fit3) # FALSE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.core/man/Kres.Rd0000644000176200001440000000557414141452520015023 0ustar liggesusers\name{Kres} \Rdversion{1.1} \alias{Kres} \title{ Residual K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{K} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Kres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Kcom}}. } \item{\dots}{ Arguments passed to \code{\link{Kcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{K} function of the dataset, which should be approximately zero if the model is a good fit to the data.
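For example, the following minimal sketch (using the \code{cells} dataset, as in the examples below) fits a Strauss model and plots the residual \eqn{K} function, which should fluctuate around zero when the model fits well:
\preformatted{
  fit <- ppm(cells ~ 1, Strauss(0.08))
  K1 <- Kres(fit)
  plot(K1, ires ~ r)   # residual K function against distance r
}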
In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Kres} first calls \code{\link{Kcom}} to compute both the nonparametric estimate of the \eqn{K} function and its model compensator. Then \code{Kres} computes the difference between them, which is the residual \eqn{K}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Kcom}}. Then \code{Kres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Kcom}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gres}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{ fit0 <- ppm(cells, ~1, nd=16)} K0 <- Kres(fit0) K0 plot(K0) # isotropic-correction estimate plot(K0, ires ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kres(fit1) if(interactive()) { plot(K1, ires ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Kres(cells, interaction=Strauss(0.12))) } # How to make envelopes # E <- envelope(fit1, Kres, model=fit1, nsim=19) # plot(E) # For computational efficiency Kc <- Kcom(fit1) K1 <- Kres(Kc) } \keyword{spatial} \keyword{models} spatstat.core/man/dppspecden.Rd0000644000176200001440000000110414141452520016225 0ustar liggesusers\name{dppspecden} \alias{dppspecden} \title{Extract Spectral Density from Determinantal Point Process Model Object} \description{ Returns the spectral density of a determinantal point process model as a function of one argument \code{x}. } \usage{dppspecden(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{A function} \author{ \adrian, \rolf and \ege } \examples{ model <- dppMatern(lambda = 100, alpha=.01, nu=1, d=2) dppspecden(model) } \seealso{ \code{\link{dppspecdenrange}} } \keyword{spatial} \keyword{models} spatstat.core/man/reach.kppm.Rd0000644000176200001440000000220714141452520016135 0ustar liggesusers\name{reach.kppm} \alias{reach.kppm} \title{Range of Interaction for a Cox or Cluster Point Process Model} \description{ Returns the range of interaction for a Cox or cluster point process model. } \usage{ \method{reach}{kppm}(x, \dots, epsilon) } \arguments{ \item{x}{ Fitted point process model of class \code{"kppm"}. } \item{epsilon}{ Optional numerical value. Differences smaller than \code{epsilon} are treated as zero. } \item{\dots}{ Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a fitted point process model of class \code{"kppm"} may be defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. The argument \code{epsilon} specifies the tolerance; there is a sensible default.
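For example (a minimal sketch, mirroring the example below and additionally varying \code{epsilon}; a smaller tolerance requires the pair correlation function to be closer to 1, and so gives a larger range):
\preformatted{
  fit <- kppm(redwood ~ 1)
  reach(fit)                  # default tolerance
  reach(fit, epsilon=0.001)   # stricter tolerance, larger range
}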
} \value{Numeric} \author{ \spatstatAuthors. } \examples{ fit <- kppm(redwood ~ 1) reach(fit) } \keyword{spatial} \keyword{models} spatstat.core/man/pool.quadrattest.Rd0000644000176200001440000000530614141452520017421 0ustar liggesusers\name{pool.quadrattest} \alias{pool.quadrattest} \title{ Pool Several Quadrat Tests } \description{ Pool several quadrat tests into a single quadrat test. } \usage{ \method{pool}{quadrattest}(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) } \arguments{ \item{\dots}{ Any number of objects, each of which is a quadrat test (object of class \code{"quadrattest"}). } \item{df}{ Optional. Number of degrees of freedom of the test statistic. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df.est}. } \item{df.est}{ Optional. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df}. } \item{nsim}{ Number of simulations, for Monte Carlo test. } \item{Xname}{ Optional. Name of the original data. } \item{CR}{ Optional. Numeric value of the Cressie-Read exponent \code{CR} overriding the value used in the tests. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"quadrattest"}. An object of class \code{"quadrattest"} represents a \eqn{\chi^2}{chi^2} test or Monte Carlo test of goodness-of-fit for a point process model, based on quadrat counts. Such objects are created by the command \code{\link{quadrat.test}}. Each of the arguments \code{\dots} must be an object of class \code{"quadrattest"}. They must all be the same type of test (chi-squared test or Monte Carlo test, conditional or unconditional) and must all have the same type of alternative hypothesis. The test statistic of the pooled test is the Pearson \eqn{X^2} statistic taken over all cells (quadrats) of all tests. The \eqn{p} value of the pooled test is then computed using either a Monte Carlo test or a \eqn{\chi^2}{chi^2} test. For a pooled \eqn{\chi^2}{chi^2} test, the number of degrees of freedom of the combined test is computed by adding the degrees of freedom of all the tests (equivalent to assuming the tests are independent) unless it is determined by the arguments \code{df} or \code{df.est}. The resulting \eqn{p} value is computed to obtain the pooled test. For a pooled Monte Carlo test, new simulations are performed to determine the pooled Monte Carlo \eqn{p} value. } \value{ Another object of class \code{"quadrattest"}. } \seealso{ \code{\link{pool}}, \code{\link{quadrat.test}} } \examples{ Y <- split(humberside) test1 <- quadrat.test(Y[[1]]) test2 <- quadrat.test(Y[[2]]) pool(test1, test2, Xname="Humberside") } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat.core/man/marktable.Rd0000644000176200001440000000541714141452520016055 0ustar liggesusers\name{marktable} \alias{marktable} \title{Tabulate Marks in Neighbourhood of Every Point in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and compile a frequency table of the marks of these neighbour points. } \usage{ marktable(X, R, N, exclude=TRUE, collapse=FALSE) } \arguments{ \item{X}{ A marked point pattern. An object of class \code{"ppp"}. } \item{R}{ Neighbourhood radius. Incompatible with \code{N}. } \item{N}{ Number of neighbours of each point. Incompatible with \code{R}. } \item{exclude}{ Logical. If \code{exclude=TRUE}, the neighbours of a point do not include the point itself. 
If \code{exclude=FALSE}, a point belongs to its own neighbourhood. } \item{collapse}{ Logical. If \code{collapse=FALSE} (the default) the results for each point are returned as separate rows of a table. If \code{collapse=TRUE}, the results are aggregated according to the type of point. } } \value{ A contingency table (object of class \code{"table"}). If \code{collapse=FALSE}, the table has one row for each point in \code{X}, and one column for each possible mark value. If \code{collapse=TRUE}, the table has one row and one column for each possible mark value. } \details{ This algorithm visits each point in the point pattern \code{X}, inspects all the neighbouring points within a radius \code{R} of the current point (or the \code{N} nearest neighbours of the current point), and compiles a frequency table of the marks attached to the neighbours. The dataset \code{X} must be a multitype point pattern, that is, \code{marks(X)} must be a \code{factor}. If \code{collapse=FALSE} (the default), the result is a two-dimensional contingency table with one row for each point in the pattern, and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of point \code{i} that have mark \code{j}. If \code{collapse=TRUE}, this contingency table is aggregated according to the type of point, so that the result is a contingency table with one row and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of a point with mark \code{i} that have mark \code{j}. To perform more complicated calculations on the neighbours of every point, use \code{\link{markstat}} or \code{\link{applynbd}}. } \seealso{ \code{\link{markstat}}, \code{\link{applynbd}}, \code{\link{Kcross}}, \code{\link{ppp.object}}, \code{\link{table}} } \examples{ head(marktable(amacrine, 0.1)) head(marktable(amacrine, 0.1, exclude=FALSE)) marktable(amacrine, N=1, collapse=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} spatstat.core/man/bw.diggle.Rd0000644000176200001440000000734714141452520015761 0ustar liggesusers\name{bw.diggle} \alias{bw.diggle} \title{ Cross Validated Bandwidth Selection for Kernel Density } \description{ Uses cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.diggle(X, ..., correction="good", hmax=NULL, nr=512, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string passed to \code{\link{Kest}} determining the edge correction to be used to calculate the \eqn{K} function. } \item{hmax}{ Numeric. Maximum value of bandwidth that should be considered. } \item{nr}{ Integer. Number of steps in the distance value \eqn{r} to use in computing numerical integrals. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is chosen to minimise the mean-square error criterion defined by Diggle (1985). 
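In practice the selection requires only a single call (a minimal sketch, assuming \code{X} is an unmarked point pattern such as the hickories used in the examples below):
\preformatted{
  X <- split(lansing)$hickory
  sigma <- bw.diggle(X)     # object of class "bw.optim"
  plot(sigma)               # cross-validation criterion against sigma
  plot(density(X, sigma))   # density estimate at the selected bandwidth
}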
The algorithm uses the method of Berman and Diggle (1989) to compute the quantity \deqn{ M(\sigma) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0) }{ M(\sigma) = MSE(\sigma)/\lambda^2 - g(0) } as a function of bandwidth \eqn{\sigma}{\sigma}, where \eqn{\mbox{MSE}(\sigma)}{MSE(\sigma)} is the mean squared error at bandwidth \eqn{\sigma}{\sigma}, while \eqn{\lambda}{\lambda} is the mean intensity, and \eqn{g} is the pair correlation function. See Diggle (2003, pages 115-118) for a summary of this method. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. } \section{Definition of bandwidth}{ The smoothing parameter \code{sigma} returned by \code{bw.diggle} (and displayed on the horizontal axis of the plot) corresponds to \code{h/2}, where \code{h} is the smoothing parameter described in Diggle (2003, pages 116-118) and Berman and Diggle (1989). In those references, the smoothing kernel is the uniform density on the disc of radius \code{h}. In \code{\link{density.ppp}}, the smoothing kernel is the isotropic Gaussian density with standard deviation \code{sigma}. When replacing one kernel by another, the usual practice is to adjust the bandwidths so that the kernels have equal variance (cf. Diggle 2003, page 118). This implies that \code{sigma = h/2}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.ppl}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ data(lansing) attach(split(lansing)) b <- bw.diggle(hickory) plot(b, ylim=c(-2, 0), main="Cross validation for hickories") if(interactive()) { plot(density(hickory, b)) } } \references{ Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order intensity of a spatial point process. \emph{Journal of the Royal Statistical Society, series B} \bold{51}, 81--92. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/simulate.ppm.Rd0000644000176200001440000001025114141452520016521 0ustar liggesusers\name{simulate.ppm} \alias{simulate.ppm} \title{Simulate a Fitted Gibbs Point Process Model} \description{ Generates simulated realisations from a fitted Gibbs or Poisson point process model. } \usage{ \method{simulate}{ppm}(object, nsim=1, ..., singlerun = FALSE, start = NULL, control = default.rmhcontrol(object, w=w), w = window, window = NULL, project=TRUE, new.coef=NULL, verbose=FALSE, progress=(nsim > 1), drop=FALSE) } \arguments{ \item{object}{ Fitted point process model. An object of class \code{"ppm"}. } \item{nsim}{ Number of simulated realisations. } \item{singlerun}{ Logical. Whether to generate the simulated realisations from a single long run of the Metropolis-Hastings algorithm (\code{singlerun=TRUE}) or from separate, independent runs of the algorithm (\code{singlerun=FALSE}, the default). } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. 
Defaults to \code{list(n.start=npoints(data.ppm(object)))} meaning that the initial state of the algorithm has the same number of points as the original dataset. } \item{control}{Data controlling the running of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{w, window}{ Optional. The window in which the model is defined. An object of class \code{"owin"}. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{verbose}{ Logical flag indicating whether to print progress reports from \code{\link{rmh.ppm}} during the simulation of each point pattern. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"ppm"} of fitted point process models. Simulations are performed by \code{\link{rmh.ppm}}. If \code{singlerun=FALSE} (the default), the simulated patterns are the results of independent runs of the Metropolis-Hastings algorithm. If \code{singlerun=TRUE}, a single long run of the algorithm is performed, and the state of the simulation is saved every \code{nsave} iterations to yield the simulated patterns. In the case of a single run, the behaviour is controlled by the parameters \code{nsave,nburn,nrep}. These are described in \code{\link{rmhcontrol}}. They may be passed in the \code{\dots} arguments or included in \code{control}. It is sufficient to specify two of the three parameters \code{nsave,nburn,nrep}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). It also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ \testonly{op <- spatstat.options(rmh.nrep=10)} fit <- ppm(japanesepines, ~1, Strauss(0.1)) simulate(fit, 2) simulate(fit, 2, singlerun=TRUE, nsave=1e4, nburn=1e4) \testonly{spatstat.options(op)} } \seealso{ \code{\link{ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/lurking.slrm.Rd0000644000176200001440000002637614141452520016551 0ustar liggesusers\name{lurking.slrm} \alias{lurking.slrm} \title{Lurking Variable Plot for Spatial Logistic Regression} \description{ Plot spatial point process residuals against a covariate } \usage{ \method{lurking}{slrm}(object, covariate, type="raw", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = TRUE, clipwindow=NULL, rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"slrm"}) for which diagnostics should be produced. 
This object is usually obtained from \code{\link{slrm}}. } \item{covariate}{ The covariate against which residuals should be plotted. Either a numeric vector, a pixel image, or an \code{expression}. See \emph{Details} below. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. } \item{cumulative}{ Logical flag indicating whether to plot a cumulative sum of marks (\code{cumulative=TRUE}) or the derivative of this sum, a marginal density of the smoothed residual field (\code{cumulative=FALSE}). } \item{\dots}{ Arguments passed to \code{\link{plot.default}} and \code{\link{lines}} to control the plot behaviour. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{plot.sd}{ Logical value indicating whether error bounds should be added to plot. The default is \code{TRUE}. } \item{clipwindow}{ If not \code{NULL} this argument indicates that residuals shall only be computed inside a subregion of the window containing the original point pattern data. Then \code{clipwindow} should be a window object of class \code{"owin"}. } \item{rv}{ Usually absent. If this argument is present, the point process residuals will not be calculated from the fitted model \code{object}, but will instead be taken directly from \code{rv}. } \item{envelope}{ Logical value indicating whether to compute simulation envelopes for the plot. Alternatively \code{envelope} may be a list of point patterns to use for computing the simulation envelopes, or an object of class \code{"envelope"} containing simulated point patterns. } \item{nsim}{ Number of simulated point patterns to be generated to produce the simulation envelope, if \code{envelope=TRUE}. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{typename}{ Usually absent. If this argument is present, it should be a string, and will be used (in the axis labels of plots) to describe the type of residuals. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{check}{ Logical flag indicating whether the integrity of the data structure in \code{object} should be checked. } \item{verbose}{ Logical value indicating whether to print progress reports during Monte Carlo simulation. } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{splineargs}{ A list of arguments passed to \code{smooth.spline} for the estimation of the derivatives in the case \code{cumulative=FALSE}. } \item{internal}{ Internal use only. } } \value{ The (invisible) return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. This object is a list containing two dataframes \code{empirical} and \code{theoretical}. The first dataframe \code{empirical} contains columns \code{covariate} and \code{value} giving the coordinates of the lurking variable plot. 
The second dataframe \code{theoretical} contains columns
  \code{covariate}, \code{mean} and \code{sd}
  giving the coordinates of the plot of the theoretical mean
  and standard deviation.
}
\details{
  This function generates a \sQuote{lurking variable} plot for a
  fitted spatial logistic regression model.
  Residuals from the model represented by \code{object}
  are plotted against the covariate specified by \code{covariate}.
  This plot can be used to reveal departures from the fitted model,
  in particular, to reveal that the point pattern depends on the covariate.

  The function \code{lurking} is generic, with methods for
  \code{ppm} and \code{ppp} documented elsewhere.

  The argument \code{object} would usually be a fitted point process model
  obtained from the model-fitting algorithm \code{\link{slrm}}.

  First the residuals from the fitted model (Baddeley et al, 2005)
  are computed at each pixel,
  or alternatively the `exponential energy marks' (Stoyan and Grabarnik,
  1991) are computed at each data point.
  The argument \code{type} selects the type of
  residual or weight. See \code{\link{diagnose.ppm}} for options
  and explanation.

  A lurking variable plot for point processes (Baddeley et al, 2005)
  displays either the cumulative sum of residuals/weights
  (if \code{cumulative = TRUE}) or a kernel-weighted average of the
  residuals/weights (if \code{cumulative = FALSE}) plotted against the
  covariate. The empirical plot (solid lines) is shown together with
  its expected value assuming the model is true (dashed lines)
  and optionally also the pointwise
  two-standard-deviation limits (grey shading).

  To be more precise, let \eqn{Z(u)} denote the value of the covariate
  at a spatial location \eqn{u}.
  \itemize{
    \item
    If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z},
    where \eqn{H(z)} is the sum of the residuals
    over all quadrature points where the covariate takes
    a value less than or equal to \eqn{z}, or the sum of the
    exponential energy weights over all data points where the covariate
    takes a value less than or equal to \eqn{z}.
    \item
    If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z},
    where \eqn{h(z)} is the derivative of \eqn{H(z)},
    computed approximately by spline smoothing.
  }
  For the point process residuals \eqn{E(H(z)) = 0},
  while for the exponential energy weights
  \eqn{E(H(z)) = } area of the subset of the window
  satisfying \eqn{Z(u) \le z}{Z(u) <= z}.

  If the empirical and theoretical curves deviate substantially
  from one another, the interpretation is that the fitted model does
  not correctly account for dependence on the covariate.
  The correct form (of the spatial trend part of the model)
  may be suggested by the shape of the plot.

  If \code{plot.sd = TRUE}, then superimposed on the lurking variable
  plot are the pointwise
  two-standard-deviation error limits for \eqn{H(z)} calculated for the
  inhomogeneous Poisson process. The default is \code{plot.sd = TRUE}
  for Poisson models and \code{plot.sd = FALSE} for non-Poisson
  models.

  By default, the two-standard-deviation limits are calculated
  from the exact formula for the asymptotic variance
  of the residuals under the asymptotic normal approximation,
  equation (37) of Baddeley et al (2006).
  However, for compatibility with the original paper
  of Baddeley et al (2005), if \code{oldstyle=TRUE},
  the two-standard-deviation limits are calculated
  using the innovation variance,
  an over-estimate of the true variance of the residuals.

  The argument \code{covariate} is either a numeric vector, a pixel
  image, or an R language expression.
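  For example (a minimal sketch of the expression and pixel-image forms;
  the model \code{fit} here is hypothetical, and any fitted
  \code{"slrm"} object could be substituted):
  \preformatted{
    fit <- slrm(japanesepines ~ x + y)
    lurking(fit, expression(x), type="raw")     # covariate as an expression
    Zim <- as.im(function(x,y) { x }, Window(japanesepines))
    lurking(fit, Zim, type="raw", covname="x")  # same covariate as a pixel image
  }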
  If it is a numeric vector, it is assumed to contain
  the values of the covariate for each of the quadrature points
  in the fitted model. The quadrature points can be extracted by
  \code{\link{quad.ppm}(object)}.

  If \code{covariate} is a pixel image, it is assumed to contain the
  values of the covariate at each location in the window. The values of
  this image at the quadrature points will be extracted.

  Alternatively, if \code{covariate} is an \code{expression}, it will be
  evaluated in the same environment as the model formula used in
  fitting the model \code{object}. It must yield a vector of the same
  length as the number of quadrature points. The expression may contain
  the terms \code{x} and \code{y} representing the Cartesian
  coordinates, and may also contain other variables that were available
  when the model was fitted. Certain variable names are reserved words;
  see \code{\link{ppm}}.

  Note that lurking variable plots for the \eqn{x} and \eqn{y}
  coordinates are also generated by \code{\link{diagnose.ppm}}, amongst
  other types of diagnostic plots. This function is more general in
  that it enables the user to plot the residuals against any chosen
  covariate that may have been present.

  For advanced use, even the values of the residuals/weights can be
  altered. If the argument \code{rv} is present, the residuals will not
  be calculated from the fitted model \code{object} but will instead be
  taken directly from the object \code{rv}. If \code{type = "eem"} then
  \code{rv} should be similar to the return value of \code{\link{eem}},
  namely, a numeric vector with length equal to the number of data
  points in the original point pattern. Otherwise, \code{rv} should be
  similar to the return value of \code{\link{residuals.ppm}}, that is,
  \code{rv} should be an object of class \code{"msr"} (see
  \code{\link{msr}}) representing a signed measure.
}
\seealso{
  \code{\link{residuals.slrm}},
  \code{\link{eem}},
  \code{\link{slrm}}
}
\references{
  Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K.,
  Schuhmacher, D., Shah, R. and Turner, R. (2010)
  Spatial logistic regression and change-of-support
  for spatial Poisson point processes.
  \emph{Electronic Journal of Statistics}
  \bold{4}, 1151--1201.
  DOI: 10.1214/10-EJS581.

  Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005)
  Residual analysis for spatial point processes.
  \emph{Journal of the Royal Statistical Society, Series B}
  \bold{67}, 617--666.

  Baddeley, A., \Moller, J. and Pakes, A.G. (2006)
  Properties of residuals for spatial point processes.
  \emph{Annals of the Institute of Statistical Mathematics}
  \bold{60}, 627--649.

  Stoyan, D. and Grabarnik, P. (1991)
  Second-order characteristics for stochastic structures connected with
  Gibbs point processes.
  \emph{Mathematische Nachrichten} \bold{151}, 95--100.
}
\examples{
  fit <- slrm(japanesepines ~ y)
  (b <- lurking(fit, expression(x), type="raw"))
  lurking(fit, expression(x), type="raw", cumulative=FALSE)
}
\author{
  \spatstatAuthors.
}
\keyword{spatial}
\keyword{models}
\keyword{hplot}
spatstat.core/man/dppGauss.Rd0000644000176200001440000000215114141452520015671 0ustar liggesusers\name{dppGauss}
\alias{dppGauss}
\title{Gaussian Determinantal Point Process Model}
\description{
  Function generating an instance of the Gaussian
  determinantal point process model.
}
\usage{dppGauss(\dots)}
\arguments{
  \item{\dots}{arguments of the form \code{tag=value}
    specifying the parameters.
See Details.}
}
\details{
  The Gaussian DPP is defined by Lavancier, \Moller and Rubak (2015).
  The possible parameters are:
  \itemize{
    \item the intensity \code{lambda} as a positive numeric
    \item the scale parameter \code{alpha} as a positive numeric
    \item the dimension \code{d} as a positive integer
  }
}
\value{An object of class \code{"detpointprocfamily"}.}
\author{
  \adrian
  \rolf
  and \ege
}
\references{
  Lavancier, F., \Moller, J. and Rubak, E. (2015)
  Determinantal point process models and statistical inference.
  \emph{Journal of the Royal Statistical Society, Series B}
  \bold{77}, 853--877.
}
\examples{
  m <- dppGauss(lambda=100, alpha=.05, d=2)
}
\seealso{
  \code{\link{dppBessel}},
  \code{\link{dppCauchy}},
  \code{\link{dppMatern}},
  \code{\link{dppPowerExp}}
}
spatstat.core/man/dummify.Rd0000644000176200001440000000342714141452520015564 0ustar liggesusers\name{dummify}
\alias{dummify}
\title{
  Convert Data to Numeric Values by Constructing Dummy Variables
}
\description{
  Converts data of any kind to numeric values.
  A factor is expanded to a set of dummy variables.
}
\usage{
dummify(x)
}
\arguments{
  \item{x}{
    Vector, factor, matrix or data frame to be converted.
  }
}
\details{
  This function converts data (such as a factor) to numeric values
  in order that the user may calculate, for example,
  the mean, variance, covariance and correlation of the data.

  If \code{x} is a numeric vector or integer vector, it is returned
  unchanged.

  If \code{x} is a logical vector, it is converted to a 0-1 matrix with
  2 columns. The first column contains a 1 if the logical value is
  \code{FALSE}, and the second column contains a 1 if the logical value
  is \code{TRUE}.

  If \code{x} is a complex vector, it is converted to a matrix with 2
  columns, containing the real and imaginary parts.

  If \code{x} is a factor, the result is a matrix of 0-1 dummy
  variables. The matrix has one column for each possible level of the
  factor. The \code{(i,j)} entry is equal to 1 when the \code{i}th
  factor value equals the \code{j}th level, and is equal to 0 otherwise.

  If \code{x} is a matrix or data frame, the appropriate conversion is
  applied to each column of \code{x}.

  Note that, unlike \code{\link[stats]{model.matrix}}, this command
  converts a factor into a full set of dummy variables (one column for
  each level of the factor).
}
\value{
  A numeric matrix.
}
\author{
  \adrian
}
\examples{
  chara <- sample(letters[1:3], 8, replace=TRUE)
  logi <- (runif(8) < 0.3)
  comp <- round(4*runif(8) + 3*runif(8) * 1i, 1)
  nume <- 8:1 + 0.1
  df <- data.frame(nume, chara, logi, comp)
  df
  dummify(df)
}
\keyword{math}
spatstat.core/man/pool.anylist.Rd0000644000176200001440000000235214141452520016541 0ustar liggesusers\name{pool.anylist}
\alias{pool.anylist}
\title{
  Pool Data from a List of Objects
}
\description{
  Pool the data from the objects in a list.
}
\usage{
\method{pool}{anylist}(x, ...)
}
\arguments{
  \item{x}{
    A list, belonging to the class \code{"anylist"},
    containing objects that can be pooled.
  }
  \item{\dots}{
    Optional additional objects
    which can be pooled with the elements of \code{x}.
  }
}
\details{
  The function \code{\link{pool}} is generic. Its purpose is to combine
  data from several objects of the same type (typically computed from
  different datasets) into a common, pooled estimate.

  The function \code{pool.anylist} is the method for the class
  \code{"anylist"}. It is used when the objects to be pooled are given
  in a list \code{x}.
  Each of the elements of the list \code{x},
  and each of the subsequent arguments \code{\dots} if provided,
  must be an object of the same class.
}
\value{
  An object of the same class as each of the entries in \code{x}.
}
\seealso{
  \code{\link{anylist}},
  \code{\link{pool}}.
}
\examples{
  Keach <- anylapply(waterstriders, Kest, ratio=TRUE, correction="iso")
  K <- pool(Keach)
}
\author{
  \adrian
  \rolf
  and \ege
}
\keyword{spatial}
\keyword{nonparametric}
spatstat.core/man/methods.ssf.Rd0000644000176200001440000000662414141452520016351 0ustar liggesusers\name{methods.ssf}
\alias{methods.ssf} %DoNotExport
\alias{marks.ssf}
\alias{marks<-.ssf}
\alias{unmark.ssf}
\alias{as.im.ssf}
\alias{as.function.ssf}
\alias{as.ppp.ssf}
\alias{print.ssf}
\alias{summary.ssf}
\alias{range.ssf}
\alias{min.ssf}
\alias{max.ssf}
\alias{integral.ssf}
\title{Methods for Spatially Sampled Functions}
\description{
  Methods for various generic commands, for the class
  \code{"ssf"} of spatially sampled functions.
}
\usage{
\method{marks}{ssf}(x, \dots)
\method{marks}{ssf}(x, \dots) <- value
\method{unmark}{ssf}(X)
\method{as.im}{ssf}(X, \dots)
\method{as.function}{ssf}(x, \dots)
\method{as.ppp}{ssf}(X, \dots)
\method{print}{ssf}(x, \dots, brief=FALSE)
\method{summary}{ssf}(object, \dots)
\method{range}{ssf}(x, \dots)
\method{min}{ssf}(x, \dots)
\method{max}{ssf}(x, \dots)
\method{integral}{ssf}(f, domain=NULL, \dots, weights=attr(f, "weights"))
}
\arguments{
  \item{x,X,f,object}{
    A spatially sampled function (object of class \code{"ssf"}).
  }
  \item{\dots}{Arguments passed to the default method.}
  \item{brief}{Logical value controlling the amount of detail printed.}
  \item{value}{Matrix of replacement values for the function.}
  \item{domain}{Optional.
    Domain of integration.
    An object of class \code{"owin"} or \code{"tess"}.
  }
  \item{weights}{
    Optional. Numeric vector of \emph{quadrature weights}
    associated with the sample points.
  }
}
\value{
  \code{marks} returns a matrix.

  \code{marks(x) <- value} returns an object of class \code{"ssf"}.

  \code{as.owin} returns a window (object of class \code{"owin"}).

  \code{as.ppp} and \code{unmark}
  return a point pattern (object of class \code{"ppp"}).

  \code{as.function} returns a \code{function(x,y)} of class \code{"funxy"}.

  \code{print} returns \code{NULL}.

  \code{summary} returns an object of class \code{"summary.ssf"} which has
  a print method.

  \code{range} returns a numeric vector of length 2.
  \code{min} and \code{max} return a single numeric value.

  \code{integral} returns a numeric or complex value, vector, or matrix.
  \code{integral(f)} returns a numeric or complex value
  (if \code{f} had numeric or complex values)
  or a numeric vector (if \code{f} had vector values).
  If \code{domain} is a tessellation then \code{integral(f, domain)}
  returns a numeric or complex vector with one entry for each tile
  (if \code{f} had numeric or complex values)
  or a numeric matrix with one row for each tile
  (if \code{f} had vector values).
}
\details{
  An object of class \code{"ssf"}
  represents a function (real- or vector-valued) that has been
  sampled at a finite set of points.
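  For example, such an object might be constructed by sampling a
  function at random locations (a minimal sketch; any point pattern
  and any vector of values could be substituted):
  \preformatted{
    X <- runifpoint(30)      # random sample locations
    f <- ssf(X, nndist(X))   # attach a function value to each location
    marks(f)                 # matrix of sampled values
    integral(f)              # approximate integral over the window
  }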
The commands documented here are methods for this class, for the generic commands \code{\link[spatstat.geom]{marks}}, \code{\link[spatstat.geom]{marks<-}}, \code{\link[spatstat.geom]{unmark}}, \code{\link[spatstat.geom]{as.im}}, \code{\link[base]{as.function}}, \code{\link[spatstat.geom]{as.ppp}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[base]{range}}, \code{\link[base]{min}}, \code{\link[base]{max}} and \code{\link[spatstat.geom]{integral}}. } \seealso{ \code{\link{ssf}} } \examples{ g <- distfun(cells[1:4]) X <- rsyst(Window(cells), 10) f <- ssf(X, g(X)) f summary(f) marks(f) as.ppp(f) as.im(f) integral(f) integral(f, quadrats(Window(f), 3)) } \author{Adrian Baddeley} \keyword{spatial} \keyword{methods} spatstat.core/man/pairMean.Rd0000644000176200001440000000403314141452520015640 0ustar liggesusers\name{pairMean} \alias{pairMean} \title{ Mean of a Function of Interpoint Distance } \description{ Computes the mean value, or the double integral, of a specified function of the distance between two independent random points in a given window or windows. } \usage{ pairMean(fun, W, V = NULL, ..., normalise = TRUE) } \arguments{ \item{fun}{ A function in the \R language which takes one argument. } \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Further optional arguments passed to \code{\link{distcdf}} to determine the pixel resolution for the calculation and the probability distributions of the random points. } \item{normalise}{ Logical value specifying whether to calculate the mean value (\code{normalise=TRUE}, the default) or the double integral (\code{normalise=FALSE}). } } \details{ This command computes the mean value of \code{fun(T)} where \code{T} is the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. In the simplest case, the command \code{pairMean(fun, W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}. Other options are described in \code{\link{distcdf}}. The algorithm uses \code{\link{distcdf}} to compute the cumulative distribution function of \code{T}, and \code{\link{stieltjes}} to compute the mean value of \code{fun(T)}. If \code{normalise=TRUE} (the default) the result is the mean value of \code{fun(T)}. If \code{normalise=FALSE} the result is the double integral. } \value{ A single numeric value. } \author{ \adrian. } \seealso{ \code{\link{distcdf}} } \examples{ pairMean(function(d) { d^2 }, disc()) } \keyword{spatial} \keyword{math} spatstat.core/man/dimhat.Rd0000644000176200001440000000267514141452520015364 0ustar liggesusers\name{dimhat} \alias{dimhat} \title{ Estimate Dimension of Central Subspace } \description{ Given the kernel matrix that characterises a central subspace, this function estimates the dimension of the subspace. } \usage{ dimhat(M) } \arguments{ \item{M}{ Kernel of subspace. A symmetric, non-negative definite, numeric matrix, typically obtained from \code{\link{sdr}}. } } \details{ This function computes the maximum descent estimate of the dimension of the central subspace with a given kernel matrix \code{M}. The matrix \code{M} should be the kernel matrix of a central subspace, which can be obtained from \code{\link{sdr}}. 
  It must be a symmetric, non-negative definite, numeric matrix.

  The algorithm finds the eigenvalues
  \eqn{\lambda_1 \ge \ldots \ge \lambda_n}{lambda[1] >= ... >= lambda[n]}
  of \eqn{M},
  and then determines the index \eqn{k} for which
  \eqn{\lambda_k/\lambda_{k-1}}{lambda[k]/lambda[k-1]} is greatest.
}
\value{
  A single integer giving the estimated dimension.
}
\seealso{
  \code{\link{sdr}}, \code{\link{subspaceDistance}}
}
\references{
  Guan, Y. and Wang, H. (2010)
  Sufficient dimension reduction for spatial point
  processes directed by Gaussian random fields.
  \emph{Journal of the Royal Statistical Society, Series B}
  \bold{72}, 367--387.
}
\author{
  Matlab original by Yongtao Guan,
  translated to \R by Suman Rakshit.
}
\keyword{array}
\keyword{algebra}
\keyword{multivariate}
spatstat.core/man/bw.abram.Rd0000644000176200001440000001530314141452520015577 0ustar liggesusers\name{bw.abram}
\alias{bw.abram}
\title{
  Abramson's Adaptive Bandwidths
}
\description{
  Computes adaptive smoothing bandwidths according to the
  inverse-square-root rule of Abramson (1982).
}
\usage{
bw.abram(X, h0, \dots, at=c("points", "pixels"), hp = h0, pilot = NULL,
         trim=5, smoother=density.ppp)
}
\arguments{
  \item{X}{
    A point pattern (object of class \code{"ppp"})
    for which the variable bandwidths should be computed.
  }
  \item{h0}{
    A scalar value giving the global smoothing bandwidth
    in the same units as the coordinates of \code{X}.
    The default is \code{h0=\link{bw.ppl}(X)}.
  }
  \item{\dots}{
    Additional arguments passed to \code{\link[spatstat.geom]{as.im}}
    to control the pixel resolution, or passed to
    \code{\link{density.ppp}} or \code{smoother}
    to control the type of smoothing, when computing the pilot estimate.
  }
  \item{at}{
    Character string (partially matched) specifying whether
    to compute bandwidth values at the points of \code{X}
    (\code{at="points"}, the default) or to compute bandwidths
    at every pixel in a fine pixel grid (\code{at="pixels"}).
  }
  \item{hp}{
    Optional. A scalar pilot bandwidth, used for estimation
    of the pilot density if required.
    Ignored if \code{pilot} is a pixel image (object of class
    \code{"im"}); see below.
  }
  \item{pilot}{
    Optional. Specification of a pilot density
    (possibly unnormalised).
    If \code{pilot=NULL} the pilot density is computed by applying
    fixed-bandwidth density estimation to \code{X}
    using bandwidth \code{hp}.
    If \code{pilot} is a point pattern, the pilot density is
    computed using a fixed-bandwidth estimate based
    on \code{pilot} and \code{hp}.
    If \code{pilot} is a pixel image (object of class \code{"im"}),
    this is taken to be the (possibly unnormalised)
    pilot density, and \code{hp} is ignored.
  }
  \item{trim}{
    A trimming value required to curb excessively large bandwidths.
    See Details. The default is sensible in most cases.
  }
  \item{smoother}{
    Smoother for the pilot.
    A function or character string, specifying the function
    to be used to compute the pilot estimate when
    \code{pilot} is \code{NULL} or is a point pattern.
  }
}
\details{
  This function computes adaptive smoothing bandwidths
  using the methods of Abramson (1982) and Hall and Marron (1988).

  If \code{at="points"} (the default) a smoothing bandwidth is
  computed for each point in the pattern \code{X}. Alternatively if
  \code{at="pixels"} a smoothing bandwidth is computed for
  each spatial location in a pixel grid.
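  For example (a minimal sketch; \code{redwood} is a standard
  \pkg{spatstat} dataset and the global bandwidth \code{h0=0.1}
  is chosen arbitrarily):
  \preformatted{
    hj  <- bw.abram(redwood, h0=0.1)               # one bandwidth per data point
    him <- bw.abram(redwood, h0=0.1, at="pixels")  # pixel image of bandwidths
  }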
  Under the Abramson-Hall-Marron rule, the bandwidth at
  location \eqn{u} is
  \deqn{
    h(u) = \mbox{\texttt{h0}}
    * \mbox{min}[ \frac{\tilde{f}(u)^{-1/2}}{\gamma}, \mbox{\texttt{trim}} ]
  }{
    h(u) = h0 * min(f~(u)^(-1/2)/gamma, trim)
  }
  where \eqn{\tilde{f}(u)} is a pilot estimate of the spatially varying
  probability density. The variable bandwidths are rescaled by
  \eqn{\gamma}, the geometric mean of the \eqn{\tilde{f}(u)^{-1/2}}
  terms evaluated at the data; this allows the global bandwidth
  \code{h0} to be considered on the same scale as a corresponding fixed
  bandwidth. The trimming value \code{trim} has the same interpretation
  as the required `clipping' of the pilot density at some small nominal
  value (see Hall and Marron, 1988), in order to prevent extreme
  bandwidths (which can occur at very isolated observations).

  The pilot density or intensity is determined as follows:
  \itemize{
    \item If \code{pilot} is a pixel image, this is taken as the
    pilot density or intensity.
    \item If \code{pilot} is \code{NULL}, then the pilot intensity is
    computed as a fixed-bandwidth kernel
    intensity estimate using \code{\link{density.ppp}} applied to
    the data pattern \code{X} using
    the pilot bandwidth \code{hp}.
    \item
    If \code{pilot} is a different point pattern on the same spatial
    domain as \code{X}, then the pilot intensity is
    computed as a fixed-bandwidth kernel
    intensity estimate using \code{\link{density.ppp}} applied to
    \code{pilot} using the pilot bandwidth \code{hp}.
  }
  In each case the pilot density or intensity is renormalised to become
  a probability density, and then the Abramson rule is applied.

  Instead of calculating the pilot as a fixed-bandwidth density
  estimate, the user can specify another density estimation procedure
  using the argument \code{smoother}. This should be either a function
  or the character string name of a function. It will replace
  \code{\link{density.ppp}} as the function used to calculate the pilot
  estimate. The pilot estimate will be computed as
  \code{smoother(X, sigma=hp, ...)} if \code{pilot} is \code{NULL},
  or \code{smoother(pilot, sigma=hp, ...)} if \code{pilot} is a point
  pattern. If \code{smoother} does not recognise the argument name
  \code{sigma} for the smoothing bandwidth, then \code{hp} is
  effectively ignored, as shown in the Examples.
}
\value{
  Either a numeric vector of length \code{npoints(X)}
  giving the Abramson bandwidth for each point
  (when \code{at = "points"}, the default),
  or the entire pixel \code{\link[spatstat.geom]{im}}age
  of the Abramson bandwidths over the relevant spatial domain
  (when \code{at = "pixels"}).
}
\references{
  Abramson, I. (1982)
  On bandwidth variation in kernel estimates --- a square root law.
  \emph{Annals of Statistics}, \bold{10}(4), 1217--1223.\cr
  Davies, T.M. and Baddeley, A. (2018)
  Fast computation of spatially adaptive kernel estimates.
  \emph{Statistics and Computing}, \bold{28}(4), 937--956.\cr
  Davies, T.M., Marshall, J.C., and Hazelton, M.L. (2018)
  Tutorial on kernel estimation of continuous spatial
  and spatiotemporal relative risk.
  \emph{Statistics in Medicine}, \bold{37}(7), 1191--1221.\cr
  Hall, P. and Marron, J.S. (1988)
  Variable window width kernel density estimates of probability
  densities.
  \emph{Probability Theory and Related Fields}, \bold{80}, 37--49.\cr
  Silverman, B.W. (1986)
  \emph{Density Estimation for Statistics and Data Analysis}.
  Chapman and Hall, New York.
}
\author{
  Tilman M. Davies. Adapted by \adrian.
} \examples{ # 'ch' just 58 laryngeal cancer cases ch <- split(chorley)[[1]] h <- bw.abram(ch,h0=1,hp=0.7) length(h) summary(h) if(interactive()) hist(h) # calculate pilot based on all 1036 observations h.pool <- bw.abram(ch,h0=1,hp=0.7,pilot=chorley) length(h.pool) summary(h.pool) if(interactive()) hist(h.pool) # get full image used for 'h' above him <- bw.abram(ch,h0=1,hp=0.7,at="pixels") plot(him);points(ch,col="grey") # use Voronoi-Dirichlet pilot ('hp' is ignored) hvo <- bw.abram(ch, h0=1, smoother=densityVoronoi) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/envelope.Rd0000644000176200001440000010150114141452520015717 0ustar liggesusers\name{envelope} \alias{envelope} \alias{envelope.ppp} \alias{envelope.ppm} \alias{envelope.kppm} \alias{envelope.slrm} \title{Simulation Envelopes of Summary Function} \description{ Computes simulation envelopes of a summary function. } \usage{ envelope(Y, fun, \dots) \method{envelope}{ppp}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{ppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{kppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{slrm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. 
A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. This option is currently not available for \code{envelope.kppm}. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. This option is currently not available for \code{envelope.kppm}. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{clipdata}{ Logical flag indicating whether the data point pattern should be clipped to the same window as the simulated patterns, before the summary function for the data is computed. This should usually be \code{TRUE} to ensure that the data and simulations are properly comparable. } \item{start,control}{ Optional. These specify the arguments \code{start} and \code{control} of \code{rmh}, giving complete control over the simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{nrep}{ Number of iterations in the Metropolis-Hastings simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. Default is \code{use.theory=TRUE} if \code{Y} is a point pattern, or a point process model equivalent to Complete Spatial Randomness, and \code{use.theory=FALSE} otherwise. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. 
Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ An object of class \code{"envelope"} and \code{"fv"}, see \code{\link{fv.object}}, which can be printed and plotted directly. 
Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the summary function \code{fun} has been estimated } \item{obs}{ values of the summary function for the data point pattern } \item{lo}{ lower envelope of simulations } \item{hi}{ upper envelope of simulations } and \emph{either} \item{theo}{ theoretical value of the summary function under CSR (Complete Spatial Randomness, a uniform Poisson point process) if the simulations were generated according to CSR } \item{mmean}{ estimated theoretical value of the summary function, computed by averaging simulated values, if the simulations were not generated according to CSR. } Additionally, if \code{savepatterns=TRUE}, the return value has an attribute \code{"simpatterns"} which is a list containing the \code{nsim} simulated patterns. If \code{savefuns=TRUE}, the return value has an attribute \code{"simfuns"} which is an object of class \code{"fv"} containing the summary functions computed for each of the \code{nsim} simulated patterns. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. For the most basic use, if you have a point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, Kest,nsim=39))} to see the \eqn{K} function for \code{X} plotted together with the envelopes of the \eqn{K} function for 39 simulations of CSR. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"}, \code{"kppm"} and \code{"slrm"} described here. There are also methods for the classes \code{"pp3"}, \code{"lpp"} and \code{"lppm"} which are described separately under \code{\link{envelope.pp3}} and \code{envelope.lpp}. Envelopes can also be computed from other envelopes, using \code{\link{envelope.envelope}}. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{Y} is a point pattern (an object of class \code{"ppp"}) and \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. (If \code{Y} is a multitype point pattern, then the simulated patterns are also given independent random marks; the probability distribution of the random marks is determined by the relative frequencies of marks in \code{Y}.) \item If \code{Y} is a fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"}) and \code{simulate=NULL}, then this routine generates \code{nsim} simulated realisations of that model. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. It may be either \itemize{ \item an expression in the R language, typically containing a call to a random generator. This expression will be evaluated \code{nsim} times to yield \code{nsim} point patterns. For example if \code{simulate=expression(runifpoint(100))} then each simulated pattern consists of exactly 100 independent uniform random points. \item a function in the R language, typically containing a call to a random generator. 
This function will be applied repeatedly to the original data pattern \code{Y} to yield \code{nsim} point patterns. For example if \code{simulate=\link{rlabel}} then each simulated pattern was generated by evaluating \code{\link{rlabel}(Y)} and consists of a randomly-relabelled version of \code{Y}. \item a list of point patterns. The entries in this list will be taken as the simulated patterns. \item an object of class \code{"envelope"}. This should have been produced by calling \code{envelope} with the argument \code{savepatterns=TRUE}. The simulated point patterns that were saved in this object will be extracted and used as the simulated patterns for the new envelope computation. This makes it possible to plot envelopes for two different summary functions based on exactly the same set of simulated point patterns. } } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf}, \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot}, \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti}, \code{Jmulti} or \code{Kinhom}. It may also be a character string containing the name of one of these functions. The statistic \code{fun} can also be a user-supplied function; if so, then it must have arguments \code{X} and \code{r} like those in the functions listed above, and it must return an object of class \code{"fv"}. Upper and lower critical envelopes are computed in one of the following ways: \describe{ \item{pointwise:}{by default, envelopes are calculated pointwise (i.e. for each value of the distance argument \eqn{r}), by sorting the \code{nsim} simulated values, and taking the \code{m}-th lowest and \code{m}-th highest values, where \code{m = nrank}. For example if \code{nrank=1}, the upper and lower envelopes are the pointwise maximum and minimum of the simulated values. The pointwise envelopes are \bold{not} \dQuote{confidence bands} for the true value of the function! Rather, they specify the critical points for a Monte Carlo test (Ripley, 1981). The test is constructed by choosing a \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the observed function value lies outside the envelope \emph{at this value of} \eqn{r}. This test has exact significance level \code{alpha = 2 * nrank/(1 + nsim)}. } \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are determined as follows. First we calculate the theoretical mean value of the summary statistic (if we are testing CSR, the theoretical value is supplied by \code{fun}; otherwise we perform a separate set of \code{nsim2} simulations, compute the average of all these simulated values, and take this average as an estimate of the theoretical mean value). Then, for each simulation, we compare the simulated curve to the theoretical curve, and compute the maximum absolute difference between them (over the interval of \eqn{r} values specified by \code{ginterval}). This gives a deviation value \eqn{d_i}{d[i]} for each of the \code{nsim} simulations. Finally we take the \code{m}-th largest of the deviation values, where \code{m=nrank}, and call this \code{dcrit}. Then the simultaneous envelopes are of the form \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where \code{expected} is either the theoretical mean value \code{theo} (if we are testing CSR) or the estimated theoretical value \code{mmean} (if we are testing another model). 
The simultaneous critical envelopes have constant width \code{2 * dcrit}. The simultaneous critical envelopes allow us to perform a different Monte Carlo test (Ripley, 1981). The test rejects the null hypothesis if the graph of the observed function lies outside the envelope \bold{at any value of} \eqn{r}. This test has exact significance level \code{alpha = nrank/(1 + nsim)}. This test can also be performed using \code{\link{mad.test}}. } \item{based on sample moments:}{if \code{VARIANCE=TRUE}, the algorithm calculates the (pointwise) sample mean and sample variance of the simulated functions. Then the envelopes are computed as mean plus or minus \code{nSD} standard deviations. These envelopes do not have an exact significance interpretation. They are a naive approximation to the critical points of the Neyman-Pearson test assuming the summary statistic is approximately Normally distributed. } } The return value is an object of class \code{"fv"} containing the summary function for the data point pattern, the upper and lower simulation envelopes, and the theoretical expected value (exact or estimated) of the summary function for the model being tested. It can be plotted using \code{\link{plot.envelope}}. If \code{VARIANCE=TRUE} then the return value also includes the sample mean, sample variance and other quantities. Arguments can be passed to the function \code{fun} through \code{...}. This means that you simply specify these arguments in the call to \code{envelope}, and they will be passed to \code{fun}. In particular, the argument \code{correction} determines the edge correction to be used to calculate the summary statistic. See the section on Edge Corrections, and the Examples. Arguments can also be passed to the function \code{fun} through the list \code{funargs}. This mechanism is typically used if an argument of \code{fun} has the same name as an argument of \code{envelope}. The list \code{funargs} should contain entries of the form \code{name=value}, where each \code{name} is the name of an argument of \code{fun}. There is also an option, rarely used, in which different function arguments are used when computing the summary function for the data \code{Y} and for the simulated patterns. If \code{funYargs} is given, it will be used when the summary function for the data \code{Y} is computed, while \code{funargs} will be used when computing the summary function for the simulated patterns. This option is only needed in rare cases: usually the basic principle requires that the data and simulated patterns must be treated equally, so that \code{funargs} and \code{funYargs} should be identical. If \code{Y} is a fitted cluster point process model (object of class \code{"kppm"}), and \code{simulate=NULL}, then the model is simulated directly using \code{\link{simulate.kppm}}. If \code{Y} is a fitted Gibbs point process model (object of class \code{"ppm"}), and \code{simulate=NULL}, then the model is simulated by running the Metropolis-Hastings algorithm \code{\link{rmh}}. Complete control over this algorithm is provided by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. For simultaneous critical envelopes (\code{global=TRUE}) the following options are also useful: \describe{ \item{\code{ginterval}}{determines the interval of \eqn{r} values over which the deviation between curves is calculated. It should be a numeric vector of length 2. 
There is a sensible default (namely, the recommended plotting interval for \code{fun(X)}, or the range of \code{r} values if \code{r} is explicitly specified). } \item{\code{transform}}{specifies a transformation of the summary function \code{fun} that will be carried out before the deviations are computed. Such transforms are useful if \code{global=TRUE} or \code{VARIANCE=TRUE}. The \code{transform} must be an expression object using the symbol \code{.} to represent the function value (and possibly other symbols recognised by \code{\link{with.fv}}). For example, the conventional way to normalise the \eqn{K} function (Ripley, 1981) is to transform it to the \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/\pi)} and this is implemented by setting \code{transform=expression(sqrt(./pi))}. } } It is also possible to extract the summary functions for each of the individual simulated point patterns, by setting \code{savefuns=TRUE}. Then the return value also has an attribute \code{"simfuns"} containing all the summary functions for the individual simulated patterns. It is an \code{"fv"} object containing functions named \code{sim1, sim2, ...} representing the \code{nsim} summary functions. It is also possible to save the simulated point patterns themselves, by setting \code{savepatterns=TRUE}. Then the return value also has an attribute \code{"simpatterns"} which is a list of length \code{nsim} containing all the simulated point patterns. See \code{\link{plot.envelope}} and \code{\link{plot.fv}} for information about how to plot the envelopes. Different envelopes can be recomputed from the same data using \code{\link{envelope.envelope}}. Envelopes can be combined using \code{\link{pool.envelope}}. } \section{Errors and warnings}{ An error may be generated if one of the simulations produces a point pattern that is empty, or is otherwise unacceptable to the function \code{fun}. The upper envelope may be \code{NA} (plotted as plus or minus infinity) if some of the function values computed for the simulated point patterns are \code{NA}. Whether this occurs will depend on the function \code{fun}, but it usually happens when the simulated point pattern does not contain enough points to compute a meaningful value. } \section{Confidence intervals}{ Simulation envelopes do \bold{not} compute confidence intervals; they generate significance bands. If you really need a confidence interval for the true summary function of the point process, use \code{\link{lohboot}}. See also \code{\link{varblock}}. } \section{Edge corrections}{ It is common to apply a correction for edge effects when calculating a summary function such as the \eqn{K} function. Typically the user has a choice between several possible edge corrections. In a call to \code{envelope}, the user can specify the edge correction to be applied in \code{fun}, using the argument \code{correction}. See the Examples below. \describe{ \item{Summary functions in \pkg{spatstat}}{ Summary functions that are available in \pkg{spatstat}, such as \code{\link{Kest}}, \code{\link{Gest}} and \code{\link{pcf}}, have a standard argument called \code{correction} which specifies the name of one or more edge corrections. The list of available edge corrections is different for each summary function, and may also depend on the kind of window in which the point pattern is recorded. 
  In the case of \code{Kest} (the default and most frequently used
  value of \code{fun}) the best edge correction is Ripley's isotropic
  correction if the window is rectangular or polygonal,
  and the translation correction if the window is a binary mask.
  See the help files for the individual functions
  for more information.

  All the summary functions in \pkg{spatstat}
  recognise the option \code{correction="best"}
  which gives the \dQuote{best} (most accurate) available edge
  correction for that function.

  In a call to \code{envelope}, if \code{fun} is one of the
  summary functions provided in \pkg{spatstat}, then the default
  is \code{correction="best"}. This means that
  \emph{by default, the envelope will be computed
    using the \dQuote{best} available edge correction}.

  The user can override this default by specifying the argument
  \code{correction}. For example, the computation can be accelerated
  by choosing another edge correction which is less accurate
  than the \dQuote{best} one, but faster to compute.
}
\item{User-written summary functions}{
  If \code{fun} is a function written by the user,
  then \code{envelope} has to guess what to do.

  If \code{fun} has an argument called \code{correction},
  or has \code{\dots} arguments,
  then \code{envelope} assumes that the function
  can handle a correction argument. To compute the envelope,
  \code{fun} will be called with a \code{correction} argument.
  The default is \code{correction="best"},
  unless overridden in the call to \code{envelope}.

  Otherwise, if \code{fun} does not have an argument
  called \code{correction} and does not have \code{\dots} arguments,
  then \code{envelope} assumes that the function
  \emph{cannot} handle a correction argument. To compute the envelope,
  \code{fun} is called without a correction argument.
}
}
}
\references{
  Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T.,
  Milne, R.K. and Nair, G. (2014)
  On tests of spatial pattern based on simulation envelopes.
  \emph{Ecological Monographs} \bold{84}(3), 477--489.

  Cressie, N.A.C. (1991)
  \emph{Statistics for spatial data}.
  John Wiley and Sons.

  Diggle, P.J. (2003)
  \emph{Statistical analysis of spatial point patterns}.
  Arnold.

  Ripley, B.D. (1981)
  \emph{Spatial statistics}.
  John Wiley and Sons.

  Ripley, B.D. (1988)
  \emph{Statistical inference for spatial processes}.
  Cambridge University Press.

  Stoyan, D. and Stoyan, H. (1994)
  \emph{Fractals, random shapes and point fields:
    methods of geometrical statistics}.
  John Wiley and Sons.
}
\seealso{
  \code{\link{dclf.test}},
  \code{\link{mad.test}}
  for envelope-based tests.

  \code{\link{fv.object}},
  \code{\link{plot.envelope}},
  \code{\link{plot.fv}},
  \code{\link{envelope.envelope}},
  \code{\link{pool.envelope}}
  for handling envelopes.
  There are also methods for \code{print} and \code{summary}.
\code{\link{Kest}}, \code{\link{Gest}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{pcf}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ X <- simdat online <- interactive() Nsim <- if(online) 19 else 3 # Envelope of K function under CSR plot(envelope(X, nsim=Nsim)) # Translation edge correction (this is also FASTER): if(online) { plot(envelope(X, correction="translate")) } else { E <- envelope(X, nsim=Nsim, correction="translate") } # Global envelopes if(online) { plot(envelope(X, Lest, global=TRUE)) plot(envelope(X, Kest, global=TRUE, scale=function(r) { r })) } else { E <- envelope(X, Lest, nsim=Nsim, global=TRUE) E <- envelope(X, Kest, nsim=Nsim, global=TRUE, scale=function(r) { r }) E summary(E) } # Envelope of K function for simulations from Gibbs model if(online) { fit <- ppm(cells ~1, Strauss(0.05)) plot(envelope(fit)) plot(envelope(fit, global=TRUE)) } else { fit <- ppm(cells ~1, Strauss(0.05), nd=20) E <- envelope(fit, nsim=Nsim, correction="border", nrep=100) E <- envelope(fit, nsim=Nsim, correction="border", global=TRUE, nrep=100) } # Envelope of G function for simulations from fitted cluster model fit <- kppm(redwood ~1, "Thomas") if(online) { plot(envelope(fit, Gest)) plot(envelope(fit, Gest, global=TRUE)) } else { E <- envelope(fit, Gest, correction="rs", nsim=Nsim, global=TRUE, nrep=100) } # Envelope of G function under CSR if(online) { plot(envelope(X, Gest)) } else { E <- envelope(X, Gest, correction="rs", nsim=Nsim) } # Envelope of L function under CSR # L(r) = sqrt(K(r)/pi) if(online) { E <- envelope(X, Kest) } else { E <- envelope(X, Kest, correction="border", nsim=Nsim) } plot(E, sqrt(./pi) ~ r) # Simultaneous critical envelope for L function # (alternatively, use Lest) if(online) { plot(envelope(X, Kest, transform=expression(sqrt(./pi)), global=TRUE)) } else { E <- envelope(X, Kest, nsim=Nsim, correction="border", transform=expression(sqrt(./pi)), global=TRUE) } # One-sided envelope if(online) { plot(envelope(X, Lest, alternative="less")) } else { E <- envelope(X, Lest, nsim=Nsim, alternative="less") } # How to pass arguments needed to compute the summary functions: # We want envelopes for Jcross(X, "A", "B") # where "A" and "B" are types of points in the dataset 'demopat' if(online) { plot(envelope(demopat, Jcross, i="A", j="B")) } else { plot(envelope(demopat, Jcross, correction="rs", i="A", j="B", nsim=Nsim)) } # Use of `simulate' expression if(online) { plot(envelope(cells, Gest, simulate=expression(runifpoint(42)))) plot(envelope(cells, Gest, simulate=expression(rMaternI(100,0.02)))) } else { plot(envelope(cells, Gest, correction="rs", simulate=expression(runifpoint(42)), nsim=Nsim)) plot(envelope(cells, Gest, correction="rs", simulate=expression(rMaternI(100, 0.02)), nsim=Nsim, global=TRUE)) } # Use of `simulate' function if(online) { plot(envelope(amacrine, Kcross, simulate=rlabel)) } else { plot(envelope(amacrine, Kcross, simulate=rlabel, nsim=Nsim)) } # Envelope under random toroidal shifts if(online) { plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.25)))) } # Envelope under random shifts with erosion if(online) { plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.1, edge="erode")))) } # Envelope of INHOMOGENEOUS K-function with fitted trend # The following is valid. # Setting lambda=fit means that the fitted model is re-fitted to # each simulated pattern to obtain the intensity estimates for Kinhom. 
# (lambda=NULL would also be valid) fit <- kppm(redwood ~1, clusters="MatClust") if(online) { plot(envelope(fit, Kinhom, lambda=fit, nsim=19)) } else { envelope(fit, Kinhom, lambda=fit, nsim=Nsim) } # Note that the principle of symmetry, essential to the validity of # simulation envelopes, requires that both the observed and # simulated patterns be subjected to the same method of intensity # estimation. In the following example it would be incorrect to set the # argument 'lambda=red.dens' in the envelope command, because this # would mean that the inhomogeneous K functions of the simulated # patterns would be computed using the intensity function estimated # from the original redwood data, violating the symmetry. There is # still a concern about the fact that the simulations are generated # from a model that was fitted to the data; this is only a problem in # small datasets. if(online) { red.dens <- density(redwood, sigma=bw.diggle, positive=TRUE) plot(envelope(redwood, Kinhom, sigma=bw.diggle, simulate=expression(rpoispp(red.dens)))) } # Precomputed list of point patterns if(online) { nX <- npoints(X) PatList <- list() for(i in 1:Nsim) PatList[[i]] <- runifpoint(nX) E <- envelope(X, Kest, nsim=19, simulate=PatList) } else { PatList <- list() for(i in 1:Nsim) PatList[[i]] <- runifpoint(10) } E <- envelope(X, Kest, nsim=Nsim, simulate=PatList) # re-using the same point patterns # EK <- envelope(X, Kest, savepatterns=TRUE) # EG <- envelope(X, Gest, simulate=EK) if(online) { EK <- envelope(X, Kest, nsim=Nsim, savepatterns=TRUE) EG <- envelope(X, Gest, nsim=Nsim, simulate=EK) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/localpcf.Rd0000644000176200001440000001563214141452520015676 0ustar liggesusers\name{localpcf} \alias{localpcf} \alias{localpcfinhom} \title{Local pair correlation function} \description{ Computes individual contributions to the pair correlation function from each data point. } \usage{ localpcf(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, rvalue=NULL) localpcfinhom(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE, rvalue=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{delta}{ Smoothing bandwidth for pair correlation. The halfwidth of the Epanechnikov kernel. } \item{rmax}{ Optional. Maximum value of distance \eqn{r} for which pair correlation values \eqn{g(r)} should be computed. } \item{nr}{ Optional. Number of values of distance \eqn{r} for which pair correlation \eqn{g(r)} should be computed. } \item{stoyan}{ Optional. The value of the constant \eqn{c} in Stoyan's rule of thumb for selecting the smoothing bandwidth \code{delta}. } \item{lambda}{ Optional. Values of the estimated intensity function, for the inhomogeneous pair correlation. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{sigma,varcov,\dots}{ These arguments are ignored by \code{localpcf} but are passed by \code{localpcfinhom} (when \code{lambda=NULL}) to the function \code{\link{density.ppp}} to control the kernel smoothing estimation of \code{lambda}. 
} \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the local pair correlation should be computed. } } \details{ \code{localpcf} computes the contribution, from each individual data point in a point pattern \code{X}, to the empirical pair correlation function of \code{X}. These contributions are sometimes known as LISA (local indicator of spatial association) functions based on pair correlation. \code{localpcfinhom} computes the corresponding contribution to the \emph{inhomogeneous} empirical pair correlation function of \code{X}. Given a spatial point pattern \code{X}, the local pcf \eqn{g_i(r)}{g[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ g_i(r) = \frac a {2 \pi n} \sum_j k(d_{i,j} - r) }{ g[i](r) = (a/(2 * pi * n)) * sum[j] k(d[i,j] - r) } where the sum is over all points \eqn{j \neq i}{j != i}, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{d_{i,j}}{d[i,j]} is the distance between points \code{i} and \code{j}. Here \code{k} is the Epanechnikov kernel, \deqn{ k(t) = \frac 3 { 4\delta} \max(0, 1 - \frac{t^2}{\delta^2}). }{ k(t) = (3/(4*delta)) * max(0, 1 - t^2/delta^2). } Edge correction is performed using the border method (for the sake of computational efficiency): the estimate \eqn{g_i(r)}{g[i](r)} is set to \code{NA} if \eqn{r > b_i}{r > b[i]}, where \eqn{b_i}{b[i]} is the distance from point \eqn{i} to the boundary of the observation window. The smoothing bandwidth \eqn{\delta}{delta} may be specified. If not, it is chosen by Stoyan's rule of thumb \eqn{\delta = c/\sqrt{\hat\lambda}}{delta = c/sqrt(lambda)} where \eqn{\hat\lambda = n/a}{lambda = n/a} is the estimated intensity and \eqn{c} is a constant, usually taken to be 0.15. The value of \eqn{c} is controlled by the argument \code{stoyan}. For \code{localpcfinhom}, the optional argument \code{lambda} specifies the values of the estimated intensity function. If \code{lambda} is given, it should be either a numeric vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If \code{lambda} is not given, then it will be estimated using a leave-one-out kernel density smoother as described in \code{\link{pcfinhom}}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the local pair correlation functions have been estimated } \item{theo}{the theoretical value \eqn{g(r) = 1} for a stationary Poisson process } together with columns containing the values of the local pair correlation function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{localK}}, \code{\link{localKinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}} } \examples{ X <- ponderosa g <- localpcf(X, stoyan=0.5) colo <- c(rep("grey", npoints(X)), "blue") a <- plot(g, main=c("local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) # plot only the local pair correlation function for point number 7 plot(g, est007 ~ r) # Extract the local pair correlation at distance 15 metres, for each point g15 <- localpcf(X, rvalue=15, stoyan=0.5) g15[1:10] # Check that the value for point 7 agrees with the curve for point 7: points(15, g15[7], col="red") # Inhomogeneous gi <- localpcfinhom(X, stoyan=0.5) a <- plot(gi, main=c("inhomogeneous local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Kscaled.Rd0000644000176200001440000002165314141452520015461 0ustar liggesusers\name{Kscaled} \alias{Kscaled} \alias{Lscaled} \title{Locally Scaled K-function} \description{ Estimates the locally-rescaled \eqn{K}-function of a point process. } \usage{ Kscaled(X, lambda=NULL, \dots, r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) Lscaled(\dots) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the locally scaled \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a \code{function(x,y)} which can be evaluated to give the intensity value at any location, or a fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed from \code{Lscaled} to \code{Kscaled} and from \code{Kscaled} to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the locally scaled \eqn{K} function should be evaluated. (These are rescaled distances.) Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{rmax}{ maximum value of the argument \eqn{r} that should be used. (This is the rescaled distance). } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. 
} \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which the locally scaled \eqn{K} function \eqn{K_{\rm scaled}(r)}{Kscaled(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. } \details{ \code{Kscaled} computes an estimate of the \eqn{K} function for a locally scaled point process. \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. Locally scaled point processes are a class of models for inhomogeneous point patterns, introduced by Hahn et al (2003). They include inhomogeneous Poisson processes, and many other models. The template \eqn{K} function of a locally-scaled process is a counterpart of the ``ordinary'' Ripley \eqn{K} function, in which the distances between points of the process are measured on a spatially-varying scale (such that the locally rescaled process has unit intensity). The template \eqn{K} function is an indicator of interaction between the points. For an inhomogeneous Poisson process, the theoretical template \eqn{K} function is approximately equal to \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Values \eqn{K_{\rm scaled}(r) > \pi r^2}{Kscaled(r) > pi * r^2} are suggestive of clustering. \code{Kscaled} computes an estimate of the template \eqn{K} function and \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. The locally scaled interpoint distances are computed using an approximation proposed by Hahn (2007). The Euclidean distance between two points is multiplied by the average of the square roots of the intensity values at the two points. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. If \code{renormalise=TRUE}, the estimated intensity \code{lambda} is multiplied by \eqn{c^{normpower/2}}{c^(normpower/2)} before performing other calculations, where \eqn{c = \mbox{area}(W) / \sum_i 1/\lambda(x_i)}{c = area(W)/sum[i] (1/lambda(x[i]))}. This renormalisation has about the same effect as in \code{\link{Kinhom}}, reducing the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. Edge corrections are used to correct bias in the estimation of \eqn{K_{\rm scaled}}{Kscaled}. First the interpoint distances are rescaled, and then edge corrections are applied as in \code{\link{Kest}}. See \code{\link{Kest}} for details of the edge corrections and the options for the argument \code{correction}. The pair correlation function can also be applied to the result of \code{Kscaled}; see \code{\link{pcf}} and \code{\link{pcf.fv}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. Hahn, U. (2007) \emph{Global and Local Scaling in the Statistics of Spatial Point Processes}. Habilitationsschrift, Universitaet Augsburg. Hahn, U., Jensen, E.B.V., van Lieshout, M.N.M. and Nielsen, L.S. (2003) Inhomogeneous spatial point processes by location-dependent scaling. \emph{Advances in Applied Probability} \bold{35}, 319--336. \Prokesova, M., Hahn, U. and Vedel Jensen, E.B. (2006) Statistics for locally scaled point patterns. In A. Baddeley, P. Gregori, J. Mateu, R. Stoica and D. Stoyan (eds.) \emph{Case Studies in Spatial Point Pattern Modelling}. Lecture Notes in Statistics 185. New York: Springer Verlag. Pages 99--123. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(bronzefilter) X <- unmark(bronzefilter) K <- Kscaled(X) fit <- ppm(X ~ x) lam <- predict(fit) K <- Kscaled(X, lam) } \author{Ute Hahn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/relrisk.ppp.Rd0000644000176200001440000002442414141452520016363 0ustar liggesusers\name{relrisk.ppp} \alias{relrisk.ppp} \title{ Nonparametric Estimate of Spatially-Varying Relative Risk } \description{ Given a multitype point pattern, this function estimates the spatially-varying probability of each type of point, or the ratios of such probabilities, using kernel smoothing. 
The default smoothing bandwidth is selected by cross-validation. } \usage{ \method{relrisk}{ppp}(X, sigma = NULL, ..., at = c("pixels", "points"), weights = NULL, varcov = NULL, relative=FALSE, adjust=1, edge=TRUE, diggle=FALSE, se=FALSE, casecontrol=TRUE, control=1, case) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{sigma}{ Optional. The numeric value of the smoothing bandwidth (the standard deviation of the isotropic Gaussian smoothing kernel). Alternatively \code{sigma} may be a function which can be used to select a different bandwidth for each type of point. See Details. } \item{\dots}{ Arguments passed to \code{\link{bw.relrisk}} to select the bandwidth, or passed to \code{\link{density.ppp}} to control the pixel resolution. } \item{at}{ Character string specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{weights}{ Optional. Weights for the data points of \code{X}. A numeric vector, an \code{expression}, or a pixel image. } \item{varcov}{ Optional. Variance-covariance matrix of the anisotropic Gaussian smoothing kernel. Incompatible with \code{sigma}. } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppp} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1 - p(u))}. 
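For example, a minimal sketch of the bivariate case (this assumes the \code{chorley} dataset supplied in \pkg{spatstat.data}, whose marks are \code{"larynx"} and \code{"lung"}; the bandwidth value is arbitrary): \preformatted{
  ## spatially-varying probability that a point is a larynx case,
  ## treating the lung points as controls
  p <- relrisk(chorley, sigma=1, case="larynx", control="lung")

  ## corresponding relative risk r(u) = p(u)/(1 - p(u))
  r <- relrisk(chorley, sigma=1, case="larynx", control="lung",
               relative=TRUE)
}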
If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Estimation is performed by a simple Nadaraya-Watson type kernel smoother (Diggle, 2003). The smoothing bandwidth can be specified in any of the following ways: \itemize{ \item \code{sigma} is a single numeric value, giving the standard deviation of the isotropic Gaussian kernel. \item \code{sigma} is a numeric vector of length 2, giving the standard deviations in the \eqn{x} and \eqn{y} directions of a Gaussian kernel. \item \code{varcov} is a 2 by 2 matrix giving the variance-covariance matrix of the Gaussian kernel. \item \code{sigma} is a \code{function} which selects the bandwidth. Bandwidth selection will be applied \bold{separately to each type of point}. An example of such a function is \code{\link{bw.diggle}}. \item \code{sigma} and \code{varcov} are both missing or null. Then a \bold{common} smoothing bandwidth \code{sigma} will be selected by cross-validation using \code{\link{bw.relrisk}}. \item An infinite smoothing bandwidth, \code{sigma=Inf}, is permitted and yields a constant estimate of relative risk. } If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, \emph{assuming a Poisson process}. The optional argument \code{weights} may provide numerical weights for the points of \code{X}. It should be a numeric vector of length equal to \code{npoints(X)}. The argument \code{weights} can also be an \code{expression}. It will be evaluated in the data frame \code{as.data.frame(X)} to obtain a vector of weights. The expression may involve the symbols \code{x} and \code{y} representing the Cartesian coordinates, and the symbol \code{marks} representing the mark values. The argument \code{weights} can also be a pixel image (object of class \code{"im"}); numerical weights for the data points will be extracted from this image (by looking up the pixel values at the locations of the data points in \code{X}). } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \seealso{ There is another method \code{\link{relrisk.ppm}} for point process models which computes \emph{parametric} estimates of relative risk, using the fitted model. See also \code{\link{bw.relrisk}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}, \code{\link{eval.im}} } \examples{ p.oak <- relrisk(urkiola, 20) if(interactive()) { plot(p.oak, main="proportion of oak") plot(eval.im(p.oak > 0.3), main="More than 30 percent oak") plot(split(lansing), main="Lansing Woods") p.lan <- relrisk(lansing, 0.05, se=TRUE) plot(p.lan$estimate, main="Lansing Woods species probability") plot(p.lan$SE, main="Lansing Woods standard error") wh <- im.apply(p.lan$estimate, which.max) types <- levels(marks(lansing)) wh <- eval.im(types[wh]) plot(wh, main="Most common species") } } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/inforder.family.Rd0000644000176200001440000000200514141452520017173 0ustar liggesusers\name{inforder.family} \alias{inforder.family} \title{Infinite Order Interaction Family} \description{ An object describing the family of all Gibbs point processes with infinite interaction order. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have infinite order of interaction, such as the area-interaction process \code{\link{AreaInter}}. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ \code{\link{AreaInter}} to create the area interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/response.Rd0000644000176200001440000000361614141452520015750 0ustar liggesusers\name{response} \alias{response} \alias{response.lm} \alias{response.glm} \alias{response.ppm} \alias{response.dppm} \alias{response.kppm} \alias{response.slrm} \alias{response.mppm} \title{ Extract the Values of the Response from a Fitted Model } \description{ Given a fitted model (of any kind), this function extracts the values of the response variable. } \usage{ response(object) \method{response}{lm}(object) \method{response}{glm}(object) \method{response}{ppm}(object) \method{response}{kppm}(object) \method{response}{dppm}(object) \method{response}{slrm}(object) \method{response}{mppm}(object) } \arguments{ \item{object}{ A fitted model (object of class \code{"lm"}, \code{"glm"}, \code{"ppm"}, \code{"kppm"}, \code{"dppm"}, \code{"slrm"} or \code{"mppm"} or some other class). } } \details{ For fitted linear models of class \code{"lm"} and fitted generalized linear models of class \code{"glm"}, the numerical values of the response variable are extracted if they are available, and otherwise \code{NULL} is returned. For fitted point process models of class \code{"ppm"}, \code{"kppm"}, \code{"dppm"}, \code{"slrm"} or \code{"lppm"}, the original data point pattern is extracted. For a fitted point process model of class \code{"mppm"}, the list of original data point patterns is extracted. } \value{ For \code{response.lm} and \code{response.glm}, a numeric vector, or \code{NULL}. For \code{response.ppm}, \code{response.kppm}, \code{response.dppm} and \code{response.slrm}, a two-dimensional spatial point pattern (class \code{"ppp"}). For \code{response.mppm}, a list of two-dimensional spatial point patterns (objects of class \code{"ppp"}). The list also belongs to classes \code{"solist"} and \code{"ppplist"}. } \author{ \adrian. } \examples{ fit <- ppm(cells ~ x) response(fit) } \keyword{manip} \keyword{models} spatstat.core/man/Smooth.Rd0000644000176200001440000000167214141452520015363 0ustar liggesusers\name{Smooth} \alias{Smooth} \title{Spatial smoothing of data} \description{ Generic function to perform spatial smoothing of spatial data. } \usage{ Smooth(X, ...) } \arguments{ \item{X}{Some kind of spatial data} \item{\dots}{Arguments passed to methods.} } \details{ This generic function calls an appropriate method to perform spatial smoothing on the spatial dataset \code{X}. Methods for this function include \itemize{ \item \code{\link{Smooth.ppp}} for point patterns \item \code{\link{Smooth.msr}} for measures \item \code{\link{Smooth.fv}} for function value tables } } \seealso{ \code{\link{Smooth.ppp}}, \code{\link{Smooth.im}}, \code{\link{Smooth.msr}}, \code{\link{Smooth.fv}}. } \value{ An object containing smoothed values of the input data, in an appropriate format. See the documentation for the methods. } \author{ \adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/bits.envelope.Rd0000644000176200001440000001121414141452520016660 0ustar liggesusers\name{bits.envelope} \alias{bits.envelope} \title{ Global Envelopes for Balanced Independent Two-Stage Test } \description{ Computes the global envelopes corresponding to the balanced independent two-stage Monte Carlo test of goodness-of-fit. 
} \usage{ bits.envelope(X, \dots, nsim = 19, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. } \item{nsim}{ Number of simulated patterns to be generated in each stage, that is, the number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsim} simulated realisations, together with one independent set of \code{nsim} realisations, so there will be a total of \code{nsim * (nsim + 1)} simulations. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. } } \details{ Computes global simulation envelopes corresponding to the balanced independent two-stage Monte Carlo test of goodness-of-fit described by Baddeley et al (2017). The envelopes are described in Baddeley et al (2019). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. This command is similar to \code{\link{dg.envelope}} which corresponds to the Dao-Genton test of goodness-of-fit. It was shown in Baddeley et al (2017) that the Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.envelope}} in this case. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis} \bold{114}, 75--87. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G.M. and Rakshit, S. 
(2019) Pushing the envelope: extensions of graphical Monte Carlo tests. In preparation. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian. } \seealso{ \code{\link{dg.envelope}}, \code{\link{bits.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- bits.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- bits.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- bits.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/plot.plotppm.Rd0000644000176200001440000000635614141452520016566 0ustar liggesusers\name{plot.plotppm} \alias{plot.plotppm} \title{Plot a plotppm Object Created by plot.ppm} \description{ The function plot.ppm produces objects which specify plots of fitted point process models. The function plot.plotppm carries out the actual plotting of these objects. } \usage{ \method{plot}{plotppm}(x, data = NULL, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how = c("persp", "image", "contour"), \dots, pppargs) } \arguments{ \item{x}{ An object of class \code{plotppm} produced by \code{\link{plot.ppm}()} }. \item{data}{ The point pattern (an object of class \code{ppp}) to which the point process model was fitted (by \code{\link{ppm}}). } \item{trend}{ Logical scalar; should the trend component of the fitted model be plotted? } \item{cif}{ Logical scalar; should the complete conditional intensity of the fitted model be plotted? } \item{se}{ Logical scalar; should the estimated standard error of the fitted intensity be plotted? } \item{pause}{ Logical scalar indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. } \item{how}{ Character string or character vector indicating the style or styles of plots to be performed. } \item{\dots}{ Extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } \item{pppargs}{ List of extra arguments passed to \code{\link{plot.ppp}} when displaying the original point pattern data. } } \details{ If argument \code{data} is supplied then the point pattern will be superimposed on the image and contour plots. Sometimes a fitted model does not have a trend component, or the trend component may constitute all of the conditional intensity (if the model is Poisson). In such cases the object \code{x} will not contain a trend component, or will contain only a trend component. This will also be the case if one of the arguments \code{trend} and \code{cif} was set equal to \code{FALSE} in the call to \code{plot.ppm()} which produced \code{x}. If this is so then only the item which is present will be plotted. Explicitly setting \code{trend=TRUE}, or \code{cif=TRUE}, respectively, will then give an error. } \value{ None. } \section{Warning}{ Arguments which are passed to \code{persp}, \code{image}, and \code{contour} via the \dots argument get passed to any of the other functions listed in the \code{how} argument, and won't be recognized by them. This leads to a lot of annoying but harmless warning messages. Arguments to \code{persp} may be supplied via \code{\link{spatstat.options}()} which alleviates the warning messages in this instance. 
} \author{\adrian and \rolf } \seealso{ \code{\link{plot.ppm}()} } \examples{ if(interactive()) { m <- ppm(cells ~ 1, Strauss(0.05)) mpic <- plot(m) # Perspective plot only, with altered parameters: plot(mpic,how="persp", theta=-30,phi=40,d=4) # All plots, with altered parameters for perspective plot: op <- spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) plot(mpic) # Revert spatstat.options(op) } } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.core/man/reload.or.compute.Rd0000644000176200001440000000527014141452520017450 0ustar liggesusers\name{reload.or.compute} \alias{reload.or.compute} \title{ Compute Unless Previously Saved } \description{ If the designated file does not yet exist, evaluate the expression and save the results in the file. If the file already exists, re-load the results from the file. } \usage{ reload.or.compute(filename, expr, objects = NULL, destination = parent.frame(), force=FALSE, verbose=TRUE) } \arguments{ \item{filename}{ Name of data file. A character string. } \item{expr}{ \R language expression to be evaluated. } \item{objects}{ Optional character vector of names of objects to be saved in \code{filename} after evaluating \code{expr}, or names of objects that should be present in \code{filename} when loaded. } \item{destination}{ Environment in which the resulting objects should be assigned. } \item{force}{ Logical value indicating whether to perform the computation in any case. } \item{verbose}{ Logical value indicating whether to print a message indicating whether the data were recomputed or reloaded from the file. } } \details{ This facility is useful for saving, and later re-loading, the results of time-consuming computations. It would typically be used in an \R script file or an \code{\link[utils]{Sweave}} document. If the file called \code{filename} does not yet exist, then \code{expr} will be evaluated and the results will be saved in \code{filename}. The optional argument \code{objects} specifies which results should be saved to the file: the default is to save all objects that were created by evaluating the expression. If the file called \code{filename} already exists, then it will be loaded. The optional argument \code{objects} specifies the names of objects that should be present in the file; a warning is issued if any of them are missing. The resulting objects can be assigned into any desired \code{destination}. The default behaviour is equivalent to evaluating \code{expr} in the current environment. If \code{force=TRUE} then \code{expr} will be evaluated (regardless of whether the file already exists or not) and the results will be saved in \code{filename}, overwriting any previously-existing file with that name. This is a convenient way to force the code to re-compute everything in an \R script file or \code{\link[utils]{Sweave}} document. } \value{ Character vector (invisible) giving the names of the objects computed or loaded. } \examples{ \dontrun{ if(FALSE) { reload.or.compute("mydata.rda", { x <- very.long.computation() y <- 42 }) } } } \author{\adrian and \rolf } \keyword{utilities} spatstat.core/man/auc.Rd0000644000176200001440000000726414141452520014665 0ustar liggesusers\name{auc} \alias{auc} \alias{auc.ppp} \alias{auc.ppm} \alias{auc.kppm} \alias{auc.slrm} \title{ Area Under ROC Curve } \description{ Compute the AUC (area under the Receiver Operating Characteristic curve) for a fitted point process model. 
} \usage{ auc(X, \dots) \method{auc}{ppp}(X, covariate, \dots, high = TRUE) \method{auc}{ppm}(X, \dots) \method{auc}{kppm}(X, \dots) \method{auc}{slrm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the AUC, the area under the Receiver Operating Characteristic curve. The ROC itself is computed by \code{\link{roc}}. For a point pattern \code{X} and a covariate \code{Z}, the AUC is a numerical index that measures the ability of the covariate to separate the spatial domain into areas of high and low density of points. Let \eqn{x_i}{x[i]} be a randomly-chosen data point from \code{X} and \eqn{U} a randomly-selected location in the study region. The AUC is the probability that \eqn{Z(x_i) > Z(U)}{Z(x[i]) > Z(U)} assuming \code{high=TRUE}. That is, AUC is the probability that a randomly-selected data point has a higher value of the covariate \code{Z} than does a randomly-selected spatial location. The AUC is a number between 0 and 1. A value of 0.5 indicates a complete lack of discriminatory power. For a fitted point process model \code{X}, the AUC measures the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. Suppose \eqn{\lambda(u)}{lambda(u)} is the intensity function of the model. The AUC is the probability that \eqn{\lambda(x_i) > \lambda(U)}{lambda(x[i]) > lambda(U)}. That is, AUC is the probability that a randomly-selected data point has higher predicted intensity than does a randomly-selected spatial location. The AUC is \bold{not} a measure of the goodness-of-fit of the model (Lobo et al, 2007). (For spatial logistic regression models (class \code{"slrm"}) replace \dQuote{intensity} by \dQuote{probability of presence} in the text above.) } \value{ Numeric. For \code{auc.ppp} and \code{auc.lpp}, the result is a single number giving the AUC value. For \code{auc.ppm}, \code{auc.kppm} and \code{auc.lppm}, the result is a numeric vector of length 2 giving the AUC value and the theoretically expected AUC value for this model. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2), 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{roc}} } \examples{ fit <- ppm(swedishpines ~ x+y) auc(fit) auc(swedishpines, "x") } \keyword{spatial} spatstat.core/man/dppeigen.Rd0000644000176200001440000000124314141452520015677 0ustar liggesusers\name{dppeigen} \alias{dppeigen} \title{Internal function calculating eig and index} \description{This function is mainly for internal package use and is usually not called by the user.} \usage{dppeigen(model, trunc, Wscale, stationary = FALSE)} \arguments{ \item{model}{object of class \code{"detpointprocfamily"} } \item{trunc}{numeric giving the truncation} \item{Wscale}{numeric giving the scale of the window relative to a unit box} \item{stationary}{logical indicating whether the stationarity of the model should be used (only works in dimension 2).} } \value{A list} \author{ \adrian, \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/circdensity.Rd0000644000176200001440000000262714141452520016433 0ustar liggesusers\name{circdensity} \alias{circdensity} \title{ Density Estimation for Circular Data } \description{ Computes a kernel smoothed estimate of the probability density for angular data. } \usage{ circdensity(x, sigma = "nrd0", \dots, bw = NULL, weights=NULL, unit = c("degree", "radian")) } \arguments{ \item{x}{ Numeric vector, containing angular data. } \item{sigma}{ Smoothing bandwidth, or bandwidth selection rule, passed to \code{\link[stats]{density.default}}. } \item{bw}{Alternative to \code{sigma} for consistency with other functions.} \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}, such as \code{kernel} and \code{weights}. } \item{weights}{ Optional numeric vector of weights for the data in \code{x}. } \item{unit}{ The unit of angle in which \code{x} is expressed. } } \details{ The angular values \code{x} are smoothed using (by default) the wrapped Gaussian kernel with standard deviation \code{sigma}. } \value{ An object of class \code{"density"} (produced by \code{\link[stats]{density.default}}) which can be plotted by \code{plot} or by \code{\link{rose}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{density.default}}, \code{\link{rose}}. } \examples{ ang <- runif(1000, max=360) rose(circdensity(ang, 12)) } \keyword{nonparametric} \keyword{smooth} spatstat.core/man/localKcross.Rd0000644000176200001440000001241714141452520016370 0ustar liggesusers\name{localKcross} \alias{localKcross} \alias{localLcross} \title{Local Multitype K Function (Cross-Type)} \description{ For a multitype point pattern, computes the cross-type version of the local K function. } \usage{ localKcross(X, from, to, \dots, rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localLcross(X, from, to, \dots, rmax = NULL, correction = "Ripley") } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} with marks which are a factor). } \item{\dots}{ Further arguments passed from \code{localLcross} to \code{localKcross}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{to}{ Type of points to which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{correction}{ String specifying the edge correction to be applied. 
Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{ Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ Given a multitype spatial point pattern \code{X}, the local cross-type \eqn{K} function \code{localKcross} is the local version of the multitype \eqn{K} function \code{\link{Kcross}}. Recall that \code{Kcross(X, from, to)} is a sum of contributions from all pairs of points in \code{X} where the first point belongs to type \code{from} and the second point belongs to type \code{to}. The \emph{local} cross-type \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and it consists of all the contributions to the cross-type \eqn{K} function that originate from point \code{X[i]}: \deqn{ K_{i,from,to}(r) = \frac a {n-1} \sum_j e_{ij} }{ K[i,from,to](r) = (a/(n-1)) * sum[j] e[i,j] } where the sum is over all points \eqn{j \neq i}{j != i} belonging to type \code{to}, that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{K_{i,from,to}(r)}{K[i,from,to](r)} can also be interpreted as one of the summands that contributes to the global estimate of the \code{\link{Kcross}} function. By default, the function \eqn{K_{i,from,to}(r)}{K[i,from,to](r)} is computed for a range of \eqn{r} values for each point \eqn{i} belonging to type \code{from}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} belonging to type \code{from}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X} belonging to type \code{from}. The local cross-type \eqn{L} function \code{localLcross} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern that belong to type \code{from}. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{localK}}, \code{\link{localL}}. 
Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{\link{localKcross.inhom}} and \code{\link{localLinhom}}. } \examples{ X <- amacrine # compute all the local Lcross functions L <- localLcross(X) # plot all the local Lcross functions against r plot(L, main="local Lcross functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 0.1 metres L12 <- localLcross(X, rvalue=0.1) } \author{ \ege and \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pool.Rd0000644000176200001440000000171714141452520015063 0ustar liggesusers\name{pool} \alias{pool} \title{ Pool Data } \description{ Pool the data from several objects of the same class. } \usage{ pool(...) } \arguments{ \item{\dots}{ Objects of the same type. } } \details{ The function \code{pool} is generic. There are methods for several classes, listed below. \code{pool} is used to combine the data from several objects of the same type, and to compute statistics based on the combined dataset. It may be used to pool the estimates obtained from replicated datasets. It may also be used in high-performance computing applications, when the objects \code{\dots} have been computed on different processors or in different batch runs, and we wish to combine them. } \value{ An object of the same class as the arguments \code{\dots}. } \seealso{ \code{\link{pool.envelope}}, \code{\link{pool.fasp}}, \code{\link{pool.rat}}, \code{\link{pool.fv}} } \author{\adrian and \rolf } \keyword{spatial} spatstat.core/man/rGaussPoisson.Rd0000644000176200001440000000427614141452520016734 0ustar liggesusers\name{rGaussPoisson} \alias{rGaussPoisson} \title{Simulate Gauss-Poisson Process} \description{ Generate a random point pattern, a simulated realisation of the Gauss-Poisson Process. } \usage{ rGaussPoisson(kappa, r, p2, win = owin(c(0,1),c(0,1)), \dots, nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{r}{ Diameter of each cluster that consists of exactly 2 points. } \item{p2}{ Probability that a cluster contains exactly 2 points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of the point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the Gauss-Poisson point process inside the window \code{win}. The process is constructed by first generating a Poisson point process of parent points with intensity \code{kappa}. Then each parent point is either retained (with probability \code{1 - p2}) or replaced by a pair of points at a fixed distance \code{r} apart (with probability \code{p2}). In the case of clusters of 2 points, the line joining the two points has uniform random orientation. 
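Each parent therefore contributes either one point (with probability \code{1 - p2}) or two points (with probability \code{p2}), so if \code{kappa} is constant the intensity of the resulting pattern is approximately \code{kappa * (1 + p2)}. A minimal sketch checking this (the parameter values are arbitrary): \preformatted{
  pp <- rGaussPoisson(kappa=30, r=0.07, p2=0.5)
  ## expected intensity is about 30 * (1 + 0.5) = 45
  intensity(pp)
}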
In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rNeymanScott}} } \examples{ pp <- rGaussPoisson(30, 0.07, 0.5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/densityHeat.Rd0000644000176200001440000000244214141452520016367 0ustar liggesusers\name{densityHeat} \alias{densityHeat} \title{ Diffusion Estimate of Point Pattern Intensity } \description{ Computes a diffusion estimate of intensity for a point pattern. } \usage{ densityHeat(x, sigma, \dots) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"} or another class). } \item{sigma}{ Smoothing bandwidth. Usually a single number giving the equivalent standard deviation of the smoother. } \item{\dots}{ Additional arguments depending on the method. } } \details{ The generic function \code{densityHeat} computes an estimate of point process intensity using a diffusion kernel method. Further details depend on the class of point pattern \code{x}. See the help file for the appropriate method. } \value{ Depends on the class of \code{x}. } \seealso{ For two-dimensional point patterns (objects of class \code{"ppp"}), the diffusion kernel estimator is \code{\link{densityHeat.ppp}}. The usual kernel estimator is \code{\link{density.ppp}}, and the tessellation-based estimator is \code{\link{adaptive.density}}. %% For point patterns on a linear network (objects of class %% \code{"lpp"}), see \code{\link[spatstat.linnet]{densityHeat.lpp}}. } \author{ Adrian Baddeley and Tilman Davies. } \keyword{spatial} \keyword{smooth} spatstat.core/man/psst.Rd0000644000176200001440000001213414141452520015076 0ustar liggesusers\name{psst} \alias{psst} \title{ Pseudoscore Diagnostic For Fitted Model against General Alternative } \description{ Given a point process model fitted to a point pattern dataset, and any choice of functional summary statistic, this function computes the pseudoscore test statistic of goodness-of-fit for the model. } \usage{ psst(object, fun, r = NULL, breaks = NULL, ..., model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef=NULL, hi.res=NULL, funargs = list(correction="best"), verbose=TRUE) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{fun}{ Summary function to be applied to each point pattern. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{S(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. 
Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{funargs}{ List of additional arguments to be passed to \code{fun}. } \item{verbose}{ Logical value determining whether to print progress reports during the computation. } } \details{ Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. Given a functional summary statistic \eqn{S}, consider a family of alternative models obtained by exponential tilting of the null model by \eqn{S}. The pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r) - \int_W \Delta S(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x, r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. This algorithm computes \eqn{V(r)} by direct evaluation of the sum and integral. It is computationally intensive, but it is available for any summary statistic \eqn{S(r)}. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Special cases: \code{\link{psstA}}, \code{\link{psstG}}. Alternative functions: \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ if(live <- interactive()) { fit0 <- ppm(cells ~ 1) } else { fit0 <- ppm(cells ~ 1, nd=8) } G0 <- psst(fit0, Gest) G0 if(live) plot(G0) } \keyword{spatial} \keyword{models} spatstat.core/man/rho2hat.Rd0000644000176200001440000000726714141452520015467 0ustar liggesusers\name{rho2hat} \alias{rho2hat} \title{ Smoothed Relative Density of Pairs of Covariate Values } \description{ Given a point pattern and two spatial covariates \eqn{Z_1}{Z1} and \eqn{Z_2}{Z2}, construct a smooth estimate of the relative risk of the pair \eqn{(Z_1,Z_2)}{(Z1, Z2)}. 
} \usage{ rho2hat(object, cov1, cov2, ..., method=c("ratio", "reweight")) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}). } \item{cov1,cov2}{ The two covariates. Each argument is either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location, or one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} to smooth the scatterplots. } \item{method}{ Character string determining the smoothing method. See Details. } } \details{ This is a bivariate version of \code{\link{rhohat}}. If \code{object} is a point pattern, this command produces a smoothed version of the scatterplot of the values of the covariates \code{cov1} and \code{cov2} observed at the points of the point pattern. The covariates \code{cov1,cov2} must have continuous values. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z_1(u), Z_2(u)) \kappa(u) }{ lambda(u) = rho(Z1(u), Z2(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}, and \eqn{\rho(z_1,z_2)}{rho(z1, z2)} is a function to be estimated. The algorithm computes a smooth estimate of the function \eqn{\rho}{rho}. The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z_1, z_2)}{rho(z1, z2)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariates \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. \item If \code{method="reweight"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by applying density estimation to the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariates \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } } \value{ A pixel image (object of class \code{"im"}). Also belongs to the special class \code{"rho2hat"} which has a plot method. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. 
} \author{ \adrian } \seealso{ \code{\link{rhohat}}, \code{\link{methods.rho2hat}} } \examples{ data(bei) attach(bei.extra) plot(rho2hat(bei, elev, grad)) fit <- ppm(bei, ~elev, covariates=bei.extra) # plot(rho2hat(fit, elev, grad)) plot(rho2hat(fit, elev, grad, method="reweight")) } \keyword{spatial} \keyword{models} spatstat.core/man/ord.family.Rd0000644000176200001440000000240614141452520016152 0ustar liggesusers\name{ord.family} \alias{ord.family} \title{Ord Interaction Process Family} \description{ An object describing the family of all Ord interaction point processes } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of point process models introduced by Ord (1977). If you need to create a specific Ord-type model for use in analysis, use the function \code{\link{OrdThresh}} or \code{\link{Ord}}. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{Ord}}, \code{\link{OrdThresh}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/lurking.mppm.Rd0000644000176200001440000000671114141452520016534 0ustar liggesusers\name{lurking.mppm} \alias{lurking.mppm} \title{ Lurking Variable Plot for Multiple Point Patterns } \description{ Generate a lurking variable plot of spatial point process residuals against a covariate, for a model fitted to several point patterns. } \usage{ \method{lurking}{mppm}(object, covariate, type="eem", \dots, separate = FALSE, plot.it = TRUE, covname, oldstyle = FALSE, nx = 512, main="") } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{covariate}{ The covariate to be used on the horizontal axis. Either an \code{expression} which can be evaluated in the original data, or a list of pixel images, one image for each point pattern in the original data. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. } \item{\dots}{ Additional arguments passed to \code{\link{lurking.ppm}}, including arguments controlling the plot. } \item{separate}{ Logical value indicating whether to compute a separate lurking variable plot for each of the original point patterns. If \code{FALSE} (the default), a single lurking-variable plot is produced by combining residuals from all patterns. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. 
} \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{main}{ Character string giving a main title for the plot. } } \details{ This function generates a \sQuote{lurking variable} plot for a point process model fitted to several point patterns. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model. The function \code{lurking} is generic. This is the method for the class \code{mppm}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the model-fitting algorithm \code{\link{mppm}}. } \value{ If \code{separate=FALSE} (the default), the return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. See \code{\link{lurking}} for details of the format. If \code{separate=TRUE}, the result is a list of such objects, and also belongs to the class \code{anylist} so that it can be printed and plotted. } \author{ \adrian, with thanks to Nicholas Read. } \seealso{ \code{\link{lurking.ppm}} } \examples{ fit <- mppm(Points ~ Image + Group, demohyper) lurking(fit, expression(Image), type="P") lurking(fit, expression(Image), type="P", separate=TRUE) } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.core/man/rpoint.Rd0000644000176200001440000001141414141452520015420 0ustar liggesusers\name{rpoint} \alias{rpoint} \title{Generate N Random Points} \description{ Generate a random point pattern containing \eqn{n} independent, identically distributed random points with any specified distribution. } \usage{ rpoint(n, f, fmax=NULL, win=unit.square(), \dots, giveup=1000, verbose=FALSE, nsim=1, drop=TRUE, forcewin=FALSE) } \arguments{ \item{n}{ Number of points to generate. } \item{f}{ The probability density of the points, possibly un-normalised. Either a constant, a function \code{f(x,y,...)}, or a pixel image object. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. (Ignored if \code{f} is a pixel image, unless \code{forcewin=TRUE}). } \item{\dots}{ Arguments passed to the function \code{f}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{forcewin}{ Logical. If \code{TRUE}, then simulations will be generated inside \code{win} in all cases. If \code{FALSE} (the default), the argument \code{win} is ignored when \code{f} is a pixel image. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent, identically distributed random points with common probability density proportional to \code{f}. 
The argument \code{f} may be \describe{ \item{a numerical constant:}{ uniformly distributed random points will be generated. } \item{a function:}{random points will be generated in the window \code{win} with probability density proportional to \code{f(x,y,...)} where \code{x} and \code{y} are the Cartesian coordinates. The function \code{f} must accept two \emph{vectors} of coordinates \code{x,y} and return the corresponding vector of function values. Additional arguments \code{...} of any kind may be passed to the function. } \item{a pixel image:}{ if \code{f} is a pixel image (object of class \code{"im"}, see \code{\link{im.object}}) then random points will be generated with probability density proportional to the pixel values of \code{f}. To be precise, pixels are selected with probabilities proportional to the pixel values, and within each selected pixel, a point is generated with a uniform distribution inside the pixel. The window of the simulated point pattern is determined as follows. If \code{forcewin=FALSE} (the default) then the argument \code{win} is ignored, and the simulation window is the window of the pixel image, \code{Window(f)}. If \code{forcewin=TRUE} then the simulation window is \code{win}. } } The algorithm is as follows: \itemize{ \item If \code{f} is a constant, we invoke \code{\link{runifpoint}}. \item If \code{f} is a function, then we use the rejection method. Proposal points are generated from the uniform distribution. A proposal point \eqn{(x,y)} is accepted with probability \code{f(x,y,...)/fmax} and otherwise rejected. The algorithm continues until \code{n} points have been accepted. It gives up after \code{giveup * n} proposals if there are still fewer than \code{n} points. \item If \code{f} is a pixel image, then a random sequence of pixels is selected (using \code{\link{sample}}) with probabilities proportional to the pixel values of \code{f}. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. } The algorithm for pixel images is more efficient than that for functions. } \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{runifpoint}} } \examples{ # 100 uniform random points in the unit square X <- rpoint(100) # 100 random points with probability density proportional to x^2 + y^2 X <- rpoint(100, function(x,y) { x^2 + y^2}, 1) # `fmax' may be omitted X <- rpoint(100, function(x,y) { x^2 + y^2}) # irregular window X <- rpoint(100, function(x,y) { x^2 + y^2}, win=letterR) # make a pixel image Z <- setcov(letterR) # 100 points with density proportional to pixel values X <- rpoint(100, Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat.core/man/bw.smoothppp.Rd0000644000176200001440000000616414141452520016553 0ustar liggesusers\name{bw.smoothppp} \alias{bw.smoothppp} \title{ Cross Validated Bandwidth Selection for Spatial Smoothing } \description{ Uses least-squares cross-validation to select a smoothing bandwidth for spatial smoothing of marks. } \usage{ bw.smoothppp(X, nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE, kernel="gaussian") } \arguments{ \item{X}{ A marked point pattern with numeric marks. } \item{nh}{ Number of trial values of smoothing bandwidth \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwidth \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. 
If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}). } } \details{ This function selects an appropriate bandwidth for the nonparametric smoothing of mark values using \code{\link{Smooth.ppp}}. The argument \code{X} must be a marked point pattern with a vector or data frame of marks. All mark values must be numeric. The bandwidth is selected by least-squares cross-validation. Let \eqn{y_i}{y[i]} be the mark value at the \eqn{i}th data point. For a particular choice of smoothing bandwidth, let \eqn{\hat y_i}{y*[i]} be the smoothed value at the \eqn{i}th data point. Then the bandwidth is chosen to minimise the squared error of the smoothed values \eqn{\sum_i (y_i - \hat y_i)^2}{sum (y[i] - y*[i])^2}. The result of \code{bw.smoothppp} is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on the nearest neighbour distances. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{Smooth.ppp}} } \examples{ data(longleaf) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.smoothppp(longleaf) b plot(b) \testonly{spatstat.options(op)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/data.ppm.Rd0000644000176200001440000000221214141452520015605 0ustar liggesusers\name{data.ppm} \alias{data.ppm} \title{Extract Original Data from a Fitted Point Process Model} \description{ Given a fitted point process model, this function extracts the original point pattern dataset to which the model was fitted. } \usage{ data.ppm(object) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}} } \examples{ fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- data.ppm(fit) # 'X' is identical to 'cells' } \author{ \adrian and \rolf. 
} \keyword{spatial} \keyword{manip} \keyword{models} spatstat.core/man/simulate.mppm.Rd0000644000176200001440000000354114141452520016702 0ustar liggesusers\name{simulate.mppm} \alias{simulate.mppm} \title{Simulate a Point Process Model Fitted to Several Point Patterns} \description{ Generates simulated realisations from a point process model that was fitted to several point patterns. } \usage{ \method{simulate}{mppm}(object, nsim=1, \dots, verbose=TRUE) } \arguments{ \item{object}{ Point process model fitted to several point patterns. An object of class \code{"mppm"}. } \item{nsim}{ Number of simulated realisations (of each original pattern). } \item{\dots}{ Further arguments passed to \code{\link{simulate.ppm}} to control the simulation. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"mppm"} of fitted point process models for replicated point pattern data. The result is a hyperframe with \code{n} rows and \code{nsim} columns, where \code{n} is the number of original point pattern datasets to which the model was fitted. Each column of the hyperframe contains a simulated version of the original data. For each of the original point pattern datasets, the fitted model for this dataset is extracted using \code{\link{subfits}}, then \code{nsim} simulated realisations of this model are generated using \code{\link{simulate.ppm}}, and these are stored in the corresponding row of the output. } \value{ A hyperframe. } \examples{ H <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ id, H) y <- simulate(fit, nsim=2) y plot(y[1,,drop=TRUE], main="Simulations for Waterstriders pattern 1") plot(y[,1,drop=TRUE], main="Simulation 1 for each Waterstriders pattern") } \seealso{ \code{\link{mppm}}, \code{\link{simulate.ppm}}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/pairwise.family.Rd0000644000176200001440000000304714141452520017213 0ustar liggesusers\name{pairwise.family} \alias{pairwise.family} \title{Pairwise Interaction Process Family} \description{ An object describing the family of all pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the pairwise interaction family of point process models. If you need to create a specific pairwise interaction model for use in modelling, use the function \code{\link{Pairwise}} or one of the existing functions listed below. Anyway, \code{pairwise.family} is an object of class \code{"isf"} containing a function \code{pairwise.family$eval} for evaluating the sufficient statistics of any pairwise interaction point process model taking an exponential family form. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ Other families: \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Pairwise interactions: \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Fiksel}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}. Other interactions: \code{\link{AreaInter}}, \code{\link{Geyer}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}}. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/rat.Rd0000644000176200001440000000337514141452520014702 0ustar liggesusers\name{rat} \alias{rat} \title{ Ratio object } \description{ Stores the numerator, denominator, and value of a ratio as a single object. } \usage{ rat(ratio, numerator, denominator, check = TRUE) } \arguments{ \item{ratio,numerator,denominator}{ Three objects belonging to the same class. } \item{check}{ Logical. Whether to check that the objects are \code{\link{compatible}}. } } \details{ The class \code{"rat"} is a simple mechanism for keeping track of the numerator and denominator when calculating a ratio. Its main purpose is simply to signal that the object is a ratio. The function \code{rat} creates an object of class \code{"rat"} given the numerator, the denominator and the ratio. No calculation is performed; the three objects are simply stored together. The arguments \code{ratio}, \code{numerator}, \code{denominator} can be objects of any kind. They should belong to the same class. It is assumed that the relationship \deqn{ \mbox{ratio} = \frac{\mbox{numerator}}{\mbox{denominator}} }{ ratio = numerator/denominator } holds in some version of arithmetic. However, no calculation is performed. By default the algorithm checks whether the three arguments \code{ratio}, \code{numerator}, \code{denominator} are compatible objects, according to \code{\link{compatible}}. The result is equivalent to \code{ratio} except for the addition of extra information. } \value{ An object equivalent to the object \code{ratio} except that it also belongs to the class \code{"rat"} and has additional attributes \code{numerator} and \code{denominator}. } \author{\adrian and \rolf. } \seealso{ \code{\link{compatible}}, \code{\link[spatstat.core]{pool}} } \keyword{spatial} \keyword{manip} spatstat.core/man/harmonic.Rd0000644000176200001440000000416214141452520015707 0ustar liggesusers\name{harmonic} \alias{harmonic} \title{Basis for Harmonic Functions} \description{ Evaluates a basis for the harmonic polynomials in \eqn{x} and \eqn{y} of degree less than or equal to \eqn{n}. } \usage{ harmonic(x, y, n) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates } \item{y}{ Vector of \eqn{y} coordinates } \item{n}{ Maximum degree of polynomial } } \value{ A data frame with \code{2 * n} columns giving the values of the basis functions at the coordinates. Each column is labelled by an algebraic expression for the corresponding basis function. } \details{ This function computes a basis for the harmonic polynomials in two variables \eqn{x} and \eqn{y} up to a given degree \eqn{n} and evaluates them at given \eqn{x,y} locations. It can be used in model formulas (for example in the model-fitting functions \code{\link[stats]{lm},\link[stats]{glm},\link[mgcv]{gam}} and \code{\link[spatstat.core]{ppm}}) to specify a linear predictor which is a harmonic function. A function \eqn{f(x,y)} is harmonic if \deqn{\frac{\partial^2}{\partial x^2} f + \frac{\partial^2}{\partial y^2}f = 0.}{ (d/dx)^2 f + (d/dy)^2 f = 0.} The harmonic polynomials of degree less than or equal to \eqn{n} have a basis consisting of \eqn{2 n} functions. This function was implemented on a suggestion of P. McCullagh for fitting nonstationary spatial trend to point process models. 
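As a concrete illustration of the mathematics, one valid basis of the \eqn{2n} functions consists of the real and imaginary parts of the complex powers \eqn{(x + iy)^k} for \eqn{k = 1, \ldots, n} (the constant polynomial is omitted, being absorbed into the intercept of a model formula). The following hand-rolled sketch evaluates such a basis; it is hypothetical code for illustration, not the implementation used by \code{harmonic}: \preformatted{
  ## hypothetical sketch: harmonic basis via Re((x+iy)^k), Im((x+iy)^k)
  myharmonic <- function(x, y, n) {
    z <- complex(real = x, imaginary = y)
    out <- do.call(cbind, lapply(1:n, function(k) cbind(Re(z^k), Im(z^k))))
    colnames(out) <- as.vector(rbind(paste0("Re.z^", 1:n),
                                     paste0("Im.z^", 1:n)))
    as.data.frame(out)
  }
  ## for n = 2 the columns are x, y, x^2 - y^2 and 2*x*y,
  ## each of which satisfies the Laplace equation
  myharmonic(runif(5), runif(5), 2)
}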
} \seealso{ \code{\link[spatstat.core]{ppm}}, \code{\link[spatstat.core]{polynom}} } \examples{ # inhomogeneous point pattern X <- unmark(longleaf) \testonly{ # smaller dataset X <- X[seq(1,npoints(X), by=50)] } # fit Poisson point process with log-cubic intensity fit.3 <- ppm(X ~ polynom(x,y,3), Poisson()) # fit Poisson process with log-cubic-harmonic intensity fit.h <- ppm(X ~ harmonic(x,y,3), Poisson()) # Likelihood ratio test lrts <- 2 * (logLik(fit.3) - logLik(fit.h)) df <- with(coords(X), ncol(polynom(x,y,3)) - ncol(harmonic(x,y,3))) pval <- 1 - pchisq(lrts, df=df) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/effectfun.Rd0000644000176200001440000000766614141452520016070 0ustar liggesusers\name{effectfun} \alias{effectfun} \title{Compute Fitted Effect of a Spatial Covariate in a Point Process Model} \description{ Compute the trend or intensity of a fitted point process model as a function of one of its covariates. } \usage{ effectfun(model, covname, \dots, se.fit=FALSE, nvalues=256) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"}). } \item{covname}{ The name of the covariate. A character string. (Needed only if the model has more than one covariate.) } \item{\dots}{ The fixed values of other covariates (in the form \code{name=value}) if required. } \item{se.fit}{ Logical. If \code{TRUE}, asymptotic standard errors of the estimates will be computed, together with a 95\% confidence interval. } \item{nvalues}{ Integer. The number of values of the covariate (if it is numeric) for which the effect function should be evaluated. We recommend at least 256. } } \details{ The object \code{model} should be an object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"} representing a point process model fitted to point pattern data. The model's trend formula should involve a spatial covariate named \code{covname}. This could be \code{"x"} or \code{"y"} representing one of the Cartesian coordinates. More commonly the covariate is another, external variable that was supplied when fitting the model. The command \code{effectfun} computes the fitted trend of the point process \code{model} as a function of the covariate named \code{covname}. The return value can be plotted immediately, giving a plot of the fitted trend against the value of the covariate. If the model also involves covariates other than \code{covname}, then these covariates will be held fixed. Values for these other covariates must be provided as arguments to \code{effectfun} in the form \code{name=value}. If \code{se.fit=TRUE}, the algorithm also calculates the asymptotic standard error of the fitted trend, and a (pointwise) asymptotic 95\% confidence interval for the true trend. This command is just a wrapper for the prediction method \code{\link{predict.ppm}}. For more complicated computations about the fitted intensity, use \code{\link{predict.ppm}}. } \section{Trend and intensity}{ For a Poisson point process model, the trend is the same as the intensity of the point process. For a more general Gibbs model, the trend is the first order potential in the model (the first order term in the Gibbs representation). In Poisson or Gibbs models fitted by \code{\link{ppm}}, the trend is the only part of the model that depends on the covariates. 
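For example, if a Poisson model has loglinear trend in a single numeric covariate, the curve computed by \code{effectfun} is just \eqn{\exp(\hat\beta_0 + \hat\beta_1 z)}{exp(b0 + b1 * z)} evaluated over a grid of covariate values \eqn{z}. A sketch of this check (assuming the covariate column comes first and the fitted trend column second in the returned data frame): \preformatted{
  ## sketch: effectfun for a loglinear Poisson model agrees with
  ## exp(b0 + b1 * x) computed directly from the coefficients
  fit <- ppm(cells ~ x)
  ef <- as.data.frame(effectfun(fit, "x"))
  b <- coef(fit)
  ## difference should be numerically negligible
  max(abs(ef[,2] - exp(b[1] + b[2] * ef[,1])))
}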
} \section{Determinantal point process models with fixed intensity}{ The function \code{\link{dppm}} which fits a determinantal point process model allows the user to specify the intensity \code{lambda}. In such cases the effect function is undefined, and \code{effectfun} stops with an error message. } \value{ A data frame containing a column of values of the covariate and a column of values of the fitted trend. If \code{se.fit=TRUE}, there are 3 additional columns containing the standard error and the upper and lower limits of a confidence interval. If the covariate named \code{covname} is numeric (rather than a factor or logical variable), the return value is also of class \code{"fv"} so that it can be plotted immediately. } \seealso{ \code{\link{ppm}}, \code{\link{predict.ppm}}, \code{\link{fv.object}} } \examples{ X <- copper$SouthPoints D <- distfun(copper$SouthLines) fit <- ppm(X ~ polynom(D, 5)) effectfun(fit) plot(effectfun(fit, se.fit=TRUE)) fitx <- ppm(X ~ x + polynom(D, 5)) plot(effectfun(fitx, "D", x=20)) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.core/man/quadratresample.Rd0000644000176200001440000000460514141452520017303 0ustar liggesusers\name{quadratresample} \alias{quadratresample} \title{Resample a Point Pattern by Resampling Quadrats} \description{ Given a point pattern dataset, create a resampled point pattern by dividing the window into rectangular quadrats and randomly resampling the list of quadrats. } \usage{ quadratresample(X, nx, ny=nx, ..., replace = FALSE, nsamples = 1, verbose = (nsamples > 1)) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"}). } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. } \item{\dots}{Ignored.} \item{replace}{ Logical value. Specifies whether quadrats should be sampled with or without replacement. } \item{nsamples}{Number of randomised point patterns to be generated.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This command implements a very simple bootstrap resampling procedure for spatial point patterns \code{X}. The dataset \code{X} must be a point pattern (object of class \code{"ppp"}) and its observation window must be a rectangle. The window is first divided into \code{N = nx * ny} rectangular tiles (quadrats) of equal size and shape. To generate one resampled point pattern, a random sample of \code{N} quadrats is selected from the list of \code{N} quadrats, with replacement (if \code{replace=TRUE}) or without replacement (if \code{replace=FALSE}). The \eqn{i}th quadrat in the original dataset is then replaced by the \eqn{i}th sampled quadrat, after the latter is shifted so that it occupies the correct spatial position. The quadrats are then reconstituted into a point pattern inside the same window as \code{X}. If \code{replace=FALSE}, this procedure effectively involves a random permutation of the quadrats. The resulting resampled point pattern has the same number of points as \code{X}. If \code{replace=TRUE}, the number of points in the resampled point pattern is random. } \value{ A point pattern (if \code{nsamples = 1}) or a list of point patterns (if \code{nsamples > 1}). } \author{\adrian and \rolf } \seealso{ \code{\link{quadrats}}, \code{\link{quadratcount}}. See \code{\link{varblock}} to estimate the variance of a summary statistic by block resampling. 
} \examples{ data(bei) quadratresample(bei, 6, 3) } \keyword{spatial} \keyword{datagen} spatstat.core/man/Extract.leverage.ppm.Rd0000644000176200001440000000446214141452520020110 0ustar liggesusers\name{Extract.leverage.ppm} \alias{[.leverage.ppm} \title{Extract Subset of Leverage Object} \description{ Extract a subset of a leverage map, or extract the leverage values at specified locations. } \usage{ \method{[}{leverage.ppm}(x, i, \dots, update=TRUE) } \arguments{ \item{x}{ A leverage object (of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.im}}). Either a spatial window (object of class \code{"owin"}) or a spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{ Further arguments passed to \code{\link{[.im}}, especially the argument \code{drop}. } \item{update}{ Logical value indicating whether to update the internally-stored value of the mean leverage, by averaging over the specified subset. } } \value{ Another object of class \code{"leverage.ppm"}, or a vector of numeric values of leverage. } \details{ An object of class \code{"leverage.ppm"} contains the values of the leverage function for a point process model, computed by \code{\link{leverage.ppm}}. This function extracts a designated subset of the leverage values, either as another leverage object, or as a vector of numeric values. The function \code{[.leverage.ppm} is a method for \code{\link{[}} for the class \code{"leverage.ppm"}. The argument \code{i} should be either \itemize{ \item a spatial window (object of class \code{"owin"}) determining a region where the leverage map is required. The result will typically be another leverage map (object of class \code{leverage.ppm}). \item a spatial point pattern (object of class \code{"ppp"}) specifying locations at which the leverage values are required. The result will be a numeric vector. } The subset operator for images, \code{\link{[.im}}, is applied to the leverage map. If this yields a pixel image, then the result of \code{\link{[.leverage.ppm}} is another leverage object. Otherwise, a vector containing the numeric values of leverage is returned. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ fit <- ppm(cells ~x) lev <- leverage(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) lev[b] lev[cells] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/dppCauchy.Rd0000644000176200001440000000245114141452520016026 0ustar liggesusers\name{dppCauchy} \alias{dppCauchy} \title{Generalized Cauchy Determinantal Point Process Model} \description{ Function generating an instance of the (generalized) Cauchy determinantal point process model. } \usage{dppCauchy(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The (generalized) Cauchy DPP is defined in (Lavancier, \Moller and Rubak, 2015). The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian, \rolf and \ege } \references{ Lavancier, F., \Moller, J. and Rubak, E. 
(2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppCauchy(lambda=100, alpha=.05, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat.core/man/measureVariation.Rd0000644000176200001440000000423014141452520017421 0ustar liggesusers\name{measureVariation} \alias{measureVariation} \alias{measurePositive} \alias{measureNegative} \alias{totalVariation} \title{ Positive and Negative Parts, and Variation, of a Measure } \description{ Given a measure \code{A} (object of class \code{"msr"}) these functions find the positive part, negative part and variation of \code{A}. } \usage{ measurePositive(x) measureNegative(x) measureVariation(x) totalVariation(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measurePositive} and \code{measureNegative} return the positive and negative parts of the measure, and \code{measureVariation} returns the variation (sum of positive and negative parts). The function \code{totalVariation} returns the total variation norm. If \eqn{\mu} is a signed measure, it can be represented as \deqn{\mu = \mu_{+} - \mu_{-}}{\mu = \mu[+] - \mu[-]} where \eqn{\mu_{+}}{\mu[+]} and \eqn{\mu_{-}}{\mu[-]} are \emph{nonnegative} measures called the positive and negative parts of \eqn{\mu}. In a nutshell, the positive part of \eqn{\mu} consists of all positive contributions or increments, and the negative part consists of all negative contributions multiplied by \code{-1}. The variation \eqn{|\mu|} is defined by \deqn{|\mu| = \mu_{+} + \mu_{-}}{|\mu| = \mu[+] + \mu[-]} and is also a nonnegative measure. The total variation norm is the integral of the variation. } \value{ The result of \code{measurePositive}, \code{measureNegative} and \code{measureVariation} is another measure (object of class \code{"msr"}) on the same spatial domain. The result of \code{totalVariation} is a non-negative number. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. } \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measureDiscrete}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") measurePositive(rp) measureNegative(rp) measureVariation(rp) # total variation norm totalVariation(rp) } \keyword{spatial} \keyword{math} spatstat.core/man/default.rmhcontrol.Rd0000644000176200001440000000277514141452520017731 0ustar liggesusers\name{default.rmhcontrol} \alias{default.rmhcontrol} \title{Set Default Control Parameters for Metropolis-Hastings Algorithm.} \description{ Given a fitted point process model, this command sets appropriate default values of the parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ default.rmhcontrol(model, w=NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}) } \item{w}{ Optional. Window for the resulting simulated patterns. } } \value{ An object of class \code{"rmhcontrol"}. See \code{\link{rmhcontrol}}. } \details{ This function sets the values of the parameters controlling the iterative behaviour of the Metropolis-Hastings simulation algorithm. It uses default values that would be appropriate for the fitted point process model \code{model}. 
The expansion parameter \code{expand} is set to \code{\link{default.expand}(model, w)}. All other parameters revert to their defaults given in \code{\link{rmhcontrol.default}}. See \code{\link{rmhcontrol}} for the full list of control parameters. To override default parameters, use \code{\link{update.rmhcontrol}}. } \seealso{ \code{\link{rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ fit <- ppm(cells, ~1, Strauss(0.1)) default.rmhcontrol(fit) default.rmhcontrol(fit, w=square(2)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/msr.Rd0000644000176200001440000001546414141452520014717 0ustar liggesusers\name{msr} \alias{msr} \title{ Signed or Vector-Valued Measure } \description{ Defines an object representing a signed measure or vector-valued measure on a spatial domain. } \usage{ msr(qscheme, discrete, density, check=TRUE) } \arguments{ \item{qscheme}{ A quadrature scheme (object of class \code{"quad"} usually extracted from a fitted point process model). } \item{discrete}{ Vector or matrix containing the values (masses) of the discrete component of the measure, for each of the data points in \code{qscheme}. } \item{density}{ Vector or matrix containing values of the density of the diffuse component of the measure, for each of the quadrature points in \code{qscheme}. } \item{check}{ Logical. Whether to check validity of the arguments. } } \details{ This function creates an object that represents a signed or vector valued \emph{measure} on the two-dimensional plane. It is not normally called directly by the user. A signed measure is a classical mathematical object (Diestel and Uhl, 1977) which can be visualised as a collection of electric charges, positive and/or negative, spread over the plane. Electric charges may be concentrated at specific points (atoms), or spread diffusely over a region. An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. Spatial residuals for point process models (Baddeley et al, 2005, 2008) take the form of a real-valued or vector-valued measure. The function \code{\link{residuals.ppm}} returns an object of class \code{"msr"} representing the residual measure. Various other diagnostic tools such as \code{\link{dfbetas.ppm}} and \code{\link{dffit.ppm}} also return an object of class \code{"msr"}. The function \code{msr} would not normally be called directly by the user. It is the low-level creator function that makes an object of class \code{"msr"} from raw data. The first argument \code{qscheme} is a quadrature scheme (object of class \code{"quad"}). It is typically created by \code{\link{quadscheme}} or extracted from a fitted point process model using \code{\link{quad.ppm}}. A quadrature scheme contains both data points and dummy points. The data points of \code{qscheme} are used as the locations of the atoms of the measure. All quadrature points (i.e. both data points and dummy points) of \code{qscheme} are used as sampling points for the density of the continuous component of the measure. The argument \code{discrete} gives the values of the atomic component of the measure for each \emph{data point} in \code{qscheme}. It should be either a numeric vector with one entry for each data point, or a numeric matrix with one row for each data point. 
The argument \code{density} gives the values of the \emph{density} of the diffuse component of the measure, at each \emph{quadrature point} in \code{qscheme}. It should be either a numeric vector with one entry for each quadrature point, or a numeric matrix with one row for each quadrature point. If both \code{discrete} and \code{density} are vectors (or one-column matrices) then the result is a signed (real-valued) measure. Otherwise, the result is a vector-valued measure, with the dimension of the vector space being determined by the number of columns in the matrices \code{discrete} and/or \code{density}. (If one of these is a \eqn{k}-column matrix and the other is a 1-column matrix, then the latter is replicated to \eqn{k} columns). The class \code{"msr"} has methods for \code{print}, \code{plot} and \code{[}. There is also a function \code{\link{Smooth.msr}} for smoothing a measure. } \value{ An object of class \code{"msr"}. } \section{Guide to using measures}{ Objects of class \code{"msr"}, representing measures, are returned by the functions \code{\link{residuals.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{dffit.ppm}} and possibly by other functions. There are methods for printing and plotting a measure, along with many other operations, which can be listed by typing \code{methods(class="msr")}. The \code{print} and \code{summary} methods report basic information about a measure, such as the total value of the measure, and the spatial domain on which it is defined. The \code{plot} method displays the measure. It is documented separately in \code{\link{plot.msr}}. A measure can be smoothed using \code{\link{Smooth.msr}}, yielding a pixel image which is sometimes easier to interpret than the plot of the measure itself. The subset operator \code{[} can be used to restrict the measure to a subregion of space, or to extract one of the scalar components of a vector-valued measure. It is documented separately in \code{\link{[.msr}}. The total value of a measure, or the value on a subregion, can be obtained using \code{\link{integral.msr}}. The value of a measure \code{m} on a subregion \code{B} can be obtained by \code{integral(m, domain=B)} or \code{integral(m[B])}. The values of a measure \code{m} on each tile of a tessellation \code{A} can be obtained by \code{integral(m, domain=A)}. Some mathematical operations on measures are supported, such as multiplying a measure by a single number, or adding two measures. Measures can be separated into components in different ways using \code{\link{as.layered.msr}}, \code{\link{unstack.msr}} and \code{\link{split.msr}}. Internal components of the data structure of an \code{"msr"} object can be extracted using \code{\link{with.msr}}. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Diestel, J. and Uhl, J.J. Jr (1977) \emph{Vector measures}. Providence, RI, USA: American Mathematical Society. Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian } \seealso{ \code{\link{plot.msr}}, \code{\link{Smooth.msr}}, \code{\link{[.msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{Ops.msr}}, \code{\link{measureVariation}}, \code{\link{measureContinuous}}. 
} \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp rs <- residuals(fit, type="score") rs colnames(rs) # An equivalent way to construct the Pearson residual measure by hand Q <- quad.ppm(fit) lambda <- fitted(fit) slam <- sqrt(lambda) Z <- is.data(Q) m <- msr(Q, discrete=1/slam[Z], density = -slam) m } \keyword{spatial} \keyword{models} spatstat.core/man/plot.bermantest.Rd0000644000176200001440000000551414141452520017232 0ustar liggesusers\name{plot.bermantest} \alias{plot.bermantest} \title{Plot Result of Berman Test} \description{ Plot the result of Berman's test of goodness-of-fit } \usage{ \method{plot}{bermantest}(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"bermantest"} produced by \code{\link{berman.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.ecdf}}. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical distribution curve. } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the predicted (null) distribution curve. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"bermantest"}. An object of this class represents the outcome of Berman's test of goodness-of-fit of a spatial Poisson point process model, computed by \code{\link{berman.test}}. For the \emph{Z1} test (i.e. if \code{x} was computed using \code{berman.test( ,which="Z1")}), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, \eqn{\hat F}{Fhat}, and the predicted cumulative distribution function of the covariate under the model, \eqn{F_0}{F0}, both plotted against the value of the covariate. Two vertical lines show the mean values of these two distributions. If the model is correct, the two curves should be close; the test is based on comparing the two vertical lines. For the \emph{Z2} test (i.e. if \code{x} was computed using \code{berman.test( ,which="Z2")}), the plot displays the empirical cumulative distribution function of the values \eqn{U_i = F_0(Y_i)}{U[i] = F0(Y[i])} where \eqn{Y_i}{Y[i]} is the value of the covariate at the \eqn{i}-th data point. The diagonal line with equation \eqn{y=x} is also shown. Two vertical lines show the mean of the values \eqn{U_i}{U[i]} and the value \eqn{1/2}. If the model is correct, the two curves should be close. The test is based on comparing the two vertical lines. } \seealso{ \code{\link{berman.test}} } \examples{ # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(-x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- berman.test(fit0, xcoord, "Z1") # plot result of test plot(k, col="red", col0="green") # Z2 test k2 <- berman.test(fit0, xcoord, "Z2") plot(k2, col="red", col0="green") } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat.core/man/thomas.estpcf.Rd0000644000176200001440000001472514141452520016673 0ustar liggesusers\name{thomas.estpcf} \alias{thomas.estpcf} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. 
} \usage{ thomas.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function \code{\link{pcf}}. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical pair correlation function of the Thomas process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of starting values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical pair correlation function of the Thomas process is \deqn{ g(r) = 1 + \frac 1 {4\pi \kappa \sigma^2} \exp\left(-\frac{r^2}{4\sigma^2}\right). }{ g(r) = 1 + exp(-r^2/(4 * sigma^2))/(4 * pi * kappa * sigma^2). } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. 
Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{thomas.estK}} \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ data(redwood) u <- thomas.estpcf(redwood, c(kappa=10, scale=0.1)) u plot(u, legendpos="topright") u2 <- thomas.estpcf(redwood, c(kappa=10, scale=0.1), pcfargs=list(stoyan=0.12)) } \keyword{spatial} \keyword{models} spatstat.core/man/thresholdCI.Rd0000644000176200001440000000400714141452520016315 0ustar liggesusers\name{thresholdCI} \alias{thresholdCI} \title{ Confidence Interval for Threshold of Numerical Predictor } \description{ Given a point pattern and a spatial covariate that has some predictive value for the point pattern, compute a confidence interval for the optimal value of the threshold that should be used to convert the covariate to a binary predictor. } \usage{ thresholdCI(X, Z, confidence = 0.95, nsim = 1000, parametric = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{Z}{ Spatial covariate with numerical values. Either a pixel image (object of class \code{"im"}), a distance function (object of class \code{"distfun"}) or a \code{function(x,y)} in the \R language. } \item{confidence}{ Confidence level. A number between 0 and 1. } \item{nsim}{ Number of bootstrap simulations to perform. } \item{parametric}{ Logical value specifying whether to use the parametric bootstrap. 
} } \details{ The spatial covariate \code{Z} is assumed to have some utility as a predictor of the point pattern \code{X}. This code computes a bootstrap confidence interval for the best threshold value \eqn{z} for converting the numerical predictor to a binary predictor, for use in techniques such as Weights of Evidence. } \value{ A matrix containing upper and lower limits for the threshold \code{z} and the corresponding upper and lower limits for the fraction of area of the study region. } \references{ Baddeley, A., Brown, W., Milne, R.K., Nair, G., Rakshit, S., Lawrence, T., Phatak, A. and Fu, S.C. (2021) Optimal thresholding of predictors in mineral prospectivity analysis. \emph{Natural Resources Research} \bold{30} 923--969. } \author{ \adrian. } \seealso{ \code{\link{thresholdSelect}} } \examples{ gold <- rescale(murchison$gold, 1000, "km") faults <- rescale(murchison$faults, 1000, "km") distfault <- distfun(faults) thresholdCI(gold, distfault, nsim=100) } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.core/man/clusterradius.Rd0000644000176200001440000000671614141452520017007 0ustar liggesusers\name{clusterradius} \alias{clusterradius} \alias{clusterradius.character} \alias{clusterradius.kppm} \title{ Compute or Extract Effective Range of Cluster Kernel } \description{ Given a cluster point process model, this command returns a value beyond which the probability density of the cluster offspring is negligible. } \usage{ clusterradius(model, \dots) \method{clusterradius}{kppm}(model, \dots, thresh = NULL, precision = FALSE) \method{clusterradius}{character}(model, \dots, thresh = NULL, precision = FALSE) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } \item{thresh}{ Numerical threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be considered negligible. A sensible default is provided. } \item{precision}{ Logical. If \code{precision=TRUE} the precision of the calculated range is returned as an attribute to the range. See details. } } \details{ Given a cluster model, this function by default returns the effective range of the model with the given parameters as used in spatstat. For the \Matern cluster model (see e.g. \code{\link{rMatClust}}) this is simply the finite radius of the offspring density given by the parameter \code{scale}, irrespective of other options given to this function. The remaining models in spatstat have infinite theoretical range, and an effective finite value is given as follows: For the Thomas model (see e.g. \code{\link{rThomas}}) the default is \code{4*scale} where \code{scale} is the scale or standard deviation parameter of the model. If \code{thresh} is given the value is instead found as described for the other models below. For the Cauchy model (see e.g. \code{\link{rCauchy}}) and the Variance Gamma (Bessel) model (see e.g. \code{\link{rVarGamma}}) the value of \code{thresh} defaults to 0.001, and then this is used to compute the range numerically as follows. If \eqn{k(x,y)=k_0(r)}{k(x,y)=k0(r)} with \eqn{r=\sqrt{x^2+y^2}}{r=sqrt(x^2+y^2)} denotes the isotropic cluster kernel then \eqn{f(r) = 2 \pi r k_0(r)}{f(r) = 2 * pi * r * k0(r)} is the density function of the offspring distance from the parent.
The range is determined as the value of \eqn{r} where \eqn{f(r)} falls below \code{thresh} times \eqn{k_0(r)}{k0(r)}. If \code{precision=TRUE} the precision related to the chosen range is returned as an attribute. Here the precision is defined as the polar integral of the kernel from distance 0 to the calculated range. Ideally this should be close to the value 1 which would be obtained for the true theoretical infinite range. } \value{ A positive numeric. Additionally, the precision related to this range value is returned as an attribute \code{"prec"}, if \code{precision=TRUE}. } \author{ \adrian, \rolf and \ege } \seealso{ \code{\link{clusterkernel}}, \code{\link{kppm}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}. } \examples{ fit <- kppm(redwood ~ x, "MatClust") clusterradius(fit) clusterradius("Thomas", scale = .1) clusterradius("Thomas", scale = .1, thresh = 0.001) clusterradius("VarGamma", scale = .1, nu = 2, precision = TRUE) } \keyword{spatial} spatstat.core/man/distcdf.Rd0000644000176200001440000001035414141452520015527 0ustar liggesusers\name{distcdf} \alias{distcdf} \title{Distribution Function of Interpoint Distance } \description{ Computes the cumulative distribution function of the distance between two independent random points in a given window or windows. } \usage{ distcdf(W, V=W, \dots, dW=1, dV=dW, nr=1024, regularise=TRUE, savedenom=FALSE, delta=NULL) } \arguments{ \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution for the calculation. } \item{dW, dV}{ Optional. Probability densities (not necessarily normalised) for the first and second random points respectively. Data in any format acceptable to \code{\link{as.im}}, for example, a \code{function(x,y)} or a pixel image or a numeric value. The default corresponds to a uniform distribution over the window. } \item{nr}{ Integer. The number of values of interpoint distance \eqn{r} for which the CDF will be computed. Should be a large value. Alternatively if \code{nr=NULL}, a good default value will be chosen, depending on the pixel resolution. } \item{regularise}{ Logical value indicating whether to smooth the results for very small distances, to avoid discretisation artefacts. } \item{savedenom}{ Logical value indicating whether to save the denominator of the double integral as an attribute of the result. } \item{delta}{ Optional. A positive number. The maximum permitted spacing between values of the function argument. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \details{ This command computes the Cumulative Distribution Function \eqn{ CDF(r) = Prob(T \le r) }{ CDF(r) = Prob(T \le r) } of the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. In the simplest case, the command \code{distcdf(W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}.
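For example, a minimal sketch of this two-window usage (the choice of windows here is arbitrary):
\preformatted{
  # CDF of the distance between a uniform random point in the unit disc
  # and an independent uniform random point in the unit square
  plot(distcdf(disc(), square(1)))
}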
In the most general case the first point \eqn{X_1}{X1} is random in the window \code{W} with a probability density proportional to \code{dW}, and the second point \eqn{X_2}{X2} is random in a different window \code{V} with probability density proportional to \code{dV}. The values of \code{dW} and \code{dV} must be finite and nonnegative. The calculation is performed by numerical integration of the set covariance function \code{\link{setcov}} for uniformly distributed points, and by computing the covariance function \code{\link{imcov}} in the general case. The accuracy of the result depends on the pixel resolution used to represent the windows: this is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. For example use \code{eps=0.1} to specify pixels of size 0.1 units. The arguments \code{W} or \code{V} may also be point patterns (objects of class \code{"ppp"}). The result is the cumulative distribution function of the distance from a randomly selected point in the point pattern, to a randomly selected point in the other point pattern or window. If \code{regularise=TRUE} (the default), values of the cumulative distribution function for very short distances are smoothed to avoid discretisation artefacts. Smoothing is applied to all distances shorter than the width of 10 pixels. Numerical accuracy of some calculations requires very fine spacing of the values of the function argument \code{r}. If the argument \code{delta} is given, then after the cumulative distribution function has been calculated, it will be interpolated onto a finer grid of \code{r} values with spacing less than or equal to \code{delta}. } \seealso{ \code{\link{setcov}}, \code{\link{as.mask}}. } \examples{ # The unit disc B <- disc() plot(distcdf(B)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.core/man/collapse.fv.Rd0000644000176200001440000000651314141452520016325 0ustar liggesusers\name{collapse.fv} \alias{collapse.fv} \alias{collapse.anylist} \title{ Collapse Several Function Tables into One } \description{ Combines several function tables (objects of class \code{"fv"}) into a single function table, merging columns that are identical and relabelling columns that are different. } \usage{ \method{collapse}{fv}(object, \dots, same = NULL, different = NULL) \method{collapse}{anylist}(object, \dots, same = NULL, different = NULL) } \arguments{ \item{object}{ An object of class \code{"fv"}, or a list of such objects. } \item{\dots}{ Additional objects of class \code{"fv"}. } \item{same}{ Character string or character vector specifying a column or columns of function values that are identical in different \code{"fv"} objects. These columns will be included only once in the result. } \item{different}{ Character string or character vector specifying a column or columns of function values, that are different in different \code{"fv"} objects. Each of these columns of data will be included, with labels that distinguish them from each other. } } \details{ This is a method for the generic function \code{\link[nlme]{collapse}}. It combines the data in several function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) to make a single function table. It is essentially a smart wrapper for \code{\link{cbind.fv}}. A typical application is to calculate the same summary statistic (such as the \eqn{K} function) for different point patterns, and then to use \code{collapse.fv} to combine the results into a single object that can easily be plotted. See the Examples. 
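A condensed sketch of this workflow (two standard datasets are used purely for illustration, assuming the estimates share the same \code{r} values; otherwise apply \code{\link{harmonise.fv}} first):
\preformatted{
  # K function estimates for two point patterns in the unit square
  K1 <- Kest(cells)
  K2 <- Kest(redwood)
  # keep the common theoretical column once; keep both isotropic estimates
  K12 <- collapse(anylist(K1, K2), same="theo", different="iso")
}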
The arguments \code{object} and \code{\dots} should be function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) that are compatible in the sense that they have the same values of the function argument. (This can be ensured by applying \code{\link{harmonise.fv}} to them.) The argument \code{same} identifies any columns that are present in some or all of the function tables, and which are known to contain exactly the same values in each table that includes them. This column or columns will be included only once in the result. The argument \code{different} identifies any columns that are present in some or all of the function tables, and which may contain different numerical values in different tables. Each of these columns will be included, with labels to distinguish them. Columns that are not named in \code{same} or \code{different} will not be included. The function argument is always included and does not need to be specified. The arguments \code{same} and \code{different} can be \code{NULL}, or they can be character vectors containing the names of columns of \code{object}. The argument \code{different} can be one of the abbreviations recognised by \code{\link{fvnames}}. } \value{ Object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}} } \examples{ # generate simulated data X <- replicate(3, rpoispp(100), simplify=FALSE) names(X) <- paste("Simulation", 1:3) # compute K function estimates Klist <- anylapply(X, Kest) # collapse K <- collapse(Klist, same="theo", different="iso") K } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.core/man/integral.msr.Rd0000644000176200001440000000376714141452520016526 0ustar liggesusers\name{integral.msr} \alias{integral.msr} \title{ Integral of a Measure } \description{ Computes the integral (total value) of a measure over its domain. } \usage{ \method{integral}{msr}(f, domain=NULL, \dots) } \arguments{ \item{f}{ A signed measure or vector-valued measure (object of class \code{"msr"}). } \item{domain}{ Optional window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. } } \details{ The integral (total value of the measure over its domain) is calculated. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. For a multitype measure \code{m}, use \code{\link{split.msr}} to separate the contributions for each type of point, as shown in the Examples. } \value{ A numeric value, vector, or matrix. \code{integral(f)} returns a numeric value (for a signed measure) or a vector of values (for a vector-valued measure). If \code{domain} is a tessellation then \code{integral(f, domain)} returns a numeric vector with one entry for each tile (if \code{f} is a signed measure) or a numeric matrix with one row for each tile (if \code{f} is a vector-valued measure). 
} \seealso{ \code{\link{msr}}, \code{\link{integral}} } \examples{ fit <- ppm(cells ~ x) rr <- residuals(fit) integral(rr) # vector-valued measure rs <- residuals(fit, type="score") integral(rs) # multitype fitA <- ppm(amacrine ~ x) rrA <- residuals(fitA) sapply(split(rrA), integral) # multitype and vector-valued rsA <- residuals(fitA, type="score") sapply(split(rsA), integral) ## integral over a subregion integral(rr, domain=square(0.5)) ## integrals over the tiles of a tessellation integral(rr, domain=quadrats(cells, 2)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.core/man/Triplets.Rd0000644000176200001440000000630214141452520015713 0ustar liggesusers\name{Triplets} \alias{Triplets} \title{The Triplet Point Process Model} \description{ Creates an instance of Geyer's triplet interaction point process model which can then be fitted to point pattern data. } \usage{ Triplets(r) } \arguments{ \item{r}{The interaction radius of the Triplets process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Triplets process with interaction radius \eqn{r}. } \details{ The (stationary) Geyer triplet process (Geyer, 1999) with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each triplet of close points contributes a factor \eqn{\gamma}{gamma} to the density. A triplet of close points is a group of 3 points, each pair of which is closer than \eqn{r} units apart. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of unordered triples of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Triplets process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Triplets process pairwise interaction is yielded by the function \code{Triplets()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Triplets()}. } \seealso{ \code{\link{ppm}}, \code{\link{triplet.family}}, \code{\link{ppm.object}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. 
} \examples{ Triplets(r=0.1) # prints a sensible description of itself ppm(cells ~1, Triplets(r=0.2)) # fit the stationary Triplets process to `cells' # ppm(cells ~polynom(x,y,3), Triplets(r=0.2)) # fit a nonstationary Triplets process with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/hybrid.family.Rd0000644000176200001440000000145214141452520016647 0ustar liggesusers\name{hybrid.family} \alias{hybrid.family} \title{ Hybrid Interaction Family } \description{ An object describing the family of all hybrid interactions. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of all hybrid point process models. If you need to create a specific hybrid interaction model for use in modelling, use the function \code{\link{Hybrid}}. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ Use \code{\link{Hybrid}} to make hybrid interactions. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.core/man/update.ppm.Rd0000644000176200001440000001431514141452520016165 0ustar liggesusers\name{update.ppm} \alias{update.ppm} \title{Update a Fitted Point Process Model} \description{ \code{update} method for class \code{"ppm"}. } \usage{ \method{update}{ppm}(object, \dots, fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) } \arguments{ \item{object}{ An existing fitted point process model, typically produced by \code{\link{ppm}}. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{ppm}}. } \item{fixdummy}{ Logical flag indicating whether the quadrature scheme for the call to \code{\link{ppm}} should use the same set of dummy points as that in the original call. } \item{use.internal}{ Optional. Logical flag indicating whether the model should be refitted using the internally saved data (\code{use.internal=TRUE}) or by re-evaluating these data in the current frame (\code{use.internal=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{ppm}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}} for details of this class. \code{update.ppm} will modify the point process model specified by \code{object} according to the new arguments given, then re-fit it. The actual re-fitting is performed by the model-fitting function \code{\link{ppm}}. If you are comparing several model fits to the same data, or fits of the same model to different data, it is strongly advisable to use \code{update.ppm} rather than trying to fit them by hand. This is because \code{update.ppm} re-fits the model in a way which is comparable to the original fit. The arguments \code{...} are matched to the formal arguments of \code{\link{ppm}} as follows. First, all the \emph{named} arguments in \code{...} are matched with the formal arguments of \code{\link{ppm}}. Use \code{name=NULL} to remove the argument \code{name} from the call. Second, any \emph{unnamed} arguments in \code{...} are matched with formal arguments of \code{\link{ppm}} if the matching is obvious from the class of the object.
Thus \code{...} may contain \itemize{ \item exactly one argument of class \code{"ppp"} or \code{"quad"}, which will be interpreted as the named argument \code{Q}; \item exactly one argument of class \code{"formula"}, which will be interpreted as the named argument \code{trend} (or as specifying a change to the trend formula); \item exactly one argument of class \code{"interact"}, which will be interpreted as the named argument \code{interaction}; \item exactly one argument of class \code{"data.frame"}, which will be interpreted as the named argument \code{covariates}. } The \code{trend} argument can be a formula that specifies a \emph{change} to the current trend formula. For example, the formula \code{~ . + Z} specifies that the additional covariate \code{Z} will be added to the right hand side of the trend formula in the existing \code{object}. The argument \code{fixdummy=TRUE} ensures comparability of the objects before and after updating. When \code{fixdummy=FALSE}, calling \code{update.ppm} is exactly the same as calling \code{ppm} with the updated arguments. However, the original and updated models are not strictly comparable (for example, their pseudolikelihoods are not strictly comparable) unless they used the same set of dummy points for the quadrature scheme. Setting \code{fixdummy=TRUE} ensures that the re-fitting will be performed using the same set of dummy points. This is highly recommended. The value of \code{use.internal} determines where to find data to re-evaluate the model (data for the arguments mentioned in the original call to \code{ppm} that are not overwritten by arguments to \code{update.ppm}). If \code{use.internal=FALSE}, then arguments to \code{ppm} are \emph{re-evaluated} in the frame where you call \code{update.ppm}. This is like the behaviour of the other methods for \code{\link{update}}. This means that if you have changed any of the objects referred to in the call, these changes will be taken into account. Also if the original call to \code{ppm} included any calls to random number generators, these calls will be recomputed, so that you will get a different outcome of the random numbers. If \code{use.internal=TRUE}, then arguments to \code{ppm} are extracted from internal data stored inside the current fitted model \code{object}. This is useful if you don't want to re-evaluate anything. It is also necessary if \code{object} has been restored from a dump file using \code{\link{load}} or \code{\link{source}}. In such cases, we have lost the environment in which \code{object} was fitted, and data cannot be re-evaluated. By default, if \code{use.internal} is missing, \code{update.ppm} will re-evaluate the arguments if this is possible, and use internal data if not. } \value{ Another fitted point process model (object of class \code{"ppm"}). } \examples{ data(nztrees) data(cells) # fit the stationary Poisson process fit <- ppm(nztrees, ~ 1) # fit a nonstationary Poisson process fitP <- update(fit, trend=~x) fitP <- update(fit, ~x) # change the trend formula: add another term to the trend fitPxy <- update(fitP, ~ . + y) # change the trend formula: remove the x variable fitPy <- update(fitPxy, ~ .
- x) # fit a stationary Strauss process fitS <- update(fit, interaction=Strauss(13)) fitS <- update(fit, Strauss(13)) # refit using a different edge correction fitS <- update(fitS, correction="isotropic") # re-fit the model to a subset # of the original point pattern nzw <- owin(c(0,148),c(0,95)) nzsub <- nztrees[,nzw] fut <- update(fitS, Q=nzsub) fut <- update(fitS, nzsub) # WARNING: the point pattern argument is called 'Q' ranfit <- ppm(rpoispp(42), ~1, Poisson()) ranfit # different random data! update(ranfit) # the original data update(ranfit, use.internal=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/closepaircounts.Rd0000644000176200001440000000473714141452520017334 0ustar liggesusers\name{closepaircounts} \alias{closepaircounts} \alias{crosspaircounts} \title{ Count Close Pairs of Points } \description{ Low-level functions to count the number of close pairs of points. } \usage{ closepaircounts(X, r) crosspaircounts(X, Y, r) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{r}{ Maximum distance between pairs of points to be counted as close pairs. } } \details{ These are the efficient low-level functions used by \pkg{spatstat} to count close pairs of points in a point pattern or between two point patterns. \code{closepaircounts(X,r)} counts the number of neighbours for each point in the pattern \code{X}. That is, for each point \code{X[i]}, it counts the number of other points \code{X[j]} with \code{j != i} such that \code{d(X[i],X[j]) <= r} where \code{d} denotes Euclidean distance. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]}. \code{crosspaircounts(X,Y,r)} counts, for each point in the pattern \code{X}, the number of neighbours in the pattern \code{Y}. That is, for each point \code{X[i]}, it counts the number of points \code{Y[j]} such that \code{d(X[i],Y[j]) <= r}. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]} in the pattern \code{Y}. } \section{Warning about accuracy}{ The results of these functions may not agree exactly with the correct answer (as calculated by a human) and may not be consistent between different computers and different installations of \R. The discrepancies arise in marginal cases where the interpoint distance is equal to, or very close to, the threshold \code{r}. Floating-point numbers in a computer are not mathematical Real Numbers: they are approximations using finite-precision binary arithmetic. The approximation is accurate to a tolerance of about \code{.Machine$double.eps}. If the true interpoint distance \eqn{d} and the threshold \code{r} are equal, or if their difference is no more than \code{.Machine$double.eps}, the result may be incorrect. } \value{ An integer vector of length equal to the number of points in \code{X}. } \author{ \adrian and \rolf } \seealso{ \code{\link{closepairs}} to identify all close pairs of points. } \examples{ a <- closepaircounts(cells, 0.1) sum(a) Y <- split(amacrine) b <- crosspaircounts(Y$on, Y$off, 0.1) } \keyword{spatial} \keyword{math} spatstat.core/man/scanLRTS.Rd0000644000176200001440000001167214141452520015544 0ustar liggesusers\name{scanLRTS} \alias{scanLRTS} \title{ Likelihood Ratio Test Statistic for Scan Test } \description{ Calculate the Likelihood Ratio Test Statistic for the Scan Test, at each spatial location.
} \usage{ scanLRTS(X, r, \dots, method = c("poisson", "binomial"), baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), saveopt = FALSE, Xmask = NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{saveopt}{ Logical value indicating whether to save the optimal value of \code{r} at each location. } \item{Xmask}{ Internal use only. } } \details{ This command computes, for all spatial locations \code{u}, the Likelihood Ratio Test Statistic \eqn{\Lambda(u)}{Lambda(u)} for a test of homogeneity at the location \eqn{u}, as described below. The result is a pixel image giving the values of \eqn{\Lambda(u)}{Lambda(u)} at each pixel. The \bold{maximum} value of \eqn{\Lambda(u)}{Lambda(u)} over all locations \eqn{u} is the \emph{scan statistic}, which is the basis of the \emph{scan test} performed by \code{\link{scan.test}}. \itemize{ \item If \code{method="poisson"} then the test statistic is based on Poisson likelihood. The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). At the spatial location \eqn{u}, the alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside the circle of radius \code{r} centred at \eqn{u}, and another intensity \eqn{\beta_0}{beta0} outside the circle. If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside the circle, and \code{beta0 * baseline} outside the circle. \item If \code{method="binomial"} then the test statistic is based on binomial likelihood. The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that the circle of radius \code{r} centred at \eqn{u} has a higher proportion of points of the second type, than expected under the null hypothesis. } If \code{r} is a vector of more than one value for the radius, then the calculations described above are performed for every value of \code{r}. Then the maximum over \code{r} is taken for each spatial location \eqn{u}. The resulting pixel value of \code{scanLRTS} at a location \eqn{u} is the profile maximum of the Likelihood Ratio Test Statistic, that is, the maximum of the Likelihood Ratio Test Statistic for circles of all radii, centred at the same location \eqn{u}.
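For example, a small sketch of this profile calculation (the radii chosen here are arbitrary):
\preformatted{
  # profile maximum of the test statistic over two candidate radii
  LR <- scanLRTS(redwood, r=c(0.05, 0.1), method="poisson")
  plot(LR)
}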
If you have already performed a scan test using \code{\link{scan.test}}, the Likelihood Ratio Test Statistic can be extracted from the test result using the function \code{\link{as.im.scan.test}}. } \section{Warning: window size}{ Note that the result of \code{scanLRTS} is a pixel image on a larger window than the original window of \code{X}. The expanded window contains the centre of any circle of radius \code{r} that has nonempty intersection with the original window. } \value{ A pixel image (object of class \code{"im"}) whose pixel values are the values of the (profile) Likelihood Ratio Test Statistic at each spatial location. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. } \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{as.im.scan.test}} } \examples{ plot(scanLRTS(redwood, 0.1, method="poisson")) sc <- scanLRTS(chorley, 1, method="binomial", case="larynx") plot(sc) scanstatchorley <- max(sc) } \keyword{htest} \keyword{spatial} spatstat.core/man/edge.Trans.Rd0000644000176200001440000001124514141452520016101 0ustar liggesusers\name{edge.Trans} \alias{edge.Trans} \alias{rmax.Trans} \title{ Translation Edge Correction } \description{ Computes Ohser and Stoyan's translation edge correction weights for a point pattern. } \usage{ edge.Trans(X, Y = X, W = Window(X), exact = FALSE, paired = FALSE, ..., trim = spatstat.options("maxedgewt"), dx=NULL, dy=NULL, give.rmax=FALSE, gW=NULL) rmax.Trans(W, g=setcov(W)) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{exact}{ Logical. If \code{TRUE}, a slow algorithm will be used to compute the exact value. If \code{FALSE}, a fast algorithm will be used to compute the approximate value. } \item{paired}{ Logical value indicating whether \code{X} and \code{Y} are paired. If \code{TRUE}, compute the edge correction for corresponding points \code{X[i], Y[i]} for all \code{i}. If \code{FALSE}, compute the edge correction for each possible pair of points \code{X[i], Y[j]} for all \code{i} and \code{j}. } \item{\dots}{Ignored.} \item{trim}{ Maximum permitted value of the edge correction weight. } \item{dx,dy}{ Alternative data giving the \eqn{x} and \eqn{y} coordinates of the vector differences between the points. Incompatible with \code{X} and \code{Y}. See Details. } \item{give.rmax}{ Logical. If \code{TRUE}, also compute the value of \code{rmax.Trans(W)} and return it as an attribute of the result. } \item{g, gW}{ Optional. Set covariance of \code{W}, if it has already been computed. Not required if \code{W} is a rectangle. } } \details{ The function \code{edge.Trans} computes Ohser and Stoyan's translation edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Trans} computes the maximum value of distance \eqn{r} for which the translation edge correction estimate of \eqn{K(r)} is valid. For a pair of points \eqn{x} and \eqn{y} in a window \eqn{W}, the translation edge correction weight is \deqn{ e(u, r) = \frac{\mbox{area}(W)}{\mbox{area}(W \cap (W + y - x))} }{ e(u, r) = area(W) / area(intersect(W, W + y - x)) } where \eqn{W + y - x} is the result of shifting the window \eqn{W} by the vector \eqn{y - x}. The denominator is the area of the overlap between this shifted window and the original window. The function \code{edge.Trans} computes this edge correction weight. 
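As an illustration of this definition (using a standard dataset):
\preformatted{
  # matrix of weights e(X[i], X[j]) for all pairs of points in 'cells'
  e <- edge.Trans(cells)
  dim(e)      # npoints(cells) by npoints(cells)
  range(e)    # weights are at least 1, and capped at 'trim'
}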
If \code{paired=TRUE}, then \code{X} and \code{Y} should contain the same number of points. The result is a vector containing the edge correction weights \code{e(X[i], Y[i])} for each \code{i}. If \code{paired=FALSE}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], Y[j])}. Computation is exact if the window is a rectangle. Otherwise, \itemize{ \item if \code{exact=TRUE}, the edge correction weights are computed exactly using \code{\link{overlap.owin}}, which can be quite slow. \item if \code{exact=FALSE} (the default), the weights are computed rapidly by evaluating the set covariance function \code{\link{setcov}} using the Fast Fourier Transform. } If any value of the edge correction weight exceeds \code{trim}, it is set to \code{trim}. The arguments \code{dx} and \code{dy} can be provided as an alternative to \code{X} and \code{Y}. If \code{paired=TRUE} then \code{dx,dy} should be vectors of equal length such that the vector difference of the \eqn{i}th pair is \code{c(dx[i], dy[i])}. If \code{paired=FALSE} then \code{dx,dy} should be matrices of the same dimensions, such that the vector difference between \code{X[i]} and \code{Y[j]} is \code{c(dx[i,j], dy[i,j])}. The argument \code{W} is needed. The value of \code{rmax.Trans} is the shortest distance from the origin \eqn{(0,0)} to the boundary of the support of the set covariance function of \code{W}. It is computed by pixel approximation using \code{\link{setcov}}, unless \code{W} is a rectangle, when \code{rmax.Trans(W)} is the length of the shortest side of the rectangle. } \value{ Numeric vector or matrix. } \references{ Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. } \seealso{ \code{\link{rmax.Trans}}, \code{\link{edge.Ripley}}, \code{\link{setcov}}, \code{\link{Kest}} } \examples{ v <- edge.Trans(cells) rmax.Trans(Window(cells)) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/clarkevans.Rd0000644000176200001440000001061514141452520016240 0ustar liggesusers\name{clarkevans} \alias{clarkevans} \title{Clark and Evans Aggregation Index} \description{ Computes the Clark and Evans aggregation index \eqn{R} for a spatial point pattern. } \usage{ clarkevans(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{correction}{ Character vector. The type of edge correction(s) to be applied. } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See Details. } } \details{ The Clark and Evans (1954) aggregation index \eqn{R} is a crude measure of clustering or ordering of a point pattern. It is the ratio of the observed mean nearest neighbour distance in the pattern to that expected for a Poisson point process of the same intensity. A value \eqn{R>1} suggests ordering, while \eqn{R<1} suggests clustering. Without correction for edge effects, the value of \code{R} will be positively biased. Edge effects arise because, for a point of \code{X} close to the edge of the window, the true nearest neighbour may actually lie outside the window. Hence observed nearest neighbour distances tend to be larger than the true nearest neighbour distances. The argument \code{correction} specifies an edge correction or several edge corrections to be applied. 
It is a character vector containing one or more of the options \code{"none"}, \code{"Donnelly"}, \code{"guard"} and \code{"cdf"} (which are recognised by partial matching). These edge corrections are: \describe{ \item{"none":}{ No edge correction is applied. } \item{"Donnelly":}{ Edge correction of Donnelly (1978), available for rectangular windows only. The theoretical expected value of mean nearest neighbour distance under a Poisson process is adjusted for edge effects by the edge correction of Donnelly (1978). The value of \eqn{R} is the ratio of the observed mean nearest neighbour distance to this adjusted theoretical mean. } \item{"guard":}{ Guard region or buffer area method. The observed mean nearest neighbour distance for the point pattern \code{X} is re-defined by averaging only over those points of \code{X} that fall inside the sub-window \code{clipregion}. } \item{"cdf":}{ Cumulative Distribution Function method. The nearest neighbour distance distribution function \eqn{G(r)} of the stationary point process is estimated by \code{\link{Gest}} using the Kaplan-Meier type edge correction. Then the mean of the distribution is calculated from the cdf. } } Alternatively \code{correction="all"} selects all options. If the argument \code{clipregion} is given, then the selected edge corrections will be assumed to include \code{correction="guard"}. To perform a test based on the Clark-Evans index, see \code{\link{clarkevans.test}}. } \value{ A numeric value, or a numeric vector with named components \item{naive}{\eqn{R} without edge correction} \item{Donnelly}{\eqn{R} using Donnelly edge correction} \item{guard}{\eqn{R} using guard region} \item{cdf}{\eqn{R} using cdf method} (as selected by \code{correction}). The value of the \code{Donnelly} component will be \code{NA} if the window of \code{X} is not a rectangle. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations. \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In I. Hodder (ed.) \emph{Simulation studies in archaeology}, Cambridge/New York: Cambridge University Press, pp 91--95. } \author{ John Rudge \email{rudge@esc.cam.ac.uk} with modifications by \adrian } \seealso{ \code{\link{clarkevans.test}}, \code{\link{hopskel}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ # Example of a clustered pattern clarkevans(redwood) # Example of an ordered pattern clarkevans(cells) # Random pattern X <- rpoispp(100) clarkevans(X) # How to specify a clipping region clip1 <- owin(c(0.1,0.9),c(0.1,0.9)) clip2 <- erosion(Window(cells), 0.1) clarkevans(cells, clipregion=clip1) clarkevans(cells, clipregion=clip2) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/localKcross.inhom.Rd0000644000176200001440000001374114141452520017502 0ustar liggesusers\name{localKcross.inhom} \alias{localKcross.inhom} \alias{localLcross.inhom} \title{Inhomogeneous Multitype K Function} \description{ Computes spatially-weighted versions of the local multitype \eqn{K}-function or \eqn{L}-function.
} \usage{ localKcross.inhom(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, \dots, rmax = NULL, correction = "Ripley", sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) localLcross.inhom(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, \dots, rmax = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{to}{ Type of points to which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{lambdaFrom,lambdaTo}{ Optional. Values of the estimated intensity function for the points of type \code{from} and \code{to}, respectively. Each argument should be either a vector giving the intensity values at the required points, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambdaFrom} and \code{lambdaTo} are present. Passed to \code{\link{density.ppp}} if these intensity values are omitted. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambdaFrom} and \code{lambdaTo}, if they are missing. } \item{lambdaX}{ Optional. Values of the estimated intensity function for all points of \code{X}. Either a vector giving the intensity values at each point of \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a list of pixel images giving the intensity values at all locations for each type of point, or a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} or \code{function(x,y,m)} which can be evaluated to give the intensity value at any location. } \item{update}{ Logical value indicating what to do when \code{lambdaFrom}, \code{lambdaTo} or \code{lambdaX} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \details{ The functions \code{localKcross.inhom} and \code{localLcross.inhom} are inhomogeneous or weighted versions of the local multitype \eqn{K} and \eqn{L} functions implemented in \code{\link{localKcross}} and \code{\link{localLcross}}.
Given a multitype spatial point pattern \code{X}, and two designated types \code{from} and \code{to}, the local multitype \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and is computed by \deqn{ K_i(r) = \sum_j \frac{e_{ij}}{\lambda_j} }{ K[i](r) = sum[j] e[i,j]/lambda[j] } where the sum is over all points \eqn{j \neq i}{j != i} of type \code{to} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The function \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} of type \code{from}. The corresponding \eqn{L} function \eqn{L_i(r)}{L[i](r)} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{ X <- amacrine # compute all the local L functions L <- localLcross.inhom(X) # plot all the local L functions against r plot(L, main="local L functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/plot.fasp.Rd0000644000176200001440000001204214141452520016011 0ustar liggesusers\name{plot.fasp} \alias{plot.fasp} \title{Plot a Function Array} \description{ Plots an array of summary functions, usually associated with a point pattern, stored in an object of class \code{"fasp"}. A method for \code{plot}. } \usage{ \method{plot}{fasp}(x,formule=NULL, \dots, subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) } \arguments{ \item{x}{An object of class \code{"fasp"} representing a function array. } \item{formule}{ A formula or list of formulae indicating what variables are to be plotted against what variable. Each formula is either an R language formula object, or a string that can be parsed as a formula. If \code{formule} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. If the formula is left as \code{NULL}, then \code{plot.fasp} attempts to use the component \code{default.formula} of \code{x}. If that component is NULL as well, it gives up. } \item{\dots}{ Arguments passed to \code{\link{plot.fv}} to control the individual plot panels.
} \item{subset}{ A logical vector, or a vector of indices, or an expression or a character string, or a \bold{list} of such, indicating a subset of the data to be included in each plot. If \code{subset} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. } \item{title}{ Overall title for the plot. } \item{banner}{ Logical. If \code{TRUE}, the overall title is plotted. If \code{FALSE}, the overall title is not plotted and no space is allocated for it. } \item{transpose}{ Logical. If \code{TRUE}, rows and columns will be exchanged. } \item{samex,samey}{ Logical values indicating whether all individual plot panels should have the same x axis limits and the same y axis limits, respectively. This makes it easier to compare the plots. } \item{mar.panel}{ Vector of length 4 giving the value of the graphics parameter \code{mar} controlling the size of plot margins for each individual plot panel. See \code{\link{par}}. } \item{outerlabels}{Logical. If \code{TRUE}, the row and column names of the array of functions are plotted in the margins of the array of plot panels. If \code{FALSE}, each individual plot panel is labelled by its row and column name. } \item{cex.outerlabels}{ Character expansion factor for row and column labels of array. } \item{legend}{ Logical flag determining whether to plot a legend in each panel. } } \details{ An object of class \code{"fasp"} represents an array of summary functions, usually associated with a point pattern. See \code{\link{fasp.object}} for details. Such an object is created, for example, by \code{\link[spatstat.core]{alltypes}}. The function \code{plot.fasp} is a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the individual panels. For information about the interpretation of the arguments \code{formule} and \code{subset}, see \code{\link{plot.fv}}. Arguments that are often passed through \code{...} include \code{col} to control the colours of the different lines in a panel, and \code{lty} and \code{lwd} to control the line type and line width of the different lines in a panel. The argument \code{shade} can also be used to display confidence intervals or significance bands as filled grey shading. See \code{\link{plot.fv}}. The argument \code{title}, if present, will determine the overall title of the plot. If it is absent, it defaults to \code{x$title}. Titles for the individual plot panels will be taken from \code{x$titles}. } \value{None.} \section{Warnings}{ (Each component of) the \code{subset} argument may be a logical vector (of the same length as the vectors of data which are extracted from \code{x}), or a vector of indices, or an \bold{expression} such as \code{expression(r<=0.2)}, or a text string, such as \code{"r<=0.2"}. Attempting a syntax such as \code{subset = r<=0.2} (without wrapping \code{r<=0.2} either in quote marks or in \code{expression()}) will cause this function to fall over. Variables referred to in any formula must exist in the data frames stored in \code{x}. What the names of these variables are will of course depend upon the nature of \code{x}. 
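To illustrate the point about quoting, a sketch (assuming \code{X.G} is a function array as created in the Examples below):
\preformatted{
  plot(X.G, subset="r<=0.2")            # character string: correct
  plot(X.G, subset=expression(r<=0.2))  # expression: correct
  # plot(X.G, subset=r<=0.2)            # unquoted: will fail
}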
} \seealso{ \code{\link[spatstat.core]{alltypes}}, \code{\link{plot.fv}}, \code{\link{fasp.object}} } \examples{ if(interactive()) { X.G <- alltypes(amacrine,"G") plot(X.G) plot(X.G,subset="r<=0.2") plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo))) plot(X.G,fo=cbind(km,theo) - theo~r, subset="theo<=0.9") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat.core/man/default.expand.Rd0000644000176200001440000000771514141452520017020 0ustar liggesusers\name{default.expand} \alias{default.expand} \title{Default Expansion Rule for Simulation of Model} \description{ Defines the default expansion window or expansion rule for simulation of a fitted point process model. } \usage{ default.expand(object, m=2, epsilon=1e-6, w=Window(object)) } \arguments{ \item{object}{ A point process model (object of class \code{"ppm"} or \code{"rmhmodel"}). } \item{m}{ A single numeric value. The window will be expanded by a distance \code{m * reach(object)} along each side. } \item{epsilon}{ Threshold argument passed to \code{\link{reach}} to determine \code{reach(object)}. } \item{w}{ Optional. The un-expanded window in which the model is defined. The resulting simulated point patterns will lie in this window. } } \value{ A window expansion rule (object of class \code{"rmhexpand"}). } \details{ This function computes a default value for the expansion rule (the argument \code{expand} in \code{\link{rmhcontrol}}) given a fitted point process model \code{object}. This default is used by \code{\link{envelope}}, \code{\link{qqplot.ppm}}, \code{\link{simulate.ppm}} and other functions. Suppose we wish to generate simulated realisations of a fitted point process model inside a window \code{w}. It is advisable to first simulate the pattern on a larger window, and then clip it to the original window \code{w}. This avoids edge effects in the simulation. It is called \emph{expansion} of the simulation window. Accordingly, for the Metropolis-Hastings simulation algorithm \code{\link{rmh}}, the algorithm control parameters specified by \code{\link{rmhcontrol}} include an argument \code{expand} that determines the expansion of the simulation window. The function \code{default.expand} determines the default expansion rule for a fitted point process model \code{object}. If the model is Poisson, then no expansion is necessary. No expansion is performed by default, and \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = Window(object)}. If the model depends on external covariates (i.e.\ covariates other than the Cartesian covariates \code{x} and \code{y} and the \code{marks}) then no expansion is feasible, in general, because the spatial domain of the covariates is not guaranteed to be large enough. \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = Window(object)}. If the model depends on the Cartesian covariates \code{x} and \code{y}, it would be feasible to expand the simulation window, and this was the default for \pkg{spatstat} version 1.24-1 and earlier. However this sometimes produces artefacts (such as an empty point pattern) or memory overflow, because the fitted trend, extrapolated outside the original window of the data, may become very large. In \pkg{spatstat} version 1.24-2 and later, the default rule is \emph{not} to expand if the model depends on \code{x} or \code{y}. Again \code{default.expand} returns a rule representing no expansion. 
Otherwise, expansion will occur. The original window \code{w = Window(object)} is expanded by a distance \code{m * rr}, where \code{rr} is the interaction range of the model, computed by \code{\link{reach}}. If \code{w} is a rectangle then each edge of \code{w} is displaced outward by distance \code{m * rr}. If \code{w} is not a rectangle then \code{w} is dilated by distance \code{m * rr} using \code{\link{dilation}}. } \seealso{ \code{\link{rmhexpand}}, \code{\link{rmhcontrol}}, \code{\link{rmh}}, \code{\link{envelope}}, \code{\link{qqplot.ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(0.07)) default.expand(fit) mod <- rmhmodel(cif="strauss", par=list(beta=100, gamma=0.5, r=0.07)) default.expand(mod) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/rknn.Rd0000644000176200001440000000365114141452520015061 0ustar liggesusers\name{rknn} \alias{dknn} \alias{pknn} \alias{qknn} \alias{rknn} \title{ Theoretical Distribution of Nearest Neighbour Distance } \description{ Density, distribution function, quantile function and random generation for the random distance to the \eqn{k}th nearest neighbour in a Poisson point process in \eqn{d} dimensions. } \usage{ dknn(x, k = 1, d = 2, lambda = 1) pknn(q, k = 1, d = 2, lambda = 1) qknn(p, k = 1, d = 2, lambda = 1) rknn(n, k = 1, d = 2, lambda = 1) } \arguments{ \item{x,q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations to be generated.} \item{k}{order of neighbour.} \item{d}{dimension of space.} \item{lambda}{intensity of Poisson point process.} } \details{ In a Poisson point process in \eqn{d}-dimensional space, let the random variable \eqn{R} be the distance from a fixed point to the \eqn{k}-th nearest random point, or the distance from a random point to the \eqn{k}-th nearest other random point. Then \eqn{R^d} has a Gamma distribution with shape parameter \eqn{k} and rate \eqn{\lambda * \alpha}{lambda * alpha} where \eqn{\alpha}{alpha} is a constant (equal to the volume of the unit ball in \eqn{d}-dimensional space). See e.g. Cressie (1991, page 61). These functions support calculation and simulation for the distribution of \eqn{R}. } \value{ A numeric vector: \code{dknn} returns the probability density, \code{pknn} returns cumulative probabilities (distribution function), \code{qknn} returns quantiles, and \code{rknn} generates random deviates. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons. } \author{\adrian and \rolf } \examples{ x <- seq(0, 5, length=20) densities <- dknn(x, k=3, d=2) cdfvalues <- pknn(x, k=3, d=2) randomvalues <- rknn(100, k=3, d=2) deciles <- qknn((1:9)/10, k=3, d=2) } \keyword{spatial} \keyword{distribution} spatstat.core/man/Kinhom.Rd0000644000176200001440000003536214141452520015342 0ustar liggesusers\name{Kinhom} \alias{Kinhom} \title{Inhomogeneous K-function} \description{ Estimates the inhomogeneous \eqn{K} function of a non-stationary point pattern. } \usage{ Kinhom(X, lambda=NULL, \dots, r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{K} function will be computed.
An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed, using a fast algorithm. } \item{lambda2}{ Advanced use only. Matrix containing estimates of the products \eqn{\lambda(x_i)\lambda(x_j)}{lambda(x[i]) * lambda(x[j])} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{reciplambda2}{ Advanced use only. Alternative to \code{lambda2}. A matrix giving values of the estimated \emph{reciprocal products} \eqn{1/\lambda(x_i)\lambda(x_j)}{1/(lambda(x[i]) * lambda(x[j]))} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{diagonal}{ Do not use this argument. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}. } \details{ This computes a generalisation of the \eqn{K} function for inhomogeneous point patterns, proposed by Baddeley, \Moller and Waagepetersen (2000). The ``ordinary'' \eqn{K} function (variously known as the reduced second order moment function and Ripley's \eqn{K} function), is described under \code{\link{Kest}}. It is defined only for stationary point processes. The inhomogeneous \eqn{K} function \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is a direct generalisation to nonstationary point processes. Suppose \eqn{x} is a point process with non-constant intensity \eqn{\lambda(u)}{lambda(u)} at each location \eqn{u}. Define \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} to be the expected value, given that \eqn{u} is a point of \eqn{x}, of the sum of all terms \eqn{1/\lambda(x_j)}{1/lambda(x[j])} over all points \eqn{x_j}{x[j]} in the process separated from \eqn{u} by a distance less than \eqn{r}. This reduces to the ordinary \eqn{K} function if \eqn{\lambda()}{lambda()} is constant. If \eqn{x} is an inhomogeneous Poisson process with intensity function \eqn{\lambda(u)}{lambda(u)}, then \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) = \pi r^2}{Kinhom(r) = pi * r^2}. Given a point pattern dataset, the inhomogeneous \eqn{K} function can be estimated essentially by summing the values \eqn{1/(\lambda(x_i)\lambda(x_j))}{1/(lambda(x[i]) * lambda(x[j]))} for all pairs of points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance less than \eqn{r}. This allows us to inspect a point pattern for evidence of interpoint interactions after allowing for spatial inhomogeneity of the pattern. Values \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) > \pi r^2}{Kinhom(r) > pi * r^2} are suggestive of clustering. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) 
} \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Edge corrections are used to correct bias in the estimation of \eqn{K_{\mbox{\scriptsize\rm inhom}}}{Kinhom}. Each edge-corrected estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is of the form \deqn{ \widehat K_{\mbox{\scriptsize\rm inhom}}(r) = (1/A) \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j,r)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/A) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j],r)/(lambda(x[i]) * lambda(x[j])) } where \code{A} is a constant denominator, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r)}{e(x[i],x[j],r)} is an edge correction factor. For the `border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\sum_j 1(b_j > r)/\lambda(x_j)} }{ 1(b[i] > r)/(sum[j] 1(b[j] > r)/lambda(x[j])) } where \eqn{b_i}{b[i]} is the distance from \eqn{x_i}{x[i]} to the boundary of the window. For the `modified border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\mbox{area}(W \ominus r)} }{ 1(b[i] > r)/area(W [-] r) } where \eqn{W \ominus r}{W [-] r} is the eroded window obtained by trimming a margin of width \eqn{r} from the border of the original window. For the `translation' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W \cap (W + (x_j - x_i)))} }{ 1/area(W intersect (W + x[j]-x[i])) } and for the `isotropic' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W) g(x_i,x_j)} }{ 1/(area(W) g(x[i],x[j])) } where \eqn{g(x_i,x_j)}{g(x[i],x[j])} is the fraction of the circumference of the circle with centre \eqn{x_i}{x[i]} and radius \eqn{||x_i - x_j||}{||x[i]-x[j]||} which lies inside the window.
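To illustrate the different ways of supplying \code{lambda}, here is a minimal sketch (where \code{X} stands for any unmarked point pattern): \preformatted{
  Ki <- Kinhom(X)                # lambda estimated by leave-one-out smoothing
  lam <- density(X, at="points", leaveoneout=TRUE)
  Ki <- Kinhom(X, lambda=lam)    # lambda given as a numeric vector
  lamim <- density(X)
  Ki <- Kinhom(X, lambda=lamim)  # lambda given as a pixel image
}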
If \code{renormalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } If the point pattern \code{X} contains more than about 1000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. The pair correlation function can also be applied to the result of \code{Kinhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # inhomogeneous pattern of maples X <- unmark(split(lansing)$maple) \testonly{ sub <- sample(c(TRUE,FALSE), npoints(X), replace=TRUE, prob=c(0.1,0.9)) X <- X[sub] } # (1) intensity function estimated by model-fitting # Fit spatial trend: polynomial in x and y coordinates fit <- ppm(X, ~ polynom(x,y,2), Poisson()) # (a) predict intensity values at points themselves, # obtaining a vector of lambda values lambda <- predict(fit, locations=X, type="trend") # inhomogeneous K function Ki <- Kinhom(X, lambda) plot(Ki) # (b) predict intensity at all locations, # obtaining a pixel image lambda <- predict(fit, type="trend") Ki <- Kinhom(X, lambda) plot(Ki) # (2) intensity function estimated by heavy smoothing Ki <- Kinhom(X, sigma=0.1) plot(Ki) # (3) simulated data: known intensity function lamfun <- function(x,y) { 50 + 100 * x } # inhomogeneous Poisson process Y <- rpoispp(lamfun, 150, owin()) # inhomogeneous K function Ki <- Kinhom(Y, lamfun) plot(Ki) # How to make simulation envelopes: # Example shows method (2) if(interactive()) { smo <- density.ppp(X, sigma=0.1) Ken <- envelope(X, Kinhom, nsim=99, simulate=expression(rpoispp(smo)), sigma=0.1, correction="trans") plot(Ken) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pcfcross.Rd0000644000176200001440000001455614141452520015741 0ustar liggesusers\name{pcfcross} \alias{pcfcross} \title{Multitype pair correlation function (cross-type)} \description{ Calculates an estimate of the cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross(X, i, j, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. 
It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \details{ The cross-type pair correlation function is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. For two locations \eqn{x} and \eqn{y} separated by a distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of type \eqn{j} at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda_j g_{i,j}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda[j] * g[i,j](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda_j}{p(r) = lambda[i] * lambda[j]} so \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. Indeed for any marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the cross-type pair correlation is \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. For a stationary multitype point process, the cross-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i,j}(r) = \frac{K_{i,j}^\prime(r)}{2\pi r} }{ g(r) = K[i,j]'(r)/ ( 2 * pi * r) } where \eqn{K_{i,j}^\prime}{K[i,j]'(r)} is the derivative of the cross-type \eqn{K} function \eqn{K_{i,j}(r)}{K[i,j](r)} of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. The command \code{pcfcross} computes a kernel estimate of the cross-type pair correlation function between marks \eqn{i} and \eqn{j}. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction.
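For example, the following call (a minimal sketch, using the \code{amacrine} data as in the Examples below) combines the modified divisor with Ripley's isotropic correction: \preformatted{
  g <- pcfcross(amacrine, "off", "on", divisor="d", correction="isotropic")
}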
The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfdot}} computes the corresponding analogue of \code{\link{Kdot}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i,j}}{g[i,j]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i,j}}{g[i,j]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfdot}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}}, \code{\link{pcf.ppp}}. \code{\link{Kcross}} } \examples{ data(amacrine) p <- pcfcross(amacrine, "off", "on") p <- pcfcross(amacrine, "off", "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/parameters.Rd0000644000176200001440000000301614141452520016247 0ustar liggesusers\name{parameters} \alias{parameters} \alias{parameters.dppm} \alias{parameters.kppm} \alias{parameters.slrm} \alias{parameters.ppm} \alias{parameters.profilepl} \alias{parameters.interact} \alias{parameters.fii} \title{ Extract Model Parameters in Understandable Form } \description{ Given a fitted model of some kind, this function extracts all the parameters needed to specify the model, and returns them as a list. } \usage{ parameters(model, \dots) \method{parameters}{dppm}(model, \dots) \method{parameters}{kppm}(model, \dots) \method{parameters}{slrm}(model, \dots) \method{parameters}{ppm}(model, \dots) \method{parameters}{profilepl}(model, \dots) \method{parameters}{fii}(model, \dots) \method{parameters}{interact}(model, \dots) } \arguments{ \item{model}{ A fitted model of some kind. } \item{\dots}{ Arguments passed to methods. } } \details{ The argument \code{model} should be a fitted model of some kind. This function extracts all the parameters that would be needed to specify the model, and returns them as a list. The function \code{parameters} is generic, with methods for class \code{"ppm"}, \code{"kppm"}, \code{"dppm"} and \code{"profilepl"} and other classes. } \value{ A named list, whose format depends on the fitted model.
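For example, the structure of the result can be inspected with \code{str} (a minimal sketch; the component names depend on the class of model): \preformatted{
  fit <- ppm(cells ~ x, Strauss(0.1))
  str(parameters(fit))
}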
} \author{ \spatstatAuthors } \seealso{ \code{\link{coef}} } \examples{ parameters(Strauss(0.1)) fit1 <- ppm(cells ~ x, Strauss(0.1)) parameters(fit1) fit2 <- kppm(redwood ~ x, "Thomas") parameters(fit2) } \keyword{spatial} \keyword{models} spatstat.core/man/rNeymanScott.Rd0000644000176200001440000002245414141452520016541 0ustar liggesusers\name{rNeymanScott} \alias{rNeymanScott} \title{Simulate Neyman-Scott Process} \description{ Generate a random point pattern, a realisation of the Neyman-Scott cluster process. } \usage{ rNeymanScott(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{expand}{ Size of the expansion of the simulation window for generating parent points. A single non-negative number. } \item{rcluster}{ A function which generates random clusters, or other data specifying the random cluster mechanism. See Details. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster}. } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{nonempty}{ Logical. If \code{TRUE} (the default), a more efficient algorithm is used, in which parents are generated conditionally on having at least one offspring point. If \code{FALSE}, parents are generated even if they have no offspring. Both choices are valid; the default is recommended unless you need to simulate all the parent points for some other purpose. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern: see Details. } \details{ This algorithm generates a realisation of the general Neyman-Scott process, with the cluster mechanism given by the function \code{rcluster}. First, the algorithm generates a Poisson point process of \dQuote{parent} points with intensity \code{kappa} in an expanded window as explained below. Here \code{kappa} may be a single positive number, a function \code{kappa(x,y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points. These clusters are combined together to yield a single point pattern, and the restriction of this pattern to the window \code{win} is then returned as the result of \code{rNeymanScott}. The expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{expand} in each direction. The size of the expansion is saved in the attribute \code{"expand"} and may be extracted by \code{attr(X, "expand")} where \code{X} is the generated point pattern. The argument \code{rcluster} specifies the cluster mechanism. 
It may be either: \itemize{ \item A \code{function} which will be called to generate each random cluster (the offspring points of each parent point). The function should expect to be called in the form \code{rcluster(x0,y0,\dots)} for a parent point at a location \code{(x0,y0)}. The return value of \code{rcluster} should specify the coordinates of the points in the cluster; it may be a list containing elements \code{x,y}, or a point pattern (object of class \code{"ppp"}). If it is a marked point pattern then the result of \code{rNeymanScott} will be a marked point pattern. \item A \code{list(mu, f)} where \code{mu} specifies the mean number of offspring points in each cluster, and \code{f} generates the random displacements (vectors pointing from the parent to the offspring). In this case, the number of offspring in a cluster is assumed to have a Poisson distribution, implying that the Neyman-Scott process is also a Cox process. The first element \code{mu} should be either a single nonnegative number (interpreted as the mean of the Poisson distribution of cluster size) or a pixel image or a \code{function(x,y)} giving a spatially varying mean cluster size (interpreted in the sense of Waagepetersen, 2007). The second element \code{f} should be a function that will be called once in the form \code{f(n)} to generate \code{n} independent and identically distributed displacement vectors (i.e. as if there were a cluster of size \code{n} with a parent at the origin \code{(0,0)}). The function should return a point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link[grDevices]{xy.coords}} that specifies the coordinates of \code{n} points. } If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rNeymanScott} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. Neyman-Scott models where \code{kappa} is a single number and \code{rcluster = list(mu,f)} can be fitted to data using the function \code{\link{kppm}}. } \section{Inhomogeneous Neyman-Scott Processes}{ There are several different ways of specifying a spatially inhomogeneous Neyman-Scott process: \itemize{ \item The point process of parent points can be inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process according to which the parent points are generated. \item The number of points in a typical cluster can be spatially varying. If the argument \code{rcluster} is a list of two elements \code{mu, f} and the first entry \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then \code{mu} is interpreted as the reference intensity for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu(x, y) * g(x-x0, y-y0)} where \code{g} is the probability density of the offspring displacements generated by the function \code{f}. Equivalently, clusters are first generated with a constant expected number of points per cluster: the constant is \code{mumax}, the maximum of \code{mu}. 
Then the offspring are randomly \emph{thinned} (see \code{\link{rthin}}) with spatially-varying retention probabilities given by \code{mu/mumax}. \item The entire mechanism for generating a cluster can be dependent on the location of the parent point. If the argument \code{rcluster} is a function, then the cluster associated with a parent point at location \code{(x0,y0)} will be generated by calling \code{rcluster(x0, y0, \dots)}. The behaviour of this function could depend on the location \code{(x0,y0)} in any fashion. } Note that if \code{kappa} is an image, the spatial domain covered by this image must be large enough to include the \emph{expanded} window in which the parent points are to be generated. This requirement means that \code{win} must be small enough so that the expansion of \code{as.rectangle(win)} is contained in the spatial domain of \code{kappa}. As a result, one may wind up having to simulate the process in a window smaller than what is really desired. In the first two cases, the intensity of the Neyman-Scott process is equal to \code{kappa * mu} if at least one of \code{kappa} or \code{mu} is a single number, and is otherwise equal to an integral involving \code{kappa}, \code{mu} and \code{f}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}} } \examples{ # each cluster consists of 5 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rNeymanScott(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rNeymanScott(15, 0.1, nclust2, radius=0.1, n=5)) } \references{ Neyman, J. and Scott, E.L. (1958) A statistical approach to problems of cosmology. \emph{Journal of the Royal Statistical Society, Series B} \bold{20}, 1--43. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/rDGS.Rd0000644000176200001440000000733014141452520014706 0ustar liggesusers\name{rDGS} \alias{rDGS} \title{Perfect Simulation of the Diggle-Gates-Stibbard Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gates-Stibbard process, using a perfect simulation algorithm. } \usage{ rDGS(beta, rho, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{rho}{ interaction range (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern.
} } \details{ This function generates a realisation of the Diggle-Gates-Stibbard point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((pi * d)/(2 * rho)) } for \eqn{d < \rho}{d < rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d >= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} \bold{33}, 351--367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} \bold{30}, 549--564. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763--770. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian, based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rDGS(50, 0.05) Z <- rDGS(50, 0.03, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGatesStibbard}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat.core/man/Smooth.ssf.Rd0000644000176200001440000000157514141452520016157 0ustar liggesusers\name{Smooth.ssf} \alias{Smooth.ssf} \title{ Smooth a Spatially Sampled Function } \description{ Applies kernel smoothing to a spatially sampled function. } \usage{ \method{Smooth}{ssf}(X, \dots) } \arguments{ \item{X}{ Object of class \code{"ssf"}. } \item{\dots}{ Arguments passed to \code{\link[spatstat.core]{Smooth.ppp}} to control the smoothing. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. The function values will be smoothed using a Gaussian kernel. } \value{ A pixel image or a list of pixel images. } \author{ \adrian.
} \seealso{ \code{\link{ssf}}, \code{\link[spatstat.core]{Smooth.ppp}} } \examples{ f <- ssf(redwood, nndist(redwood)) Smooth(f, sigma=0.1) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/plot.ssf.Rd0000644000176200001440000000554214141452520015662 0ustar liggesusers\name{plot.ssf} \alias{plot.ssf} \alias{image.ssf} \alias{contour.ssf} \title{ Plot a Spatially Sampled Function } \description{ Plot a spatially sampled function object. } \usage{ \method{plot}{ssf}(x, \dots, how = c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma = NULL, contourargs=list()) \method{image}{ssf}(x, \dots) \method{contour}{ssf}(x, ..., main, sigma = NULL) } \arguments{ \item{x}{ Spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{image.default}} or \code{\link[spatstat.geom]{plot.ppp}} to control the plot. } \item{how}{ Character string determining whether to display the function values at the data points (\code{how="points"}), a smoothed interpolation of the function (\code{how="smoothed"}), or the function value at the nearest data point (\code{how="nearest"}). } \item{style}{ Character string indicating whether to plot the smoothed function as a colour image, a contour map, or both. } \item{contourargs}{ Arguments passed to \code{\link[graphics]{contour.default}} to control the contours, if \code{style="contour"} or \code{style="imagecontour"}. } \item{sigma}{ Smoothing bandwidth for smooth interpolation. } \item{main}{ Optional main title for the plot. } } \details{ These are methods for the generic \code{\link[graphics]{plot}}, \code{\link[graphics]{image}} and \code{\link[graphics]{contour}} for the class \code{"ssf"}. An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. For \code{plot.ssf} there are three types of display. If \code{how="points"} the exact function values will be displayed as circles centred at the locations where they were computed. If \code{how="smoothed"} (the default) these values will be kernel-smoothed using \code{\link{Smooth.ppp}} and displayed as a pixel image. If \code{how="nearest"} the values will be interpolated by nearest neighbour interpolation using \code{\link[spatstat.geom]{nnmark}} and displayed as a pixel image. For \code{image.ssf} and \code{contour.ssf} the values are kernel-smoothed before being displayed. } \value{ \code{NULL}. } \references{ Baddeley, A. (2017) Local composite likelihood for spatial point processes. \emph{Spatial Statistics} \bold{22}, 261--295. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, nndist(cells, k=1:3)) plot(a, how="points") plot(a, how="smoothed") plot(a, how="nearest") } \keyword{spatial} \keyword{hplot} spatstat.core/man/coef.slrm.Rd0000644000176200001440000000177614141452520016007 0ustar liggesusers\name{coef.slrm} \Rdversion{1.1} \alias{coef.slrm} \title{ Coefficients of Fitted Spatial Logistic Regression Model } \description{ Extracts the coefficients (parameters) from a fitted Spatial Logistic Regression model. } \usage{ \method{coef}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. 
} } \details{ This is a method for \code{\link{coef}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It extracts the fitted canonical parameters, i.e.\ the coefficients in the linear predictor of the spatial logistic regression. } \value{ Numeric vector of coefficients. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) coef(fit) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/dg.sigtrace.Rd0000644000176200001440000001503314141452520016300 0ustar liggesusers\name{dg.sigtrace} \alias{dg.sigtrace} \title{ Significance Trace of Dao-Genton Test } \description{ Generates a Significance Trace of the Dao and Genton (2014) test for a spatial point pattern. } \usage{ dg.sigtrace(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, alternative = c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate = FALSE, confint = TRUE, alpha = 0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. } \item{exponent}{ Positive number. Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical flag indicating whether to print progress reports. 
} } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dg.sigtrace} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Dao-Genton adjusted \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion. If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R >= rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3), 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Bowman, A.W. and Azzalini, A. (1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dg.test}} for the Dao-Genton test, \code{\link{dclf.sigtrace}} for significance traces of other tests.
} \examples{ ns <- if(interactive()) 19 else 5 plot(dg.sigtrace(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat.core/man/range.fv.Rd0000644000176200001440000000236014141452520015613 0ustar liggesusers\name{range.fv} \alias{range.fv} \alias{max.fv} \alias{min.fv} \title{ Range of Function Values } \description{ Compute the range, maximum, or minimum of the function values in a summary function. } \usage{ \method{range}{fv}(\dots, na.rm = TRUE, finite = na.rm) \method{max}{fv}(\dots, na.rm = TRUE, finite = na.rm) \method{min}{fv}(\dots, na.rm = TRUE, finite = na.rm) } \arguments{ \item{\dots}{ One or more function value tables (objects of class \code{"fv"} representing summary functions) or other data. } \item{na.rm}{ Logical. Whether to ignore \code{NA} values. } \item{finite}{ Logical. Whether to ignore values that are infinite, \code{NaN} or \code{NA}. } } \details{ These are methods for the generic \code{\link[base]{range}}, \code{\link[base]{max}} and \code{\link[base]{min}}. They compute the range, maximum, and minimum of the \emph{function} values that would be plotted on the \eqn{y} axis by default. For more complicated calculations, use \code{\link{with.fv}}. } \value{ Numeric vector of length 2 (for \code{range}) or of length 1 (for \code{max} and \code{min}). } \seealso{ \code{\link{with.fv}} } \examples{ G <- Gest(cells) range(G) max(G) min(G) } \author{ \adrian, \rolf and \ege. } \keyword{spatial} \keyword{math} spatstat.core/man/rThomas.Rd0000644000176200001440000001521014141452520015520 0ustar liggesusers\name{rThomas} \alias{rThomas} \title{Simulate Thomas Process} \description{ Generate a random point pattern, a realisation of the Thomas cluster process. } \usage{ rThomas(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = 4*scale, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Standard deviation of random displacement (along each coordinate axis) of a point from its cluster centre. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. Has a sensible default. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the (`modified') Thomas process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being isotropic Gaussian displacements from the cluster parent location. The resulting point pattern is a realisation of the classical \dQuote{stationary Thomas process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the Thomas process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu * f}, where \code{f} is the Gaussian probability density centred at the parent point. Equivalently we first generate, for each parent point, a Poisson (\code{mumax}) random number of offspring (where \code{mumax} is the maximum value of \code{mu}) with independent Gaussian displacements from the parent location, and then randomly thin the offspring points, with retention probability \code{mu/mumax}. \item Both the parent points and the offspring points can be spatially inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{expand} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the Thomas process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{f}. The Thomas process with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}}. Currently it is not possible to fit the Thomas model with inhomogeneous parents. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}.
This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ #homogeneous X <- rThomas(10, 0.2, 5) #inhomogeneous Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin()) Y <- rThomas(10, 0.2, Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/rpoislinetess.Rd0000644000176200001440000000273614141452520017017 0ustar liggesusers\name{rpoislinetess} \alias{rpoislinetess} \title{Poisson Line Tessellation} \description{ Generate a tessellation delineated by the lines of the Poisson line process } \usage{ rpoislinetess(lambda, win = owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Currently, the window must be a rectangle. } } \details{ This algorithm generates a realisation of the uniform Poisson line process, and divides the window \code{win} into tiles separated by these lines. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. } \value{ A tessellation (object of class \code{"tess"}). Also has an attribute \code{"lines"} containing the realisation of the Poisson line process, as an object of class \code{"infline"}. } \author{\adrian and \rolf } \seealso{ \code{\link{rpoisline}} to generate the lines only. } \examples{ X <- rpoislinetess(3) plot(as.im(X), main="rpoislinetess(3)") plot(X, add=TRUE) } \keyword{spatial} \keyword{datagen} spatstat.core/man/vcov.mppm.Rd0000644000176200001440000000562414141452520016040 0ustar liggesusers\name{vcov.mppm} \alias{vcov.mppm} \title{Calculate Variance-Covariance Matrix for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model, calculate the variance-covariance matrix of the parameter estimates. } \usage{ \method{vcov}{mppm}(object, ..., what="vcov", err="fatal") } \arguments{ \item{object}{ A multiple point process model (object of class \code{"mppm"}). } \item{\dots}{ Arguments recognised by \code{\link{vcov.ppm}}. } \item{what}{ Character string indicating which quantity should be calculated. Options include \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } \item{err}{ Character string indicating what action to take if an error occurs. Either \code{"fatal"}, \code{"warn"} or \code{"null"}. } } \details{ This is a method for the generic function \code{\link{vcov}}. 
The argument \code{object} should be a fitted multiple point process model (object of class \code{"mppm"}) generated by \code{\link{mppm}}. The variance-covariance matrix of the parameter estimates is computed using asymptotic theory for maximum likelihood (for Poisson processes) or estimating equations (for other Gibbs models). If \code{what="vcov"} (the default), the variance-covariance matrix is returned. If \code{what="corr"}, the variance-covariance matrix is normalised to yield a correlation matrix, and this is returned. If \code{what="fisher"}, the Fisher information matrix is returned instead. In all three cases, the rows and columns of the matrix correspond to the parameters (coefficients) in the same order as in \code{coef(object)}. If errors or numerical problems occur, the argument \code{err} determines what will happen. If \code{err="fatal"} an error will occur. If \code{err="warn"} a warning will be issued and \code{NA} will be returned. If \code{err="null"}, no warning is issued, but \code{NULL} is returned. } \value{ A numeric matrix (or \code{NA} or \code{NULL}). } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{vcov}}, \code{\link{vcov.ppm}}, \code{\link{mppm}} } \examples{ fit <- mppm(Wat ~x, data=hyperframe(Wat=waterstriders)) vcov(fit) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/vcov.ppm.Rd0000644000176200001440000002177314141452520015666 0ustar liggesusers\name{vcov.ppm} \alias{vcov.ppm} \title{Variance-Covariance Matrix for a Fitted Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted point process model. } \usage{ \method{vcov}{ppm}(object, \dots, what = "vcov", verbose = TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), hessian=FALSE) } \arguments{ \item{object}{A fitted point process model (an object of class \code{"ppm"}).} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } \item{fine}{ Logical value indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}). } \item{verbose}{Logical. If \code{TRUE}, a message will be printed if various minor problems are encountered. } \item{gam.action}{String indicating what to do if \code{object} was fitted by \code{gam}. } \item{matrix.action}{String indicating what to do if the matrix is ill-conditioned (so that its inverse cannot be calculated). } \item{logi.action}{String indicating what to do if \code{object} was fitted via the logistic regression approximation using a non-standard dummy point process.
} \item{nacoef.action}{ String indicating what to do if some of the fitted coefficients are \code{NA} (so that variance cannot be calculated). } \item{hessian}{ Logical. Use the negative Hessian matrix of the log pseudolikelihood instead of the Fisher information. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"ppm"}, typically produced by \code{\link{ppm}}. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.ppm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.ppm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.ppm}(object)}. For models fitted by the Berman-Turner approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the maximum pseudolikelihood (using the default \code{method="mpl"} in the call to \code{\link{ppm}}), the implementation works as follows. \itemize{ \item If the fitted model \code{object} is a Poisson process, the calculations are based on standard asymptotic theory for the maximum likelihood estimator (Kutoyants, 1998). The observed Fisher information matrix of the fitted model \code{object} is first computed, by summing over the Berman-Turner quadrature points in the fitted model. The asymptotic variance-covariance matrix is calculated as the inverse of the observed Fisher information. The correlation matrix is then obtained by normalising. \item If the fitted model is not a Poisson process (i.e. it is some other Gibbs point process) then the calculations are based on Coeurjolly and Rubak (2013). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. } For models fitted by the Huang-Ogata method (\code{method="ho"} in the call to \code{\link{ppm}}), the implementation uses the Monte Carlo estimate of the Fisher information matrix that was computed when the original model was fitted. For models fitted by the logistic regression approximation to the maximum pseudolikelihood (\code{method="logi"} in the call to \code{\link{ppm}}), calculations are based on Baddeley et al. (2014). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. In this case the calculations depend on the type of dummy pattern used, and currently only the types \code{"stratrand"}, \code{"binomial"} and \code{"poisson"} as generated by \code{\link{quadscheme.logi}} are implemented. For other types the behaviour depends on the argument \code{logi.action}. If \code{logi.action="fatal"} an error is produced.
Otherwise, for types \code{"grid"} and \code{"transgrid"} the formulas for \code{"stratrand"} are used, which in many cases should be conservative. For an arbitrary user-specified dummy pattern (type \code{"given"}) the formulas for \code{"poisson"} are used, which in many cases should be conservative. If \code{logi.action="warn"} a warning is issued; otherwise the calculation proceeds without a warning. The argument \code{verbose} makes it possible to suppress some diagnostic messages. The asymptotic theory is not correct if the model was fitted using \code{gam} (by calling \code{\link{ppm}} with \code{use.gam=TRUE}). The argument \code{gam.action} determines what to do in this case. If \code{gam.action="fatal"}, an error is generated. If \code{gam.action="warn"}, a warning is issued and the calculation proceeds using the incorrect theory for the parametric case, which is probably a reasonable approximation in many applications. If \code{gam.action="silent"}, the calculation proceeds without a warning. If \code{hessian=TRUE} then the negative Hessian (second derivative) matrix of the log pseudolikelihood, and its inverse, will be computed. For non-Poisson models, this is not a valid estimate of variance, but is useful for other calculations. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. If this message occurs, try repeating the calculation using \code{fine=TRUE}. Singularity can occur because of numerical overflow or collinearity in the covariates. To check this, rescale the coordinates of the data points and refit the model. See the Examples. In a Gibbs model, a singular matrix may also occur if the fitted model is a hard core process: this is a feature of the variance estimator. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- ppm(X, ~ x + y) vcov(fit) vcov(fit, what="Fish") # example of singular system m <- ppm(demopat ~polynom(x,y,2)) \dontrun{ try(v <- vcov(m)) } # rescale x, y coordinates to range [0,1] x [0,1] approximately demopatScale <- rescale(demopat, 10000) m <- ppm(demopatScale ~ polynom(x,y,2)) v <- vcov(m) # Gibbs example fitS <- ppm(swedishpines ~1, Strauss(9)) coef(fitS) sqrt(diag(vcov(fitS))) } \author{ Original code for Poisson point process was written by \adrian and \rolf . New code for stationary Gibbs point processes was generously contributed by \ege and Jean-Francois Coeurjolly. New code for generic Gibbs process written by \adrian. New code for logistic method contributed by \ege. } \seealso{ \code{\link{vcov}} for the generic, \code{\link{ppm}} for information about fitted models, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Coeurjolly, J.-F. and Rubak, E. (2013) Fast covariance estimation for innovations computed from a spatial Gibbs point process. \emph{Scandinavian Journal of Statistics} \bold{40} 669--684. Kutoyants, Y.A. (1998) \emph{Statistical Inference for Spatial Poisson Processes}, Lecture Notes in Statistics 134. New York: Springer.
} \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/rthin.Rd0000644000176200001440000000656014141452520015237 0ustar liggesusers\name{rthin} \alias{rthin} \title{Random Thinning} \description{ Applies independent random thinning to a point pattern or segment pattern. } \usage{ rthin(X, P, \dots, nsim=1, drop=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"} or \code{"pp3"} or \code{"ppx"}) or line segment pattern (object of class \code{"psp"}) that will be thinned. } \item{P}{ Data giving the retention probabilities, i.e. the probability that each point or line in \code{X} will be retained. Either a single number, or a vector of numbers, or a \code{function(x,y)} in the \R language, or a function object (class \code{"funxy"} or \code{"linfun"}), or a pixel image (object of class \code{"im"} or \code{"linim"}). } \item{\dots}{ Additional arguments passed to \code{P}, if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ An object of the same kind as \code{X} if \code{nsim=1}, or a list of such objects if \code{nsim > 1}. } \details{ In a random thinning operation, each point of the point pattern \code{X} is randomly either deleted or retained (i.e. not deleted). The result is a point pattern, consisting of those points of \code{X} that were retained. Independent random thinning means that the retention/deletion of each point is independent of other points. The argument \code{P} determines the probability of \bold{retaining} each point. It may be \describe{ \item{a single number,}{so that each point will be retained with the same probability \code{P}; } \item{a vector of numbers,}{so that the \code{i}th point of \code{X} will be retained with probability \code{P[i]}; } \item{a function \code{P(x,y)},}{so that a point at a location \code{(x,y)} will be retained with probability \code{P(x,y)}; } \item{an object of class \code{"funxy"} or \code{"linfun"},}{so that points in the pattern \code{X} will be retained with probabilities \code{P(X)}; } \item{a pixel image,}{containing values of the retention probability for all locations in a region encompassing the point pattern. } } If \code{P} is a function \code{P(x,y)}, it should be \sQuote{vectorised}, that is, it should accept vector arguments \code{x,y} and should yield a numeric vector of the same length. The function may have extra arguments which are passed through the \code{\dots} argument. } \section{Reproducibility}{ The algorithm for random thinning was changed in \pkg{spatstat} version \code{1.42-3}. Set \code{spatstat.options(fastthin=FALSE)} to use the previous, slower algorithm, if it is desired to reproduce results obtained with earlier versions. } \examples{ plot(redwood, main="thinning") # delete 20\% of points Y <- rthin(redwood, 0.8) points(Y, col="green", cex=1.4) # function f <- function(x,y) { ifelse(x < 0.4, 1, 0.5) } Y <- rthin(redwood, f) # pixel image Z <- as.im(f, Window(redwood)) Y <- rthin(redwood, Z) # thin other kinds of patterns E <- rthin(osteo$pts[[1]], 0.6) L <- rthin(copper$Lines, 0.5) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{datagen} \keyword{manip}spatstat.core/man/dppparbounds.Rd0000644000176200001440000000145014141452520016605 0ustar liggesusers\name{dppparbounds} \alias{dppparbounds} \title{Parameter Bound for a Determinantal Point Process Model} \description{ Returns the lower and upper bound for a specific parameter of a determinantal point process model when all other parameters are fixed. } \usage{dppparbounds(model, name, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{name}{Name of the parameter for which the bound should be computed.} \item{\dots}{ Additional arguments passed to the \code{parbounds} function of the given model } } \value{A \code{data.frame} containing lower and upper bounds.} \author{ \adrian, \rolf and \ege } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) dppparbounds(model, "lambda") } \keyword{spatial} \keyword{models} spatstat.core/man/LambertW.Rd0000644000176200001440000000252614141452520015626 0ustar liggesusers\name{LambertW} \alias{LambertW} \title{ Lambert's W Function } \description{ Computes Lambert's W-function. } \usage{ LambertW(x) } \arguments{ \item{x}{ Vector of nonnegative numbers. } } \details{ Lambert's W-function is the inverse function of \eqn{f(y) = y e^y}{f(y) = y * exp(y)}. That is, \eqn{W} is the function such that \deqn{ W(x) e^{W(x)} = x }{ W(x) * exp(W(x)) = x } This command \code{LambertW} computes \eqn{W(x)} for each entry in the argument \code{x}. If the package \pkg{gsl} has been installed, then the function \code{lambert_W0} in that package is invoked. Otherwise, values of the W-function are computed by root-finding, using the function \code{\link[stats]{uniroot}}. Computation using \pkg{gsl} is about 100 times faster. If any entries of \code{x} are infinite or \code{NA}, the corresponding results are \code{NA}. } \value{ Numeric vector. } \references{ Corless, R, Gonnet, G, Hare, D, Jeffrey, D and Knuth, D (1996), On the Lambert W function. \emph{Advances in Computational Mathematics} \bold{5}, 329--359. Roy, R and Olver, F (2010), Lambert W function. In Olver, F, Lozier, D and Boisvert, R (eds.), \emph{{NIST} Handbook of Mathematical Functions}, Cambridge University Press. } \author{\adrian and \rolf } \examples{ LambertW(exp(1)) } \keyword{math} spatstat.core/man/addvar.Rd0000644000176200001440000001507314141452520015353 0ustar liggesusers\name{addvar} \alias{addvar} \title{ Added Variable Plot for Point Process Model } \description{ Computes the coordinates for an Added Variable Plot for a fitted point process model. } \usage{ addvar(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate to be added to the model. Either a pixel image, a \code{function(x,y)}, or a character string giving the name of a covariate that was supplied when the model was fitted. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}).
} \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{crosscheck}{ For developers only. Logical value indicating whether to perform cross-checks on the validity of the calculation. } } \details{ This command generates the plot coordinates for an Added Variable Plot for a spatial point process model. Added Variable Plots (Cox, 1958, sec 4.5; Wang, 1985) are commonly used in linear models and generalized linear models, to decide whether a model with response \eqn{y} and predictors \eqn{x} would be improved by including another predictor \eqn{z}. In a (generalised) linear model with response \eqn{y} and predictors \eqn{x}, the Added Variable Plot for a new covariate \eqn{z} is a plot of the smoothed Pearson residuals from the original model against the scaled residuals from a weighted linear regression of \eqn{z} on \eqn{x}. If this plot has nonzero slope, then the new covariate \eqn{z} is needed. For general advice see Cook and Weisberg (1999) or Harrell (2001). Essentially the same technique can be used for a spatial point process model (Baddeley et al, 2013). The argument \code{model} should be a fitted spatial point process model (object of class \code{"ppm"}). The argument \code{covariate} identifies the covariate that is to be considered for addition to the model. It should be either a pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving the values of the covariate at any spatial location. Alternatively \code{covariate} may be a character string, giving the name of a covariate that was supplied (in the \code{covariates} argument to \code{\link{ppm}}) when the model was fitted, but was not used in the model. The result of \code{addvar(model, covariate)} is an object belonging to the classes \code{"addvar"} and \code{"fv"}. Plot this object to generate the added variable plot. Note that the plot method shows the pointwise significance bands for a test of the \emph{null} model, i.e. the null hypothesis that the new covariate has no effect. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}.
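For illustration, here is a minimal sketch of these bandwidth options (the fixed value \code{bw=0.1} is arbitrary, chosen purely for illustration): \preformatted{
  X <- rpoispp(function(x,y){exp(3+3*x)})
  model <- ppm(X, ~y)
  # fixed numeric bandwidth: the bandwidth actually used is adjust * bw
  adv1 <- addvar(model, "x", bw=0.1)
  # automatic selection from covariate values at the quadrature points
  adv2 <- addvar(model, "x", bw="nrd0", bw.input="quad")
}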
} \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{addvar}. } \value{ An object of class \code{"addvar"} containing the coordinates for the added variable plot. There is a \code{plot} method. } \section{Internal data}{ The return value has an attribute \code{"spatial"} which contains the internal data: the computed values of the residuals, and of all relevant covariates, at each quadrature point of the model. It is an object of class \code{"ppp"} with a data frame of marks. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. Cook, R.D. and Weisberg, S. (1999) \emph{Applied regression, including computing and graphics}. New York: Wiley. Cox, D.R. (1958) \emph{Planning of Experiments}. New York: Wiley. Harrell, F. (2001) \emph{Regression Modeling Strategies}. New York: Springer. Wang, P. (1985) Adding a variable in generalized linear models. \emph{Technometrics} \bold{27}, 273--276. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{parres}}, \code{\link{rhohat}}, \code{\link{rho2hat}}. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X, ~y) adv <- addvar(model, "x") plot(adv) adv <- addvar(model, "x", subregion=square(0.5)) } \keyword{spatial} \keyword{models} spatstat.core/man/Hest.Rd0000644000176200001440000001336414141452520015016 0ustar liggesusers\name{Hest} \alias{Hest} \title{Spherical Contact Distribution Function} \description{ Estimates the spherical contact distribution function of a random set. } \usage{ Hest(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE) } \arguments{ \item{X}{The observed random set. An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}. Alternatively a pixel image (class \code{"im"}) with logical values. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{H(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the discretisation. } \item{W}{ Optional. A window (object of class \code{"owin"}) to be taken as the window of observation. The contact distribution function will be estimated from values of the contact distance inside \code{W}. The default is \code{W=Frame(X)} when \code{X} is a window, and \code{W=Window(X)} otherwise. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{H(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"han"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Logical value indicating whether to compute the conditional or unconditional distribution. See Details. 
} } \details{ The spherical contact distribution function of a stationary random set \eqn{X} is the cumulative distribution function \eqn{H} of the distance from a fixed point in space to the nearest point of \eqn{X}, given that the point lies outside \eqn{X}. That is, \eqn{H(r)} equals the probability that \code{X} lies closer than \eqn{r} units away from the fixed point \eqn{x}, given that \code{X} does not cover \eqn{x}. Let \eqn{D = d(x,X)} be the shortest distance from an arbitrary point \eqn{x} to the set \code{X}. Then the spherical contact distribution function is \deqn{H(r) = P(D \le r \mid D > 0)}{H(r) = P(D <= r | D > 0)} For a point process, the spherical contact distribution function is the same as the empty space function \eqn{F} discussed in \code{\link{Fest}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}) or a window (object of class \code{"owin"}). It is assumed to be a realisation of a stationary random set. The algorithm first calls \code{\link{distmap}} to compute the distance transform of \code{X}, then computes the Kaplan-Meier and reduced-sample estimates of the cumulative distribution following Hansen et al (1999). If \code{conditional=TRUE} (the default) the algorithm returns an estimate of the spherical contact function \eqn{H(r)} as defined above. If \code{conditional=FALSE}, it instead returns an estimate of the cumulative distribution function \eqn{H^\ast(r) = P(D \le r)}{H*(r) = P(D <= r)} which includes a jump at \eqn{r=0} if \code{X} has nonzero area. Accuracy depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing up to six columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{H(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{H(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{H(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{H(r)} by the spatial Kaplan-Meier method } \item{han}{the spatial Hanisch-Chiu-Stoyan estimator of \eqn{H(r)} } \item{raw}{the uncorrected estimate of \eqn{H(r)}, i.e. the empirical distribution of the distance from a fixed point in the window to the nearest point of \code{X} } } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Hansen, M.B., Baddeley, A.J. and Gill, R.D. First contact distributions for spatial patterns: regularity and estimation. \emph{Advances in Applied Probability} \bold{31} (1999) 15-33. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. 
} \seealso{\code{\link{Fest}}} \examples{ X <- runifpoint(42) H <- Hest(X) Y <- rpoisline(10) H <- Hest(Y) H <- Hest(Y, dimyx=256) X <- heather$coarse plot(Hest(X)) H <- Hest(X, conditional=FALSE) P <- owin(poly=list(x=c(5.3, 8.5, 8.3, 3.7, 1.3, 3.7), y=c(9.7, 10.0, 13.6, 14.4, 10.7, 7.2))) plot(X) plot(P, add=TRUE, col="red") H <- Hest(X, W=P) Z <- as.im(FALSE, Frame(X)) Z[X] <- TRUE Z <- Z[P, drop=FALSE] plot(Z) H <- Hest(Z) } \author{ \spatstatAuthors with contributions from Kassel Hingee. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/FmultiInhom.Rd0000644000176200001440000000425314141452520016343 0ustar liggesusers\name{FmultiInhom} \alias{FmultiInhom} \title{ Inhomogeneous Marked F-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{F} function, effectively the cumulative distribution function of the distance from a fixed point to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. } \usage{ FmultiInhom(X, J, lambda = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{J}{ A subset index specifying the subset of points to which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaJ}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{F} function should be estimated. There is a sensible default. } } \details{ See Cronie and Van Lieshout (2015). } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{F} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{Finhom}} } \examples{ X <- amacrine J <- (marks(X) == "off") mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 plot(FmultiInhom(X, J, lambda=lam, lambdamin=lmin)) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/vcov.slrm.Rd0000644000176200001440000000661414144333466016052 0ustar liggesusers\name{vcov.slrm} \alias{vcov.slrm} \title{Variance-Covariance Matrix for a Fitted Spatial Logistic Regression} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a point process model that was fitted by spatial logistic regression. } \usage{ \method{vcov}{slrm}(object, \dots, what=c("vcov", "corr", "fisher", "Fisher")) } \arguments{ \item{object}{A fitted point process model of class \code{"slrm"}.} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix.
} } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"slrm"}, typically produced by \code{\link{slrm}}. It represents a Poisson point process model fitted by spatial logistic regression. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.slrm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.slrm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.slrm}(object)}. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. Standard errors for the fitted intensity can be obtained using \code{\link{predict.slrm}}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. This can occur because of numerical overflow or collinearity in the covariates. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x + y) vcov(fit) vcov(fit, what="corr") vcov(fit, what="f") } \author{ \adrian and \rolf . } \seealso{ \code{\link{vcov}} for the generic, \code{\link{slrm}} for information about fitted models, \code{\link{predict.slrm}} for other kinds of calculation about the model, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. \code{DOI: 10.1214/10-EJS581} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/Gdot.Rd0000644000176200001440000002132314141452520015002 0ustar liggesusers\name{Gdot} \alias{Gdot} \title{ Multitype Nearest Neighbour Distance Function (i-to-any) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest other point of any type. } \usage{ Gdot(X, i, r=NULL, breaks=NULL, \dots, correction=c("km", "rs", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the distance distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. 
The values of the argument \eqn{r} at which the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing seven numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{i\bullet}(r)}{Gi.(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{i\bullet}(r)}{Gi.(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest other point of any type. } \item{theo}{the theoretical value of \eqn{G_{i\bullet}(r)}{Gi.(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gdot} and its companions \code{\link{Gcross}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``dot-type'' (type \eqn{i} to any type) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest other point of the process, regardless of type. An estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the type \eqn{i} points were independent of all other points, then \eqn{G_{i\bullet}(r)}{Gi.(r)} would equal \eqn{G_{ii}(r)}{Gii(r)}, the nearest neighbour distance distribution function of the type \eqn{i} points alone. For a multitype Poisson point process with total intensity \eqn{\lambda}{lambda}, we have \deqn{G_{i\bullet}(r) = 1 - e^{ - \lambda \pi r^2} }{% Gi.(r) = 1 - exp( - lambda * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{i\bullet}}{Gi.} curves may suggest dependence of the type \eqn{i} points on the other points.
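For instance, a quick visual comparison of an empirical estimate against this Poisson benchmark might look as follows (a sketch only, using the \code{amacrine} data as in the Examples below): \preformatted{
  G <- Gdot(amacrine, "off")
  # Kaplan-Meier estimate versus the theoretical Poisson curve
  plot(G, cbind(km, theo) ~ r)
}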
This algorithm estimates the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users are strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{i\bullet}(r)}{Gi.(r)}. This estimate should be used with caution as \eqn{G_{i\bullet}(r)}{Gi.(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern is a biased estimate of \eqn{G_{i\bullet}}{Gi.}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{i\bullet}}{Gi.} as if it were an unbiased estimator of \eqn{G_{i\bullet}}{Gi.}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit: analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D. and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303. Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{i\bullet}}{Gi.} does not necessarily have a density. The reduced sample estimator of \eqn{G_{i\bullet}}{Gi.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}.
The spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}}{Gi.} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G0. <- Gdot(amacrine, "off") plot(G0.) # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gdot(pp, "0") G <- Gdot(pp, 0) # equivalent } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pool.fasp.Rd0000644000176200001440000000350714141452520016012 0ustar liggesusers\name{pool.fasp} \alias{pool.fasp} \title{ Pool Data from Several Function Arrays } \description{ Pool the simulation data from several function arrays (objects of class \code{"fasp"}) and compute a new function array. } \usage{ \method{pool}{fasp}(...) } \arguments{ \item{\dots}{ Objects of class \code{"fasp"}. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fasp"} of function arrays. It is used to combine the simulation data from several arrays of simulation envelopes and to compute a new array of envelopes based on the combined data. Each of the arguments \code{\dots} must be a function array (object of class \code{"fasp"}) containing simulation envelopes. This is typically created by running the command \code{\link{alltypes}} with the arguments \code{envelope=TRUE} and \code{savefuns=TRUE}. This ensures that each object is an array of simulation envelopes, and that each envelope contains the simulated data (summary function values) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new array of envelopes is computed from the combined set of simulations. Warnings or errors will be issued if the objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of class \code{"fasp"}. } \seealso{ \code{\link{fasp}}, \code{\link{alltypes}}, \code{\link{pool.envelope}}, \code{\link{pool}} } \examples{ data(amacrine) A1 <- alltypes(amacrine,"K",nsim=9,envelope=TRUE,savefuns=TRUE) A2 <- alltypes(amacrine,"K",nsim=10,envelope=TRUE,savefuns=TRUE) pool(A1, A2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/update.rmhcontrol.Rd0000644000176200001440000000210414141452520017551 0ustar liggesusers\name{update.rmhcontrol} \alias{update.rmhcontrol} \title{Update Control Parameters of Metropolis-Hastings Algorithm} \description{ \code{update} method for class \code{"rmhcontrol"}. } \usage{ \method{update}{rmhcontrol}(object, \dots) } \arguments{ \item{object}{ Object of class \code{"rmhcontrol"} containing control parameters for a Metropolis-Hastings algorithm. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{rmhcontrol}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"rmhcontrol"}. An object of class \code{"rmhcontrol"} describes a set of control parameters for the Metropolis-Hastings simulation algorithm. See \code{\link{rmhcontrol}}. \code{update.rmhcontrol} will modify the parameters specified by \code{object} according to the new arguments given. } \value{ Another object of class \code{"rmhcontrol"}.
} \examples{ a <- rmhcontrol(expand=1) update(a, expand=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/spatialcdf.Rd0000644000176200001440000000655214141452520016226 0ustar liggesusers\name{spatialcdf} \alias{spatialcdf} \title{ Spatial Cumulative Distribution Function } \description{ Compute the spatial cumulative distribution function of a spatial covariate, optionally using spatially-varying weights. } \usage{ spatialcdf(Z, weights = NULL, normalise = FALSE, ..., W = NULL, Zname = NULL) } \arguments{ \item{Z}{ Spatial covariate. A pixel image or a \code{function(x,y,...)} } \item{weights}{ Spatial weighting for different locations. A pixel image, a \code{function(x,y,...)}, a window, a constant value, or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{normalise}{ Logical. Whether the weights should be normalised so that they sum to 1. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, or extra arguments passed to \code{Z} if it is a function. } \item{W}{ Optional window (object of class \code{"owin"}) defining the spatial domain. } \item{Zname}{ Optional character string for the name of the covariate \code{Z} used in plots. } } \details{ If \code{weights} is missing or \code{NULL}, it defaults to 1. The values of the covariate \code{Z} are computed on a grid of pixels. The weighted cumulative distribution function of \code{Z} values is computed, taking each value with weight equal to the pixel area. The resulting function \eqn{F} is such that \eqn{F(t)} is the area of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a pixel image or a function, then the values of \code{weights} and of the covariate \code{Z} are computed on a grid of pixels. The \code{weights} are multiplied by the pixel area. Then the weighted empirical cumulative distribution function of \code{Z} values is computed using \code{\link{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the total weight (or weighted area) of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a fitted point process model, then it should be a Poisson process. The fitted intensity of the model, and the value of the covariate \code{Z}, are evaluated at the quadrature points used to fit the model. The \code{weights} are multiplied by the weights of the quadrature points. Then the weighted empirical cumulative distribution of \code{Z} values is computed using \code{\link{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the expected number of points in the point process that will fall in the region of space where \eqn{Z \le t}{Z <= t}. If \code{normalise=TRUE}, the function is normalised so that its maximum value equals 1, so that it gives the cumulative \emph{fraction} of weight or cumulative fraction of points. The result can be printed, plotted, and used as a function. } \value{ A cumulative distribution function object belonging to the classes \code{"spatialcdf"}, \code{"ewcdf"}, \code{"ecdf"} (only if \code{normalise=TRUE}) and \code{"stepfun"}. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{ewcdf}}, \code{\link{cdf.test}} } \examples{ with(bei.extra, { plot(spatialcdf(grad)) fit <- ppm(bei ~ elev) plot(spatialcdf(grad, predict(fit))) plot(A <- spatialcdf(grad, fit)) A(0.1) }) plot(spatialcdf("x", W=letterR)) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Kmodel.Rd0000644000176200001440000000326414141452520015324 0ustar liggesusers\name{Kmodel} \alias{Kmodel} \alias{pcfmodel} \title{K Function or Pair Correlation Function of a Point Process Model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a point process model. } \usage{ Kmodel(model, \dots) pcfmodel(model, \dots) } \arguments{ \item{model}{ A fitted point process model of some kind. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. The functions \code{Kmodel} and \code{pcfmodel} give the theoretical \eqn{K}-function and the theoretical pair correlation function for a point process model that has been fitted to data. The functions \code{Kmodel} and \code{pcfmodel} are generic, with methods for the classes \code{"kppm"} (cluster processes and Cox processes) and \code{"ppm"} (Gibbs processes). The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{Kmodel.kppm}} for the method for cluster processes and Cox processes. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/MultiStrauss.Rd0000644000176200001440000000763314141452520016574 0ustar liggesusers\name{MultiStrauss} \alias{MultiStrauss} \title{The Multitype Strauss Point Process Model} \description{ Creates an instance of the multitype Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStrauss(radii, types=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ The (stationary) multitype Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is the pairwise interaction point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density. The nonstationary multitype Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. 
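For example, for a point pattern with two types, the matrix of interaction radii might be specified as follows (a sketch; the numerical values are arbitrary and purely illustrative): \preformatted{
  # radii[i,j] = interaction radius between a type i and a type j point
  R <- 0.03 * matrix(c(1, 2,
                       2, 1), nrow=2, ncol=2)
  MultiStrauss(R)
}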
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the multitype Strauss process pairwise interaction is yielded by the function \code{MultiStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{MultiStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStrauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{MultiHard}} } \examples{ r <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStrauss(r) # prints a sensible description of itself r <- 0.03 * matrix(c(1,2,2,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[ owin(c(0, 0.8), c(0, 1)) ] } ppm(X ~1, MultiStrauss(r)) # fit the stationary multitype Strauss process to `amacrine' # ppm(X ~polynom(x,y,3), MultiStrauss(r, c("off","on"))) # fit a nonstationary multitype Strauss process with log-cubic trend } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStrauss(types=NULL, radii)}. The new code attempts to handle the old syntax as well. } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/runifdisc.Rd0000644000176200001440000000346714141452520016104 0ustar liggesusers\name{runifdisc} \alias{runifdisc} \title{Generate N Uniform Random Points in a Disc} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points in a circular disc. } \usage{ runifdisc(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points. } \item{radius}{Radius of the circle.} \item{centre}{Coordinates of the centre of the circle.} \item{\dots}{ Arguments passed to \code{\link{disc}} controlling the accuracy of approximation to the circle. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent random points, uniformly distributed in a circular disc. 
It is faster (for a circular window) than the general code used in \code{\link{runifpoint}}. To generate random points in an ellipse, first generate points in a circle using \code{runifdisc}, then transform to an ellipse using \code{\link{affine}}, as shown in the examples. To generate random points in other windows, use \code{\link{runifpoint}}. To generate non-uniform random points, use \code{\link{rpoint}}. } \seealso{ \code{\link{disc}}, \code{\link{runifpoint}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit disc plot(runifdisc(100)) # 42 random points in the ellipse with major axis 3 and minor axis 1 X <- runifdisc(42) Y <- affine(X, mat=diag(c(3,1))) plot(Y) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/studpermu.test.Rd0000644000176200001440000001011314141452520017106 0ustar liggesusers\name{studpermu.test} \alias{studpermu.test} \title{ Studentised Permutation Test } \description{ Perform a studentised permutation test for a difference between groups of point patterns. } \usage{ studpermu.test(X, formula, summaryfunction = Kest, \dots, rinterval = NULL, nperm = 999, use.Tbar = FALSE, minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) } \arguments{ \item{X}{ Data. Either a \code{hyperframe} or a list of lists of point patterns. } \item{formula}{ Formula describing the grouping, when \code{X} is a hyperframe. The left side of the formula identifies which column of \code{X} contains the point patterns. The right side identifies the grouping factor. If the formula is missing, the grouping variable is taken to be the first column of \code{X} that contains a factor, and the point patterns are taken from the first column that contains point patterns. } \item{summaryfunction}{ Summary function applicable to point patterns. } \item{\dots}{ Additional arguments passed to \code{summaryfunction}. } \item{rinterval}{ Interval of distance values \eqn{r} over which the summary function should be evaluated and over which the test statistic will be integrated. If \code{NULL}, the default range of the summary statistic is used (taking the intersection of these ranges over all patterns). } \item{nperm}{ Number of random permutations for the test. } \item{use.Tbar}{ Logical value indicating choice of test statistic. If \code{TRUE}, use the alternative test statistic, which is appropriate for summary functions with roughly constant variance, such as \eqn{K(r)/r} or \eqn{L(r)}. } \item{minpoints}{ Minimum permissible number of points in a point pattern for inclusion in the test calculation. } \item{rsteps}{ Number of discretisation steps in the \code{rinterval}. } \item{r}{ Optional vector of distance values as the argument for \code{summaryfunction}. Should not usually be given. There is a sensible default. } \item{arguments.in.data}{ Logical. If \code{TRUE}, individual extra arguments to \code{summaryfunction} will be taken from \code{X} (which must be a hyperframe). This assumes that the first argument of \code{summaryfunction} is the point pattern dataset. } } \details{ This function performs the studentized permutation test of Hahn (2012) for a difference between groups of point patterns. The first argument \code{X} should be either \describe{ \item{a list of lists of point patterns.}{ Each element of \code{X} will be interpreted as a group of point patterns, assumed to be replicates of the same point process. 
} \item{a hyperframe:}{ One column of the hyperframe should contain point patterns, and another column should contain a factor indicating the grouping. The argument \code{formula} should be a formula in the \R language specifying the grouping: it should be of the form \code{P ~ G} where \code{P} is the name of the column of point patterns, and \code{G} is the name of the factor. } } A group needs to contain at least two point patterns with at least \code{minpoints} points in each pattern. The function returns an object of class \code{"htest"} and \code{"studpermutest"} that can be printed and plotted. The printout shows the test result and \eqn{p}-value. The plot shows the summary functions for the groups (and the group means if requested). } \value{ Object of class \code{"studpermutest"}. } \references{ Hahn, U. (2012) A studentized permutation test for the comparison of spatial point patterns. \emph{Journal of the American Statistical Association} \bold{107} (498), 754--764. } \seealso{ \code{\link{plot.studpermutest}} } \author{ Ute Hahn. Modified for \code{spatstat} by \spatstatAuthors. } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) testpyramidal } \keyword{spatial} \keyword{htest} spatstat.core/man/rStrauss.Rd0000644000176200001440000001157014141452520015736 0ustar liggesusers\name{rStrauss} \alias{rStrauss} \title{Perfect Simulation of the Strauss Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss process, using a perfect simulation algorithm. } \usage{ rStrauss(beta, gamma = 1, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Strauss point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss process (Strauss, 1975; Kelly and Ripley, 1976) is a model for spatial inhibition, ranging from a strong `hard core' inhibition to a completely random pattern according to the value of \code{gamma}. The Strauss process with interaction radius \eqn{R} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{R} units apart, and \eqn{\alpha}{alpha} is the normalising constant. 
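To make the quantities \eqn{n(x)} and \eqn{s(x)} concrete, the unnormalised log-density of a realisation can be evaluated directly. This is a sketch only (assuming \eqn{\gamma > 0}{gamma > 0}), and plays no role in the simulation algorithm. \preformatted{
  ## Sketch: evaluating log f(x) up to the constant log(alpha)
  beta <- 0.05; gamma <- 0.2; R <- 1.5
  X <- rStrauss(beta, gamma, R, W=square(50))
  nx <- npoints(X)                        # n(x)
  sx <- (sum(pairdist(X) < R) - nx)/2     # s(x): unordered close pairs
  nx * log(beta) + sx * log(gamma)        # log density, up to log(alpha)
}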
Intuitively, each point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density, and each pair of points closer than \eqn{R} units apart contributes a factor \eqn{\gamma}{gamma} to the density. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} in order that the process be well-defined (Kelly and Ripley, 1976). This model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma=1}{gamma=1} it reduces to a Poisson process (complete spatial randomness) with intensity \eqn{\beta}{beta}. If \eqn{\gamma=0}{gamma=0} it is called a ``hard core process'' with hard core radius \eqn{R/2}, since no pair of points is permitted to lie closer than \eqn{R} units apart. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \author{ Kasper Klitgaard Berthelsen, adapted for \pkg{spatstat} by \adrian } \examples{ X <- rStrauss(0.05,0.2,1.5,square(50)) } \seealso{ \code{\link{rmh}}, \code{\link{Strauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat.core/man/adaptive.density.Rd0000644000176200001440000000302714141452520017361 0ustar liggesusers\name{adaptive.density} \alias{adaptive.density} \title{Adaptive Estimate of Intensity of Point Pattern} \description{ Computes an adaptive estimate of the intensity function of a point pattern. } \usage{ adaptive.density(X, \dots, method=c("voronoi","kernel")) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"} or \code{"lpp"}).} \item{method}{Character string specifying the estimation method.} \item{\dots}{ Additional arguments passed to \code{\link{densityVoronoi}} or \code{\link{densityAdaptiveKernel}}. } } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity. 
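Both estimation methods are invoked through the same interface, as in the following sketch (using the standard \code{nztrees} dataset): \preformatted{
  ## Sketch: the two adaptive estimates side by side
  V <- adaptive.density(nztrees, method="voronoi")
  K <- adaptive.density(nztrees, method="kernel")
  plot(solist(voronoi=V, kernel=K))
}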
If \code{method="voronoi"} the data are passed to the function \code{\link{densityVoronoi}} which estimates the intensity using the Voronoi-Dirichlet tessellation. If \code{method="kernel"} the data are passed to the function \code{\link{densityAdaptiveKernel}} which estimates the intensity using a variable-bandwidth kernel estimator. } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{densityVoronoi}}, \code{\link{densityAdaptiveKernel}}, \code{\link{im.object}}. } \examples{ plot(adaptive.density(nztrees, 1), main="Voronoi estimate") } \author{ \spatstatAuthors and Mehdi Moradi. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/edge.Ripley.Rd0000644000176200001440000000610014141452520016250 0ustar liggesusers\name{edge.Ripley} \alias{edge.Ripley} \alias{rmax.Ripley} \title{ Ripley's Isotropic Edge Correction } \description{ Computes Ripley's isotropic edge correction weights for a point pattern. } \usage{ edge.Ripley(X, r, W = Window(X), method = c("C", "interpreted"), maxweight = 100, internal=list()) rmax.Ripley(W) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{r}{ Vector or matrix of interpoint distances for which the edge correction should be computed. } \item{method}{ Choice of algorithm. Either \code{"interpreted"} or \code{"C"}. This is needed only for debugging purposes. } \item{maxweight}{ Maximum permitted value of the edge correction weight. } \item{internal}{For developer use only.} } \details{ The function \code{edge.Ripley} computes Ripley's (1977) isotropic edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Ripley} computes the maximum value of distance \eqn{r} for which the isotropic edge correction estimate of \eqn{K(r)} is valid. For a single point \eqn{x} in a window \eqn{W}, and a distance \eqn{r > 0}, the isotropic edge correction weight is \deqn{ e(u, r) = \frac{2\pi r}{\mbox{length}(c(u,r) \cap W)} }{ e(u, r) = 2 * \pi * r/length(intersection(c(u,r), W)) } where \eqn{c(u,r)} is the circle of radius \eqn{r} centred at the point \eqn{u}. The denominator is the length of the overlap between this circle and the window \eqn{W}. The function \code{edge.Ripley} computes this edge correction weight for each point in the point pattern \code{X} and for each corresponding distance value in the vector or matrix \code{r}. If \code{r} is a vector, with one entry for each point in \code{X}, then the result is a vector containing the edge correction weights \code{e(X[i], r[i])} for each \code{i}. If \code{r} is a matrix, with one row for each point in \code{X}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], r[i,j])}. For example \code{edge.Ripley(X, pairdist(X))} computes all the edge corrections required for the \eqn{K}-function. If any value of the edge correction weight exceeds \code{maxwt}, it is set to \code{maxwt}. The function \code{rmax.Ripley} computes the smallest distance \eqn{r} such that it is possible to draw a circle of radius \eqn{r}, centred at a point of \code{W}, such that the circle does not intersect the interior of \code{W}. } \value{ A numeric vector or matrix. } \references{ Ripley, B.D. (1977) Modelling spatial patterns (with discussion). 
\emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{edge.Trans}}, \code{\link{rmax.Trans}}, \code{\link{Kest}} } \examples{ v <- edge.Ripley(cells, pairdist(cells)) rmax.Ripley(Window(cells)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/envelopeArray.Rd0000644000176200001440000000517414141452520016727 0ustar liggesusers\name{envelopeArray} \alias{envelopeArray} \title{ Array of Simulation Envelopes of Summary Function } \description{ Compute an array of simulation envelopes using a summary function that returns an array of curves. } \usage{ envelopeArray(X, fun, \dots, dataname = NULL, verb = FALSE, reuse = TRUE) } \arguments{ \item{X}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"lppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. The result of \code{fun} should be a function array (object of class \code{"fasp"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} to control the simulations, or passed to \code{fun} when evaluating the function. } \item{dataname}{ Optional character string name for the data. } \item{verb}{ Logical value indicating whether to print progress reports. } \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}, the default) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This command is the counterpart of \code{\link{envelope}} when the function \code{fun} that is evaluated on each simulated point pattern will return an object of class \code{"fasp"} representing an array of summary functions. Simulated point patterns are generated according to the rules described for \code{\link{envelope}}. In brief, if \code{X} is a point pattern, the algorithm generates simulated point patterns of the same kind, according to complete spatial randomness. If \code{X} is a fitted model, the algorithm generates simulated point patterns according to this model. For each simulated point pattern \code{Y}, the function \code{fun} is invoked. The result \code{Z <- fun(Y, ...)} should be an object of class \code{"fasp"} representing an array of summary functions. The dimensions of the array \code{Z} should be the same for each simulated pattern \code{Y}. This algorithm finds the simulation envelope of the summary functions in each cell of the array. } \value{ An object of class \code{"fasp"} representing an array of envelopes. } \author{ \spatstatAuthors. } \seealso{ \code{\link{envelope}}, \code{\link{alltypes}}. } \examples{ A <- envelopeArray(finpines, markcrosscorr, nsim=9) plot(A) } \keyword{spatial} \keyword{nonparametric} \keyword{iteration} spatstat.core/man/fitted.ppm.Rd0000644000176200001440000001177114141452520016165 0ustar liggesusers\name{fitted.ppm} \alias{fitted.ppm} \title{ Fitted Conditional Intensity for Point Process Model } \description{ Given a point process model fitted to a point pattern, compute the fitted conditional intensity or fitted trend of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. 
} \usage{ \method{fitted}{ppm}(object, \dots, type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, ignore.hardcore=FALSE, dropcoef=FALSE) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{type}{ String (partially matched) indicating whether the fitted value is the conditional intensity (\code{"lambda"} or \code{"cif"}) or the first order trend (\code{"trend"}) or the logarithm of conditional intensity (\code{"link"}). } \item{dataonly}{ Logical. If \code{TRUE}, then values will only be computed at the points of the data point pattern. If \code{FALSE}, then values will be computed at all the points of the quadrature scheme used to fit the model, including the points of the data point pattern. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{ignore.hardcore}{ Advanced use only. Logical value specifying whether to compute only the finite part of the interaction potential (effectively removing any hard core interaction terms). } \item{dropcoef}{ Internal use only. } } \value{ A vector containing the values of the fitted conditional intensity, fitted spatial trend, or logarithm of the fitted conditional intensity. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \details{ The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the model-fitting algorithm \code{\link{ppm}}. This function evaluates the conditional intensity \eqn{\hat\lambda(u, x)}{lambdahat(u,x)} or spatial trend \eqn{\hat b(u)}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, where \code{x} is the original point pattern dataset to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}.) The argument \code{drop} is explained in \code{\link{quad.ppm}}. 
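For example, the ordinary fitted values at the data points may be compared with the corresponding leave-one-out values. This is a sketch only; the size of the difference depends on the fitted model. \preformatted{
  ## Sketch: effect of the leave-one-out rule at the data points
  fit <- ppm(cells ~ x, Strauss(r=0.1))
  lam <- fitted(fit, dataonly=TRUE)
  lamLOO <- fitted(fit, leaveoneout=TRUE)
  plot(lam, lamLOO); abline(0, 1)
}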
Use \code{\link{predict.ppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005). Residual analysis for spatial point processes (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{predict.ppm}} } \examples{ str <- ppm(cells ~x, Strauss(r=0.1)) lambda <- fitted(str) # extract quadrature points in corresponding order quadpoints <- union.quad(quad.ppm(str)) # plot conditional intensity values # as circles centred on the quadrature points quadmarked <- setmarks(quadpoints, lambda) plot(quadmarked) if(!interactive()) str <- ppm(cells ~ x) lambdaX <- fitted(str, leaveoneout=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/logLik.ppm.Rd0000644000176200001440000001243114141452520016121 0ustar liggesusers\name{logLik.ppm} \alias{logLik.ppm} \alias{deviance.ppm} \alias{AIC.ppm} \alias{extractAIC.ppm} \alias{nobs.ppm} \title{Log Likelihood and AIC for Point Process Model} \description{ Extracts the log likelihood, deviance, and AIC of a fitted Poisson point process model, or analogous quantities based on the pseudolikelihood or logistic likelihood for a fitted Gibbs point process model. } \usage{ \method{logLik}{ppm}(object, \dots, new.coef=NULL, warn=TRUE, absolute=FALSE) \method{deviance}{ppm}(object, \dots) \method{AIC}{ppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{ppm}(fit, scale=0, k=2, \dots, takeuchi=TRUE) \method{nobs}{ppm}(object, \dots) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"ppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood or logistic likelihood is returned instead of the likelihood. } \item{absolute}{ Logical value indicating whether to include constant terms in the loglikelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{extractAIC}} and \code{\link[stats]{nobs}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{logLik.ppm} computes the maximised value of the log likelihood for the fitted model \code{object}, as approximated by quadrature using the Berman-Turner approximation. If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning (if \code{warn=TRUE}). The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. 
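For a Poisson model this formula can be checked by hand, using the degrees of freedom stored in the \code{"df"} attribute of the result of \code{logLik}. A sketch (with \code{takeuchi=FALSE}, so that the penalty is the number of fitted parameters): \preformatted{
  ## Sketch: AIC = -2 log L + 2 df, verified directly
  fit <- ppm(cells ~ x)
  ll <- logLik(fit)
  -2 * as.numeric(ll) + 2 * attr(ll, "df")   # agrees with:
  AIC(fit, takeuchi=FALSE)
}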
The method \code{extractAIC.ppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood or logistic likelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. The method \code{nobs.ppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link[stats]{step}} uses these methods. } \section{Model comparison}{ The values of \code{logLik} and \code{AIC} returned by these functions are based on the \emph{pseudolikelihood} of the Gibbs point process model. If the model is a Poisson process, then the pseudolikelihood is the same as the likelihood, but for other Gibbs models, the pseudolikelihood is different from the likelihood (and the likelihood of a Gibbs model is hard to compute). For model comparison and model selection, it is valid to compare the \code{logLik} values, or to compare the \code{AIC} values, but only when all the models are of class \code{"ppm"}. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Varin, C. and Vidoni, P. (2005) A note on composite likelihood inference and model selection. \emph{Biometrika} \bold{92}, 519--528. } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{anova.ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{formula.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{terms.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \author{ \spatstatAuthors. } \examples{ data(cells) fit <- ppm(cells, ~x) nobs(fit) logLik(fit) deviance(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat.core/man/parres.Rd0000644000176200001440000001763014141452520015407 0ustar liggesusers\name{parres} \alias{parres} \title{ Partial Residuals for Point Process Model } \description{ Computes the smoothed partial residuals, a diagnostic for transformation of a covariate in a Poisson point process model. } \usage{ parres(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw = "nrd0", adjust=1, from = NULL, to = NULL, n = 512, bw.input = c("points", "quad"), bw.restrict=FALSE, covname) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate of interest. 
Either a character string matching the name of one of the canonical covariates in the model, or one of the names \code{"x"} or \code{"y"} referring to the Cartesian coordinates, or one of the names of the covariates given when \code{model} was fitted, or a pixel image (object of class \code{"im"}) or \code{function(x,y)} supplying the values of a covariate at any location. If the \code{model} depends on only one covariate, then this covariate is the default; otherwise a covariate must be specified. } \item{smooth.effect}{ Logical. Determines the choice of algorithm. See Details. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } } \details{ This command computes the smoothed partial residual diagnostic (Baddeley, Chang, Song and Turner, 2013) for the transformation of a covariate in a Poisson point process model. The argument \code{model} must be a fitted Poisson point process model. The diagnostic works in two different ways: \describe{ \item{Canonical covariate:}{ The argument \code{covariate} may be a character string which is the name of one of the \emph{canonical covariates} in the model. The canonical covariates are the functions \eqn{Z_j}{Z[j]} that appear in the expression for the Poisson point process intensity \deqn{ \lambda(u) = \exp(\beta_1 Z_1(u) + \ldots + \beta_p Z_p(u)) }{ lambda(u) = exp(beta[1] * Z[1](u) + ... + beta[p] * Z[p](u)) } at spatial location \eqn{u}. Type \code{names(coef(model))} to see the names of the canonical covariates in \code{model}. If the selected covariate is \eqn{Z_j}{Z[j]}, then the diagnostic plot concerns the model term \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)}. The plot shows a smooth estimate of a function \eqn{h(z)} that should replace this linear term, that is, \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)} should be replaced by \eqn{h(Z_j(u))}{h(Z[j](u))}. The linear function is also plotted as a dotted line. } \item{New covariate:}{ If the argument \code{covariate} is a pixel image (object of class \code{"im"}) or a \code{function(x,y)}, it is assumed to provide the values of a covariate that is not present in the model. Alternatively \code{covariate} can be the name of a covariate that was supplied when the model was fitted (i.e. in the call to \code{\link{ppm}}) but which does not feature in the model formula. In either case we speak of a new covariate \eqn{Z(u)}. If the fitted model intensity is \eqn{\lambda(u)}{lambda(u)} then we consider modifying this to \eqn{\lambda(u) \exp(h(Z(u)))}{lambda(u) * exp(h(Z(u)))} where \eqn{h(z)} is some function. The diagnostic plot shows an estimate of \eqn{h(z)}. 
\bold{Warning: in this case the diagnostic is not theoretically justified. This option is provided for research purposes.} } } Alternatively \code{covariate} can be one of the character strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. The behaviour here depends on whether the coordinate was one of the canonical covariates in the model. If there is more than one canonical covariate in the model that depends on the specified \code{covariate}, then the covariate effect is computed using all these canonical covariates. For example in a log-quadratic model which includes the terms \code{x} and \code{I(x^2)}, the quadratic effect involving both these terms will be computed. There are two choices for the algorithm. If \code{smooth.effect=TRUE}, the fitted covariate effect (according to \code{model}) is added to the point process residuals, then smoothing is applied to these values. If \code{smooth.effect=FALSE}, the point process residuals are smoothed first, and then the fitted covariate effect is added to the result. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{parres}. } \value{ A function value table (object of class \code{"fv"}) containing the values of the smoothed partial residual, the estimated variance, and the fitted effect of the covariate. Also belongs to the class \code{"parres"} which has methods for \code{print} and \code{plot}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. 
} \seealso{ \code{\link{addvar}}, \code{\link{rhohat}}, \code{\link{rho2hat}} } \examples{ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) tra <- parres(model, "x") plot(tra) tra plot(parres(model, "x", subregion=square(0.5))) model2 <- ppm(X ~x+I(x^2)+y) plot(parres(model2, "x")) Z <- setcov(owin()) plot(parres(model2, Z)) #' when the model involves only one covariate modelb <- ppm(bei ~ elev + I(elev^2), data=bei.extra) plot(parres(modelb)) } \keyword{spatial} \keyword{models} spatstat.core/man/GmultiInhom.Rd0000644000176200001440000000575214141452520016351 0ustar liggesusers\name{GmultiInhom} \alias{GmultiInhom} \title{ Inhomogeneous Marked G-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{G} function, effectively the cumulative distribution function of the distance from a point in subset \eqn{I} to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. } \usage{ GmultiInhom(X, I, J, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{I}{ A subset index specifying the subset of points \emph{from} which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{J}{ A subset index specifying the subset of points \emph{to} which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaI,lambdaJ}. } \item{lambdaI}{ Intensity estimates for each point of \code{X[I]}. A numeric vector of length equal to \code{npoints(X[I])}. Incompatible with \code{lambda}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{G} function should be estimated. There is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ See Cronie and Van Lieshout (2015). } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{G} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{Ginhom}}, \code{\link{Gmulti}} } \examples{ X <- rescale(amacrine) I <- (marks(X) == "on") J <- (marks(X) == "off") mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 plot(GmultiInhom(X, I, J, lambda=lam, lambdamin=lmin)) # equivalent plot(GmultiInhom(X, I, J, lambdaI=lam[I], lambdaJ=lam[J], lambdamin=lmin), main="") } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rmh.default.Rd0000644000176200001440000006612414141452520016326 0ustar liggesusers\name{rmh.default} \alias{rmh.default} \title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.} \description{ Generates a random point pattern, simulated from a chosen point process model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{default}(model, start=NULL, control=default.rmhcontrol(model), \dots, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, snoop=FALSE) } \arguments{ \item{model}{Data specifying the point process model that is to be simulated. } \item{start}{Data determining the initial state of the algorithm. } \item{control}{Data controlling the iterative behaviour and termination of the algorithm. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}} or to trend functions in \code{model}. } \item{nsim}{ Number of simulated point patterns that should be generated. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } \item{saveinfo}{ Logical value indicating whether to save auxiliary information. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{snoop}{ Logical. If \code{TRUE}, activate the visual debugger. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}) or a list of point patterns. The returned value has an attribute \code{info} containing modified versions of the arguments \code{model}, \code{start}, and \code{control} which together specify the exact simulation procedure. The \code{info} attribute can be printed (and is printed automatically by \code{\link{summary.ppp}}). For computational efficiency, the \code{info} attribute can be omitted by setting \code{saveinfo=FALSE}. The value of \code{\link[base:Random]{.Random.seed}} at the start of the simulations is also saved and returned as an attribute \code{seed}. If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}), the transition history of the algorithm is saved, and returned as an attribute \code{history}. The transition history is a data frame containing a factor \code{proposaltype} identifying the proposal type (Birth, Death or Shift) and a logical vector \code{accepted} indicating whether the proposal was accepted. The data frame also has columns \code{numerator}, \code{denominator} which give the numerator and denominator of the Hastings ratio for the proposal. If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}), the return value has an attribute \code{saved} which is a list of point patterns, containing the intermediate states of the algorithm. } \details{ This function generates simulated realisations from any of a range of spatial point processes, using the Metropolis-Hastings algorithm. It is the default method for the generic function \code{\link{rmh}}. This function executes a Metropolis-Hastings algorithm with birth, death and shift proposals as described in Geyer and \Moller (1994). 
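To convey the flavour of the algorithm, here is a deliberately simplified sketch, in plain \R, of a single birth-or-death proposal for a Strauss process, using the standard Metropolis-Hastings acceptance ratios for finite point processes. It is schematic only: the actual implementation is written in C, also uses shift proposals, and handles trends, marks, multiple types, window expansion and conditional simulation. \preformatted{
  ## Schematic sketch only; not the implementation used by rmh.
  strauss.cif <- function(x, y, X, beta, gamma, R) {
    ## conditional intensity lambda((x,y) | X) for the Strauss model
    nclose <- sum(crossdist(x, y, X$x, X$y) < R)
    beta * gamma^nclose
  }
  mhstep <- function(X, beta, gamma, R, W) {
    A <- area(W)
    n <- npoints(X)
    if(runif(1) < 0.5) {
      ## birth proposal: a uniform random point u
      u <- runifpoint(1, W)
      lam <- strauss.cif(u$x, u$y, X, beta, gamma, R)
      if(runif(1) < lam * A/(n + 1)) X <- superimpose(X, u, W=W)
    } else if(n > 0) {
      ## death proposal: delete a uniformly chosen point
      i <- sample(n, 1)
      lam <- strauss.cif(X$x[i], X$y[i], X[-i], beta, gamma, R)
      if(runif(1) < n/(lam * A)) X <- X[-i]
    }
    X
  }
} Repeating such steps a large number of times (the role of the control parameter \code{nrep}) yields a pattern whose distribution is approximately that of the target process.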
The argument \code{model} specifies the point process model to be simulated. It is either a list, or an object of class \code{"rmhmodel"}, with the following components: \describe{ \item{cif}{A character string specifying the choice of interpoint interaction for the point process. } \item{par}{ Parameter values for the conditional intensity function. } \item{w}{ (Optional) window in which the pattern is to be generated. An object of class \code{"owin"}, or data acceptable to \code{\link{as.owin}}. } \item{trend}{ Data specifying the spatial trend in the model, if it has a trend. This may be a function, a pixel image (of class \code{"im"}), (or a list of functions or images if the model is multitype). If the trend is a function or functions, any auxiliary arguments \code{...} to \code{rmh.default} will be passed to these functions, which should be of the form \code{function(x, y, ...)}. } \item{types}{ List of possible types, for a multitype point process. } } For full details of these parameters, see \code{\link{rmhmodel.default}}. The argument \code{start} determines the initial state of the Metropolis-Hastings algorithm. It is either \code{NULL}, or an object of class \code{"rmhstart"}, or a list with the following components: \describe{ \item{n.start}{ Number of points in the initial point pattern. A single integer, or a vector of integers giving the numbers of points of each type in a multitype point pattern. Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{"ppp"}), or data which can be coerced to this class by \code{\link{as.ppp}}, or an object with components \code{x} and \code{y}, or a two-column matrix. In the last two cases, the window for the pattern is determined by \code{model$w}. In the first two cases, if \code{model$w} is also present, then the final simulated pattern will be clipped to the window \code{model$w}. } } For full details of these parameters, see \code{\link{rmhstart}}. The third argument \code{control} controls the simulation procedure (including \emph{conditional simulation}), iterative behaviour, and termination of the Metropolis-Hastings algorithm. It is either \code{NULL}, or a list, or an object of class \code{"rmhcontrol"}, with components: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that birth/death has been chosen over shift. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. } \item{expand}{ Either a numerical expansion factor, or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a larger domain than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. The default is to expand the simulation window if the model is stationary and non-Poisson (i.e. it has no trend and the interaction is not Poisson) and not to expand in all other cases. If the model has a trend, then in order for expansion to be feasible, the trend must be given either as a function, or an image whose bounding box is large enough to contain the expanded window. } \item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE} we simulate a process on the torus formed by identifying opposite edges of a rectangular window. 
} \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{x.cond}{If this argument is present, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the conditioning points and the type of conditioning. } \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } } For full details of these parameters, see \code{\link{rmhcontrol}}. The control parameters can also be given in the \code{\dots} arguments. } \section{Conditional Simulation}{ There are several kinds of conditional simulation. \itemize{ \item Simulation \emph{conditional upon the number of points}, that is, holding the number of points fixed. To do this, set \code{control$p} (the probability of a shift) equal to 1. The number of points is then determined by the starting state, which may be specified either by setting \code{start$n.start} to be a scalar, or by setting the initial pattern \code{start$x.start}. \item In the case of multitype processes, it is possible to simulate the model \emph{conditionally upon the number of points of each type}, i.e. holding the number of points of each type to be fixed. To do this, set \code{control$p} equal to 1 and \code{control$fixall} to be \code{TRUE}. The number of points is then determined by the starting state, which may be specified either by setting \code{start$n.start} to be an integer vector, or by setting the initial pattern \code{start$x.start}. \item Simulation \emph{conditional on the configuration observed in a sub-window}, that is, requiring that, inside a specified sub-window \eqn{V}, the simulated pattern should agree with a specified point pattern \eqn{y}.To do this, set \code{control$x.cond} to equal the specified point pattern \eqn{y}, making sure that it is an object of class \code{"ppp"} and that the window \code{Window(control$x.cond)} is the conditioning window \eqn{V}. \item Simulation \emph{conditional on the presence of specified points}, that is, requiring that the simulated pattern should include a specified set of points. This is simulation from the Palm distribution of the point process given a pattern \eqn{y}. To do this, set \code{control$x.cond} to be a \code{data.frame} containing the coordinates (and marks, if appropriate) of the specified points. } For further information, see \code{\link{rmhcontrol}}. Note that, when we simulate conditionally on the number of points, or conditionally on the number of points of each type, no expansion of the window is possible. } \section{Visual Debugger}{ If \code{snoop = TRUE}, an interactive debugger is activated. On the current plot device, the debugger displays the current state of the Metropolis-Hastings algorithm together with the proposed transition to the next state. Clicking on this graphical display (using the left mouse button) will re-centre the display at the clicked location. 
Surrounding this graphical display is an array of boxes representing different actions. Clicking on one of the action boxes (using the left mouse button) will cause the action to be performed. Debugger actions include: \itemize{ \item Zooming in or out \item Panning (shifting the field of view) left, right, up or down \item Jumping to the next iteration \item Skipping 10, 100, 1000, 10000 or 100000 iterations \item Jumping to the next Birth proposal (etc) \item Changing the fate of the proposal (i.e. changing whether the proposal is accepted or rejected) \item Dumping the current state and proposal to a file \item Printing detailed information at the terminal \item Exiting the debugger (so that the simulation algorithm continues without further interruption). } Right-clicking the mouse will also cause the debugger to exit. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283 -- 322. Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. Geyer, C.J. and \Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings}{ There is never a guarantee that the Metropolis-Hastings algorithm has converged to its limiting distribution. If \code{start$x.start} is specified then \code{expand} is set equal to 1 and simulation takes place in \code{Window(x.start)}. Any specified value for \code{expand} is simply ignored. The presence of both a component \code{w} of \code{model} and a non-null value for \code{Window(x.start)} makes sense ONLY if \code{w} is contained in \code{Window(x.start)}. For multitype processes make sure that, even if there is to be no trend corresponding to a particular type, there is still a component (a NULL component) for that type, in the list. } \seealso{ \code{\link{rmh}}, \code{\link{rmh.ppm}}, \code{\link{rStrauss}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Triplets}} } \section{Other models}{ In theory, any finite point process model can be simulated using the Metropolis-Hastings algorithm, provided the conditional intensity is uniformly bounded. In practice, the list of point process models that can be simulated using \code{rmh.default} is limited to those that have been implemented in the package's internal C code. 
More options will be added in the future. Note that the \code{lookup} conditional intensity function permits the simulation (in theory, to any desired degree of approximation) of any pairwise interaction process for which the interaction depends only on the distance between the pair of points. } \section{Reproducible simulations}{ If the user wants the simulation to be exactly reproducible (e.g. for a figure in a journal article, where it is useful to have the figure consistent from draft to draft) then the state of the random number generator should be set before calling \code{rmh.default}. This can be done either by calling \code{\link[base:Random]{set.seed}} or by assigning a value to \code{\link[base:Random]{.Random.seed}}. In the examples below, we use \code{\link[base:Random]{set.seed}}. If a simulation has been performed and the user now wants to repeat it exactly, the random seed should be extracted from the simulated point pattern \code{X} by \code{seed <- attr(X, "seed")}, then assigned to the system random number state by \code{.Random.seed <- seed} before calling \code{rmh.default}. } \examples{ if(interactive()) { nr <- 1e5 nv <- 5000 ns <- 200 } else { nr <- 20 nv <- 5 ns <- 20 oldopt <- spatstat.options() spatstat.options(expand=1.05) } set.seed(961018) # Strauss process. mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.strauss) # Strauss process, conditioning on n = 42: X2.strauss <- rmh(model=mod01,start=list(n.start=42), control=list(p=1,nrep=nr,nverb=nv)) # Tracking algorithm progress: # (a) saving intermediate states: X <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr, nsave=nr/5, nburn=nr/2)) Saved <- attr(X, "saved") plot(Saved) # (b) inspecting transition history: X <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr, track=TRUE)) History <- attr(X, "history") head(History) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X3.hardcore) # Strauss process equal to pure hardcore: mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02s,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X4.strauss) # Strauss process in a polygonal window, conditioning on n = 80. X5.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(p=1,nrep=nr,nverb=nv)) # Strauss process, starting off from X4.strauss, but with the # polygonal window replaced by a rectangular one. At the end, # the generated pattern is clipped to the original polygonal window. 
xxx <- X4.strauss Window(xxx) <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr,nverb=nv)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.sftcr) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) X.area <- rmh(model=mod42,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.area) # Triplets process modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X.triplets <- rmh(model=modtrip, start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.triplets) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) if(interactive()) plot(X1.straussm) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) # Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250)) X.straushm <- rmh(model=mod09,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) if(interactive()) plot(X1.straushm.trend) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- 
rmh(model=mod12,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.dgs) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.diggra) # Fiksel: modFik <- list(cif="fiksel", par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0), w=square(1)) X.fiksel <- rmh(model=modFik,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.fiksel) # Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.geyer) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr,nverb=nv)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.lookup) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r), w=square(250), trend=tr) X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Baddeley-Geyer r <- seq(0,0.2,length=8)[-1] gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5) mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5), w=square(1)) X1.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod19 <- list(cif="badgey", par=list(beta=4000, gamma=gmma,r=r,sat=1e4), w=square(1)) set.seed(1329) X2.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Check: h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2 hs <- stepfun(r,c(h,1)) mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1)) set.seed(1329) X.check <- rmh(model=mod20,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # X2.badgey and X.check will be identical. mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1), r=c(0.035,0.07,0.14),sat=5), w=square(1)) X3.badgey <- rmh(model=mod21,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same result as Geyer model with beta=300, gamma=0.4, r=0.07, # sat = 5 (if seeds and control parameters are the same) # Or more simply: mod22 <- list(cif="badgey", par=list(beta=300,gamma=0.4,r=0.07, sat=5), w=square(1)) X4.badgey <- rmh(model=mod22,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same again --- i.e. the BadGey model includes the Geyer model. # Illustrating scalability. 
if(FALSE) { M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin()) set.seed(496) X1 <- rmh(model=M1,start=list(n.start=300)) M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4), w=owin(c(0,10),c(0,10))) set.seed(496) X2 <- rmh(model=M2,start=list(n.start=300)) chk <- affine(X1,mat=diag(c(10,10))) all.equal(chk,X2,check.attributes=FALSE) # Under the default spatstat options the foregoing all.equal() # will yield TRUE. Setting spatstat.options(scalable=FALSE) and # re-running the code will reveal differences between X1 and X2. } if(!interactive()) spatstat.options(oldopt) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/rshift.Rd0000644000176200001440000000300714141452520015403 0ustar liggesusers\name{rshift} \alias{rshift} \title{Random Shift} \description{ Randomly shifts the points of a point pattern or line segment pattern. Generic. } \usage{ rshift(X, \dots) } \arguments{ \item{X}{Pattern to be subjected to a random shift. A point pattern (class \code{"ppp"}), a line segment pattern (class \code{"psp"}) or an object of class \code{"splitppp"}. } \item{\dots}{ Arguments controlling the generation of the random shift vector, or specifying which parts of the pattern will be shifted. } } \value{ An object of the same type as \code{X}. } \details{ This operation applies a random shift (vector displacement) to the points in a point pattern, or to the segments in a line segment pattern. The argument \code{X} may be \itemize{ \item a point pattern (an object of class \code{"ppp"}) \item a line segment pattern (an object of class \code{"psp"}) \item an object of class \code{"splitppp"} (basically a list of point patterns, obtained from \code{\link{split.ppp}}). } The function \code{rshift} is generic, with methods for the three classes \code{"ppp"}, \code{"psp"} and \code{"splitppp"}. See the help pages for these methods, \code{\link{rshift.ppp}}, \code{\link{rshift.psp}} and \code{\link{rshift.splitppp}}, for further information. } \seealso{ \code{\link{rshift.ppp}}, \code{\link{rshift.psp}}, \code{\link{rshift.splitppp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/scan.test.Rd0000644000176200001440000001260114141452520016006 0ustar liggesusers\name{scan.test} \alias{scan.test} \title{ Spatial Scan Test } \description{ Performs the Spatial Scan Test for clustering in a spatial point pattern, or for clustering of one type of point in a bivariate spatial point pattern. } \usage{ scan.test(X, r, ..., method = c("poisson", "binomial"), nsim = 19, baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), verbose = TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{nsim}{ Number of simulations for computing Monte Carlo p-value. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. 
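    Correspondingly, \code{"less"} postulates that the mean number of
    points inside the circle will be less than expected under the null,
    and \code{"two.sided"} postulates a departure in either direction.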
  }
  \item{verbose}{
    Logical. Whether to print progress reports.
  }
}
\details{
  The spatial scan test (Kulldorff, 1997) is applied to the point
  pattern \code{X}.

  In a nutshell,
  \itemize{
    \item
    If \code{method="poisson"} then a significant result would mean that
    there is a circle of radius \code{r}, located somewhere in the
    spatial domain of the data, which contains a significantly higher
    than expected number of points of \code{X}. That is, the pattern
    \code{X} exhibits spatial clustering.
    \item
    If \code{method="binomial"} then \code{X} must be a bivariate
    (two-type) point pattern. By default, the first type of point is
    interpreted as a control (non-event) and the second type of point
    as a case (event). A significant result would mean that there is a
    circle of radius \code{r} which contains a significantly higher
    than expected number of cases. That is, the cases are clustered
    together, conditional on the locations of all points.
  }

  Following is a more detailed explanation.
  \itemize{
    \item
    If \code{method="poisson"} then the scan test based on Poisson
    likelihood is performed (Kulldorff, 1997). The dataset \code{X} is
    treated as an unmarked point pattern. By default (if \code{baseline}
    is not specified) the null hypothesis is complete spatial randomness
    (CSR, i.e. a uniform Poisson process). The alternative hypothesis is
    a Poisson process with one intensity \eqn{\beta_1}{beta1} inside
    some circle of radius \code{r} and another intensity
    \eqn{\beta_0}{beta0} outside the circle.
    If \code{baseline} is given, then it should be a pixel image or a
    \code{function(x,y)}. The null hypothesis is an inhomogeneous
    Poisson process with intensity proportional to \code{baseline}.
    The alternative hypothesis is an inhomogeneous Poisson process with
    intensity \code{beta1 * baseline} inside some circle of radius
    \code{r}, and \code{beta0 * baseline} outside the circle.
    \item
    If \code{method="binomial"} then the scan test based on binomial
    likelihood is performed (Kulldorff, 1997). The dataset \code{X}
    must be a bivariate point pattern, i.e. a multitype point pattern
    with two types. The null hypothesis is that all permutations of the
    type labels are equally likely. The alternative hypothesis is that
    some circle of radius \code{r} has a higher proportion of points of
    the second type than expected under the null hypothesis.
  }

  The result of \code{scan.test} is a hypothesis test (object of class
  \code{"htest"}) which can be printed to report the results.
  The component \code{p.value} contains the \eqn{p}-value.

  The result of \code{scan.test} can also be plotted (using the plot
  method for the class \code{"scan.test"}). The plot is a pixel image
  of the Likelihood Ratio Test Statistic (2 times the log likelihood
  ratio) as a function of the location of the centre of the circle.
  This pixel image can be extracted from the object using
  \code{\link{as.im.scan.test}}. The Likelihood Ratio Test Statistic is
  computed by \code{\link{scanLRTS}}.
}
\value{
  An object of class \code{"htest"} (hypothesis test)
  which also belongs to the class \code{"scan.test"}.
  Printing this object gives the result of the test.
  Plotting this object displays the Likelihood Ratio Test Statistic
  as a function of the location of the centre of the circle.
}
\references{
  Kulldorff, M. (1997) A spatial scan statistic.
  \emph{Communications in Statistics --- Theory and Methods}
  \bold{26}, 1481--1496.
}
\author{\adrian and \rolf }
\seealso{
  \code{\link{plot.scan.test}},
  \code{\link{as.im.scan.test}},
  \code{\link{relrisk}},
  \code{\link{scanLRTS}}
}
\examples{
 nsim <- if(interactive()) 19 else 2
 rr <- if(interactive()) seq(0.5, 1, by=0.1) else c(0.5, 1)

 scan.test(redwood, 0.1 * rr, method="poisson", nsim=nsim)
 scan.test(chorley, rr, method="binomial", case="larynx", nsim=nsim)
}
\keyword{htest}
\keyword{spatial}
spatstat.core/man/spatcov.Rd0000644000176200001440000001105414141452520015564 0ustar liggesusers\name{spatcov}
\alias{spatcov}
\title{
  Estimate the Spatial Covariance Function of a Random Field
}
\description{
  Given a pixel image, calculate an estimate of the spatial covariance
  function. Given two pixel images, calculate an estimate of their
  spatial cross-covariance function.
}
\usage{
spatcov(X, Y=X, \dots, correlation=FALSE, isotropic = TRUE,
        clip = TRUE, pooling=TRUE)
}
\arguments{
  \item{X}{
    A pixel image (object of class \code{"im"}).
  }
  \item{Y}{
    Optional. Another pixel image.
  }
  \item{correlation}{
    Logical value specifying whether to standardise so that the
    spatial correlation function is returned.
  }
  \item{isotropic}{
    Logical value specifying whether to assume the covariance
    is isotropic, so that the result is a function of the lag distance.
  }
  \item{clip}{
    Logical value specifying whether to restrict the results to the
    range of spatial lags where the estimate is reliable.
  }
  \item{pooling}{
    Logical value specifying the estimation method when
    \code{isotropic=TRUE}.
  }
  \item{\dots}{Ignored.}
}
\details{
  In normal usage, only the first argument \code{X} is given.
  Then the pixel image \code{X} is treated as a realisation of a
  stationary random field, and its spatial covariance function is
  estimated.

  Alternatively if \code{Y} is given, then \code{X} and \code{Y} are
  assumed to be jointly stationary random fields, and their spatial
  cross-covariance function is estimated.

  For any random field \code{X}, the spatial covariance is defined for
  any two spatial locations \eqn{u} and \eqn{v} by
  \deqn{
    C(u,v) = \mbox{cov}(X(u), X(v))
  }{
    C(u,v) = cov(X(u), X(v))
  }
  where \eqn{X(u)} and \eqn{X(v)} are the values of the random field
  at those locations. Here \eqn{\mbox{cov}}{cov} denotes the
  statistical covariance, defined for any random variables
  \eqn{A} and \eqn{B} by
  \eqn{\mbox{cov}(A,B) = E(AB) - E(A) E(B)}{cov(A,B) = E(AB) - E(A) E(B)}
  where \eqn{E(A)} denotes the expected value of \eqn{A}.

  If the random field is assumed to be stationary (at least second-order
  stationary) then the spatial covariance \eqn{C(u,v)}
  depends only on the lag vector \eqn{v-u}:
  \deqn{
    C(u,v) = C_2(v-u)
  }{
    C(u,v) = C2(v-u)
  }
  where \eqn{C_2}{C2} is a function of a single vector argument.

  If the random field is stationary and isotropic, then the spatial
  covariance depends only on the lag distance
  \eqn{\| v - u \|}{||v-u||}:
  \deqn{
    C_2(v-u) = C_1(\|v-u\|)
  }{
    C2(v-u) = C1(||v-u||)
  }
  where \eqn{C_1}{C1} is a function of distance.

  The function \code{spatcov} computes estimates of the covariance
  function \eqn{C_1}{C1} or \eqn{C_2}{C2} as follows:
  \itemize{
    \item
    If \code{isotropic=FALSE}, an estimate of the covariance function
    \eqn{C_2}{C2} is computed, assuming the random field is stationary,
    using the naive moment estimator,
    \code{C2 = imcov(X-mean(X))/setcov(Window(X))}.
    The result is a pixel image.
    \item
    If \code{isotropic=TRUE} (the default), an estimate of the
    covariance function \eqn{C_1}{C1} is computed, assuming the random
    field is stationary and isotropic.
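    Two estimation methods are available, selected by the
    argument \code{pooling}: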
\itemize{ \item When \code{pooling=FALSE}, the estimate of \eqn{C_1}{C1} is the rotational average of the naive estimate of \eqn{C_2}{C2}. \item When \code{pooling=TRUE} (the default), the estimate of \eqn{C_1}{C1} is the ratio of the rotational averages of the numerator and denominator which form the naive estimate of \eqn{C_2}{C2}. } The result is a function object (class \code{"fv"}). } If the argument \code{Y} is given, it should be a pixel image compatible with \code{X}. An estimate of the spatial cross-covariance function between \code{X} and \code{Y} will be computed. } \value{ If \code{isotropic=TRUE} (the default), the result is a function value table (object of class \code{"fv"}) giving the estimated values of the covariance function or spatial correlation function for a sequence of values of the spatial lag distance \code{r}. If \code{isotropic=FALSE}, the result is a pixel image (object of class \code{"im"}) giving the estimated values of the spatial covariance function or spatial correlation function for a grid of values of the spatial lag vector. } \author{ \adrian } \seealso{ \code{\link{imcov}}, \code{\link{setcov}} } \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32) D <- density(cells) plot(spatcov(D)) if(offline) spatstat.options(op) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/berman.test.Rd0000644000176200001440000001473514141452520016340 0ustar liggesusers\name{berman.test} \alias{berman.test} \alias{berman.test.ppm} \alias{berman.test.ppp} \title{Berman's Tests for Point Process Model} \description{ Tests the goodness-of-fit of a Poisson point process model using methods of Berman (1986). } \usage{ berman.test(...) \method{berman.test}{ppp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{ppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ The spatial covariate on which the test will be based. An image (object of class \code{"im"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. } \item{\dots}{ Additional arguments controlling the pixel resolution (arguments \code{dimyx} and \code{eps} passed to \code{\link{as.mask}}) or other undocumented features. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The \eqn{Z_1}{Z[1]} test is also known as the Lawson-Waller test. The function \code{berman.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{berman.test(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. 
    \item
    If \code{model} is a fitted point process model
    (object of class \code{"ppm"} or \code{"lppm"})
    then \code{berman.test(model, ...)} performs a test of
    goodness-of-fit for this fitted model. In this case, \code{model}
    should be a Poisson point process.
  }

  The test is performed by comparing the observed distribution
  of the values of a spatial covariate at the data points,
  and the predicted distribution of the same covariate under the model.
  Thus, you must nominate a spatial covariate for this test.

  The argument \code{covariate} should be either a \code{function(x,y)}
  or a pixel image (object of class \code{"im"}) containing the values
  of a spatial function.
  If \code{covariate} is an image, it should have numeric values,
  and its domain should cover the observation window of the
  \code{model}. If \code{covariate} is a function, it should expect
  two arguments \code{x} and \code{y} which are vectors of coordinates,
  and it should return a numeric vector of the same length
  as \code{x} and \code{y}.

  First the original data point pattern is extracted from \code{model}.
  The values of the \code{covariate} at these data points are collected.

  Next the values of the \code{covariate} at all locations in the
  observation window are evaluated. The point process intensity
  of the fitted model is also evaluated at all locations in the window.
  \itemize{
    \item
    If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is
    computed as follows. The sum \eqn{S} of the covariate values at all
    data points is evaluated. The predicted mean \eqn{\mu}{mu} and
    variance \eqn{\sigma^2}{sigma^2} of \eqn{S} are computed
    from the values of the covariate at all locations in the window.
    Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1] = (S-mu)/sigma}.
    Closely-related tests were proposed independently
    by Waller et al (1992) and Lawson (1993),
    so this test is often termed the Lawson-Waller test
    in epidemiological literature.
    \item
    If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is
    computed as follows. The values of the \code{covariate} at all
    locations in the observation window, weighted by the point process
    intensity, are compiled into a cumulative distribution function
    \eqn{F}. The probability integral transformation is then applied:
    the values of the \code{covariate} at the original data points
    are transformed by the predicted cumulative distribution function
    \eqn{F} into numbers between 0 and 1. If the model is correct,
    these numbers are i.i.d. uniform random numbers.
    The standardised sample mean of these numbers is the statistic
    \eqn{Z_2}{Z[2]}.
  }
  In both cases the null distribution of the test statistic
  is the standard normal distribution, approximately.

  The return value is an object of class \code{"htest"} containing the
  results of the hypothesis test. The print method for this class
  gives an informative summary of the test outcome.
}
\value{
  An object of class \code{"htest"} (hypothesis test)
  and also of class \code{"bermantest"},
  containing the results of the test.
  The return value can be plotted (by \code{\link{plot.bermantest}})
  or printed to give an informative summary of the test.
}
\section{Warning}{
  The meaning of a one-sided test must be carefully scrutinised: see
  the printed output.
}
\author{
  \spatstatAuthors.
}
\seealso{
  \code{\link{cdf.test}},
  \code{\link{quadrat.test}},
  \code{\link{ppm}}
}
\references{
  Berman, M. (1986)
  Testing for spatial association between a point process
  and another stochastic process.
  \emph{Applied Statistics} \bold{35}, 54--62.

  Lawson, A.B.
  (1993)
  On the analysis of mortality events around a prespecified fixed point.
  \emph{Journal of the Royal Statistical Society, Series A}
  \bold{156} (3) 363--377.

  Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992)
  Chronic disease surveillance and testing of clustering of disease and
  exposure: application to leukaemia incidence and TCE-contaminated
  dumpsites in upstate New York.
  \emph{Environmetrics} \bold{3}, 281--300.
}
\examples{
   # Berman's data
   data(copper)
   X <- copper$SouthPoints
   L <- copper$SouthLines
   D <- distmap(L, eps=1)
   # test of CSR
   berman.test(X, D)
   berman.test(X, D, "Z2")
}
\keyword{htest}
\keyword{spatial}
spatstat.core/man/rnoise.Rd0000644000176200001440000000342214141452520015404 0ustar liggesusers\name{rnoise}
\alias{rnoise}
\title{
  Random Pixel Noise
}
\description{
  Generate a pixel image whose pixel values are random numbers
  following a specified probability distribution.
}
\usage{
rnoise(rgen = runif, w = square(1), \dots)
}
\arguments{
  \item{rgen}{
    Random generator for the pixel values.
    A function in the \R language.
  }
  \item{w}{
    Window (region or pixel raster) in which to generate the image.
    Any data acceptable to \code{\link{as.mask}}.
  }
  \item{\dots}{
    Arguments, matched by name, to be passed to \code{rgen} to specify
    the parameters of the probability distribution, or passed to
    \code{\link{as.mask}} to control the pixel resolution.
  }
}
\details{
  The argument \code{w} could be a window (class \code{"owin"}),
  a pixel image (class \code{"im"}) or other data. It is first
  converted to a binary mask by \code{\link{as.mask}} using any
  relevant arguments in \code{\dots}.

  Then each pixel inside the window (i.e. with logical value
  \code{TRUE} in the mask) is assigned a random numerical value
  by calling the function \code{rgen}.

  The function \code{rgen} would typically be one of the standard
  random variable generators like \code{\link{runif}} (uniformly
  distributed random values) or \code{\link{rnorm}} (Gaussian random
  values). Its first argument \code{n} is the number of values to be
  generated. Other arguments to \code{rgen} must be matched by name.
}
\value{
  A pixel image (object of class \code{"im"}).
}
\author{
  \adrian, \rolf and \ege
}
\seealso{
  \code{\link{as.mask}},
  \code{\link{as.im}},
  \code{\link[stats]{Distributions}}.
}
\examples{
  plot(rnoise(), main="Uniform noise")
  plot(rnoise(rnorm, dimyx=32, mean=2, sd=1), main="White noise")
}
\keyword{spatial}
\keyword{datagen}
spatstat.core/man/localK.Rd0000644000176200001440000001122614141452520015313 0ustar liggesusers\name{localK}
\alias{localK}
\alias{localL}
\title{Neighbourhood Density Function}
\description{
  Computes the neighbourhood density function, a local version of
  the \eqn{K}-function or \eqn{L}-function,
  defined by Getis and Franklin (1987).
}
\usage{
  localK(X, ..., rmax = NULL, correction = "Ripley", verbose = TRUE,
         rvalue=NULL)
  localL(X, ..., rmax = NULL, correction = "Ripley", verbose = TRUE,
         rvalue=NULL)
}
\arguments{
  \item{X}{A point pattern (object of class \code{"ppp"}).}
  \item{\dots}{Ignored.}
  \item{rmax}{
    Optional. Maximum desired value of the argument \eqn{r}.
  }
  \item{correction}{
    String specifying the edge correction to be applied.
    Options are \code{"none"}, \code{"translate"}, \code{"translation"},
    \code{"Ripley"}, \code{"isotropic"} or \code{"best"}.
    Only one correction may be specified.
  }
  \item{verbose}{
    Logical flag indicating whether to print progress
    reports during the calculation.
  }
  \item{rvalue}{Optional.
    A \emph{single} value of the distance argument \eqn{r} at which the
    function L or K should be computed.
  }
}
\details{
  The command \code{localL} computes the \emph{neighbourhood density
  function}, a local version of the \eqn{L}-function (Besag's
  transformation of Ripley's \eqn{K}-function) that was proposed by
  Getis and Franklin (1987). The command \code{localK} computes the
  corresponding local analogue of the K-function.

  Given a spatial point pattern \code{X}, the neighbourhood density
  function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point
  in \code{X} is computed by
  \deqn{
    L_i(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}}
  }{
    L[i](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j])
  }
  where the sum is over all points \eqn{j \neq i}{j != i} that lie
  within a distance \eqn{r} of the \eqn{i}th point,
  \eqn{a} is the area of the observation window, \eqn{n} is the number
  of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction
  term (as described in \code{\link{Kest}}).
  The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one
  of the summands that contribute to the global estimate of the
  L function.

  By default, the function \eqn{L_i(r)}{L[i](r)} or
  \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values
  for each point \eqn{i}. The results are stored as a function value
  table (object of class \code{"fv"}) with a column of the table
  containing the function estimates for each point of the pattern
  \code{X}.

  Alternatively, if the argument \code{rvalue} is given, and it is a
  single number, then the function will only be computed for this value
  of \eqn{r}, and the results will be returned as a numeric vector,
  with one entry of the vector for each point of the pattern \code{X}.

  Inhomogeneous counterparts of \code{localK} and \code{localL}
  are computed by \code{localKinhom} and \code{localLinhom}.
}
\value{
  If \code{rvalue} is given, the result is a numeric vector
  of length equal to the number of points in the point pattern.

  If \code{rvalue} is absent, the result is an object of class
  \code{"fv"}, see \code{\link{fv.object}}, which can be plotted
  directly using \code{\link{plot.fv}}.
  Essentially a data frame containing columns
  \item{r}{the vector of values of the argument \eqn{r}
    at which the function \eqn{K} has been estimated
  }
  \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}
    or \eqn{L(r)=r} for a stationary Poisson process
  }
  together with columns containing the values of the
  neighbourhood density function for each point in the pattern.
  Column \code{i} corresponds to the \code{i}th point.
  The last two columns contain the \code{r} and \code{theo} values.
}
\references{
  Getis, A. and Franklin, J. (1987)
  Second-order neighbourhood analysis of mapped point patterns.
  \emph{Ecology} \bold{68}, 473--477.
}
\seealso{
  \code{\link{Kest}},
  \code{\link{Lest}},
  \code{\link{localKinhom}},
  \code{\link{localLinhom}}.
}
\examples{
  data(ponderosa)
  X <- ponderosa

  # compute all the local L functions
  L <- localL(X)

  # plot all the local L functions against r
  plot(L, main="local L functions for ponderosa", legend=FALSE)

  # plot only the local L function for point number 7
  plot(L, iso007 ~ r)

  # compute the values of L(r) for r = 12 metres
  L12 <- localL(X, rvalue=12)

  # Spatially interpolate the values of L12
  # Compare Figure 5(b) of Getis and Franklin (1987)
  X12 <- X \%mark\% L12
  Z <- Smooth(X12, sigma=5, dimyx=128)

  plot(Z, col=topo.colors(128), main="smoothed neighbourhood density")
  contour(Z, add=TRUE)
  points(X, pch=16, cex=0.5)
}
\author{\adrian and \rolf }
\keyword{spatial}
\keyword{nonparametric}
spatstat.core/man/Finhom.Rd0000644000176200001440000001526014141452520015330 0ustar liggesusers\name{Finhom}
\alias{Finhom}
\title{
  Inhomogeneous Empty Space Function
}
\description{
  Estimates the inhomogeneous empty space function of
  a non-stationary point pattern.
}
\usage{
  Finhom(X, lambda = NULL, lmin = NULL, ...,
        sigma = NULL, varcov = NULL,
        r = NULL, breaks = NULL, ratio = FALSE,
        update = TRUE, warn.bias=TRUE, savelambda=FALSE)
}
\arguments{
  \item{X}{
    The observed data point pattern, from which an estimate of the
    inhomogeneous \eqn{F} function will be computed.
    An object of class \code{"ppp"}
    or in a format recognised by \code{\link{as.ppp}()}.
  }
  \item{lambda}{
    Optional. Values of the estimated intensity function.
    Either a vector giving the intensity values
    at the points of the pattern \code{X},
    a pixel image (object of class \code{"im"}) giving the
    intensity values at all locations, a fitted point process model
    (object of class \code{"ppm"}) or a \code{function(x,y)} which
    can be evaluated to give the intensity value at any location.
  }
  \item{lmin}{
    Optional. The minimum possible value of the intensity
    over the spatial domain. A positive numerical value.
  }
  \item{sigma,varcov}{
    Optional arguments passed to \code{\link{density.ppp}}
    to control the smoothing bandwidth, when \code{lambda} is
    estimated by kernel smoothing.
  }
  \item{\dots}{
    Extra arguments passed to \code{\link{as.mask}} to control
    the pixel resolution, or passed to \code{\link{density.ppp}}
    to control the smoothing bandwidth.
  }
  \item{r}{
    Vector of values for the argument \eqn{r} at which
    the inhomogeneous \eqn{F} function should be evaluated.
    Not normally given by the user; there is a sensible default.
  }
  \item{breaks}{
    This argument is for internal use only.
  }
  \item{ratio}{
    Logical.
    If \code{TRUE}, the numerator and denominator of the estimate will
    also be saved, for use in analysing replicated point patterns.
  }
  \item{update}{
    Logical. If \code{lambda} is a fitted model
    (class \code{"ppm"} or \code{"kppm"})
    and \code{update=TRUE} (the default),
    the model will first be refitted to the data \code{X}
    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
    before the fitted intensity is computed.
    If \code{update=FALSE}, the fitted intensity of the
    model will be computed without fitting it to \code{X}.
  }
  \item{warn.bias}{
    Logical value specifying whether to issue a warning
    when the inhomogeneity correction factor takes extreme values,
    which can often lead to biased results. This usually occurs
    when insufficient smoothing is used to estimate the intensity.
  }
  \item{savelambda}{
    Logical value specifying whether to save the values of
    \code{lmin} and \code{lambda} as attributes of the result.
  }
}
\details{
  This command computes estimates of the
  inhomogeneous \eqn{F}-function (van Lieshout, 2010)
  of a point pattern.
  It is the counterpart, for inhomogeneous spatial point patterns,
  of the empty space function \eqn{F}
  for homogeneous point patterns computed by \code{\link{Fest}}.

  The argument \code{X} should be a point pattern
  (object of class \code{"ppp"}).

  The inhomogeneous \eqn{F} function is computed
  using the border correction, equation (6) in Van Lieshout (2010).

  The argument \code{lambda} should supply the (estimated) values
  of the intensity function \eqn{\lambda}{lambda} of the point process.
  It may be either
  \describe{
    \item{a numeric vector}{
      containing the values
      of the intensity function at the points of the pattern \code{X}.
    }
    \item{a pixel image}{
      (object of class \code{"im"})
      assumed to contain the values of the intensity function
      at all locations in the window.
    }
    \item{a fitted point process model}{
      (object of class \code{"ppm"} or \code{"kppm"})
      whose fitted \emph{trend} can be used as the fitted intensity.
      (If \code{update=TRUE} the model will first be refitted to the
      data \code{X} before the trend is computed.)
    }
    \item{a function}{
      which can be evaluated to give values of the intensity at
      any locations.
    }
    \item{omitted:}{
      if \code{lambda} is omitted, then it will be estimated using
      a `leave-one-out' kernel smoother.
    }
  }
  If \code{lambda} is a numeric vector, then its length should
  be equal to the number of points in the pattern \code{X}.
  The value \code{lambda[i]} is assumed to be the (estimated) value of
  the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point
  \eqn{x_i}{x[i]} of the pattern \eqn{X}.
  Each value must be a positive number; \code{NA}'s are not allowed.

  If \code{lambda} is a pixel image, the domain of the image should
  cover the entire window of the point pattern. If it does not (which
  may occur near the boundary because of discretisation error),
  then the missing pixel values will be obtained by applying a Gaussian
  blur to \code{lambda} using \code{\link{blur}}, then looking up the
  values of this blurred image for the missing locations.
  (A warning will be issued in this case.)

  If \code{lambda} is a function, then it will be evaluated in the
  form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
  of coordinates of the points of \code{X}. It should return a numeric
  vector with length equal to the number of points in \code{X}.

  If \code{lambda} is omitted, then it will be estimated using
  a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for
  the point \code{X[i]} is computed by removing \code{X[i]} from the
  point pattern, applying kernel smoothing to the remaining points
  using \code{\link{density.ppp}}, and evaluating the smoothed
  intensity at the point \code{X[i]}. The smoothing kernel bandwidth is
  controlled by the arguments \code{sigma} and \code{varcov}, which are
  passed to \code{\link{density.ppp}} along with any extra arguments.
}
\value{
  An object of class \code{"fv"}, see \code{\link{fv.object}},
  which can be plotted directly using \code{\link{plot.fv}}.
}
\references{
  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
  A nonparametric measure of spatial interaction in point patterns.
  \emph{Statistica Neerlandica} \bold{50}, 344--361.

  Van Lieshout, M.N.M. (2010)
  A J-function for inhomogeneous point processes.
  \emph{Statistica Neerlandica} \bold{65}, 183--201.
}
\seealso{
  \code{\link{Ginhom}},
  \code{\link{Jinhom}},
  \code{\link{Fest}}
}
\examples{
  plot(Finhom(swedishpines, sigma=10))
  # plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2))
}
\author{
  Original code by Marie-Colette van Lieshout.
  C implementation and R adaptation by \adrian and \ege.
} \keyword{spatial} \keyword{nonparametric} spatstat.core/man/methods.fii.Rd0000644000176200001440000000540014141452520016314 0ustar liggesusers\name{methods.fii} \alias{methods.fii} %DoNotExport \Rdversion{1.1} \alias{print.fii} \alias{plot.fii} \alias{coef.fii} \alias{coef<-.fii} \alias{summary.fii} \alias{print.summary.fii} \alias{coef.summary.fii} \title{ Methods for Fitted Interactions } \description{ These are methods specifically for the class \code{"fii"} of fitted interpoint interactions. } \usage{ \method{print}{fii}(x, \dots) \method{coef}{fii}(object, \dots) \method{coef}{fii}(object, \dots) <- value \method{plot}{fii}(x, \dots) \method{summary}{fii}(object,\dots) \method{print}{summary.fii}(x, ...) \method{coef}{summary.fii}(object, ...) } \arguments{ \item{x,object}{ An object of class \code{"fii"} representing a fitted interpoint interaction. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ Numeric vector containing new values for the fitted interaction coefficients. } } \details{ These are methods for the class \code{"fii"}. An object of class \code{"fii"} represents a fitted interpoint interaction. It is usually obtained by using the command \code{\link{fitin}} to extract the fitted interaction part of a fitted point process model. See \code{\link{fitin}} for further explanation of this class. The commands listed here are methods for the generic functions \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[base]{plot}}, \code{\link[stats]{coef}} and \code{\link[nlme]{coef<-}} for objects of the class \code{"fii"}. Following the usual convention, \code{summary.fii} returns an object of class \code{summary.fii}, for which there is a print method. The effect is that, when the user types \code{summary(x)}, the summary is printed, but when the user types \code{y <- summary(x)}, the summary information is saved. The method \code{coef.fii} extracts the canonical coefficients of the fitted interaction, and returns them as a numeric vector. The method \code{coef.summary.fii} transforms these values into quantities that are more easily interpretable, in a format that depends on the particular model. There are also methods for the generic commands \code{\link{reach}} and \code{\link{as.interact}}, described elsewhere. } \value{ The \code{print} and \code{plot} methods return \code{NULL}. The \code{summary} method returns an object of class \code{summary.fii}. \code{coef.fii} returns a numeric vector. \code{coef.summary.fii} returns data whose structure depends on the model. } \author{ \adrian } \seealso{ \code{\link{fitin}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}} } \examples{ mod <- ppm(cells, ~1, Strauss(0.1)) f <- fitin(mod) f summary(f) plot(f) coef(f) coef(summary(f)) } \keyword{spatial} \keyword{methods} spatstat.core/man/slrm.Rd0000644000176200001440000001622314144333466015077 0ustar liggesusers\name{slrm} \alias{slrm} \title{Spatial Logistic Regression} \description{ Fits a spatial logistic regression model to a spatial point pattern. } \usage{ slrm(formula, ..., data = NULL, offset = TRUE, link = "logit", dataAtPoints=NULL, splitby=NULL) } \arguments{ \item{formula}{The model formula. See Details.} \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} determining the pixel resolution for the discretisation of the point pattern. } \item{data}{ Optional. A list containing data required in the formula. The names of entries in the list should correspond to variable names in the formula. 
The entries should be point patterns, pixel images or windows. } \item{offset}{ Logical flag indicating whether the model formula should be augmented by an offset equal to the logarithm of the pixel area. } \item{link}{The link function for the regression model. A character string, specifying a link function for binary regression. } \item{dataAtPoints}{Optional. Exact values of the covariates at the data points. A data frame, with column names corresponding to variables in the \code{formula}, with one row for each point in the point pattern dataset. } \item{splitby}{ Optional. Character string identifying a window. The window will be used to split pixels into sub-pixels. } } \details{ This function fits a Spatial Logistic Regression model (Tukey, 1972; Agterberg, 1974) to a spatial point pattern dataset. The logistic function may be replaced by another link function. The \code{formula} specifies the form of the model to be fitted, and the data to which it should be fitted. The \code{formula} must be an \R formula with a left and right hand side. The left hand side of the \code{formula} is the name of the point pattern dataset, an object of class \code{"ppp"}. The right hand side of the \code{formula} is an expression, in the usual \R formula syntax, representing the functional form of the linear predictor for the model. Each variable name that appears in the formula may be \itemize{ \item one of the reserved names \code{x} and \code{y}, referring to the Cartesian coordinates; \item the name of an entry in the list \code{data}, if this argument is given; \item the name of an object in the parent environment, that is, in the environment where the call to \code{slrm} was issued. } Each object appearing on the right hand side of the formula may be \itemize{ \item a pixel image (object of class \code{"im"}) containing the values of a covariate; \item a window (object of class \code{"owin"}), which will be interpreted as a logical covariate which is \code{TRUE} inside the window and \code{FALSE} outside it; \item a \code{function} in the \R language, with arguments \code{x,y}, which can be evaluated at any location to obtain the values of a covariate. } See the Examples below. The fitting algorithm discretises the point pattern onto a pixel grid. The value in each pixel is 1 if there are any points of the point pattern in the pixel, and 0 if there are no points in the pixel. The dimensions of the pixel grid will be determined as follows: \itemize{ \item The pixel grid will be determined by the extra arguments \code{\dots} if they are specified (for example the argument \code{dimyx} can be used to specify the number of pixels). \item Otherwise, if the right hand side of the \code{formula} includes the names of any pixel images containing covariate values, these images will determine the pixel grid for the discretisation. The covariate image with the finest grid (the smallest pixels) will be used. \item Otherwise, the default pixel grid size is given by \code{spatstat.options("npixel")}. } The covariates are evaluated at the centre of each pixel. If \code{dataAtPoints} is given, then the covariate values at the corresponding pixels are overwritten by the entries of \code{dataAtPoints} (and the spatial coordinates are overwritten by the exact spatial coordinates of the data points). If \code{link="logit"} (the default), the algorithm fits a Spatial Logistic Regression model. 
This model states that the probability \eqn{p} that a given pixel contains a data point, is related to the covariates through \deqn{\log\frac{p}{1-p} = \eta}{log(p/(1-p)) = eta} where \eqn{\eta}{eta} is the linear predictor of the model (a linear combination of the covariates, whose form is specified by the \code{formula}). If \code{link="cloglog"} then the algorithm fits a model stating that \deqn{\log(-\log(1-p)) = \eta}{log(-log(1-p)) = eta}. If \code{offset=TRUE} (the default), the model formula will be augmented by adding an offset term equal to the logarithm of the pixel area. This ensures that the fitted parameters are approximately independent of pixel size. If \code{offset=FALSE}, the offset is not included, and the traditional form of Spatial Logistic Regression is fitted. } \value{ An object of class \code{"slrm"} representing the fitted model. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. Automated stepwise model selection is possible using \code{\link{step}}. Confidence intervals for the parameters can be computed using \code{\link[stats]{confint}}. } \seealso{ \code{\link{anova.slrm}}, \code{\link{coef.slrm}}, \code{\link{fitted.slrm}}, \code{\link{logLik.slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{vcov.slrm}} } \references{ Agterberg, F.P. (1974) Automatic contouring of geological maps to detect target areas for mineral exploration. \emph{Journal of the International Association for Mathematical Geology} \bold{6}, 373--395. Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. \code{DOI: 10.1214/10-EJS581} Tukey, J.W. (1972) Discussion of paper by F.P. Agterberg and S.C. Robinson. \emph{Bulletin of the International Statistical Institute} \bold{44} (1) p. 596. Proceedings, 38th Congress, International Statistical Institute. } \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32) X <- copper$SouthPoints slrm(X ~ 1) slrm(X ~ x+y) slrm(X ~ x+y, link="cloglog") # specify a grid of 2-km-square pixels slrm(X ~ 1, eps=2) Y <- copper$SouthLines Z <- distmap(Y) slrm(X ~ Z) slrm(X ~ Z, dataAtPoints=list(Z=nncross(X,Y,what="dist"))) mur <- murchison mur$dfault <- distfun(mur$faults) slrm(gold ~ dfault, data=mur) slrm(gold ~ dfault + greenstone, data=mur) slrm(gold ~ dfault, data=mur, splitby="greenstone") if(offline) spatstat.options(op) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.core/man/PPversion.Rd0000644000176200001440000000555214141452520016040 0ustar liggesusers\name{PPversion} \alias{PPversion} \alias{QQversion} \title{ Transform a Function into its P-P or Q-Q Version } \description{ Given a function object \code{f} containing both the estimated and theoretical versions of a summary function, these operations combine the estimated and theoretical functions into a new function. When plotted, the new function gives either the P-P plot or Q-Q plot of the original \code{f}. } \usage{ PPversion(f, theo = "theo", columns = ".") QQversion(f, theo = "theo", columns = ".") } \arguments{ \item{f}{ The function to be transformed. An object of class \code{"fv"}. 
  }
  \item{theo}{
    The name of the column of \code{f} that should be treated as the
    theoretical value of the function.
  }
  \item{columns}{
    Character vector, specifying the columns of \code{f}
    to which the transformation will be applied.
    Either a vector of names of columns of \code{f},
    or one of the abbreviations recognised by \code{\link{fvnames}}.
  }
}
\details{
  The argument \code{f} should be an object of class \code{"fv"},
  containing both empirical estimates \eqn{\widehat f(r)}{fhat(r)}
  and a theoretical value \eqn{f_0(r)}{f0(r)} for a summary function.

  The \emph{P--P version} of \code{f} is the function
  \eqn{g(x) = \widehat f (f_0^{-1}(x))}{g(x) = fhat(f0^(-1)(x))}
  where \eqn{f_0^{-1}}{f0^(-1)} is the inverse function of
  \eqn{f_0}{f0}.
  A plot of \eqn{g(x)} against \eqn{x}
  is equivalent to a plot of \eqn{\widehat f(r)}{fhat(r)} against
  \eqn{f_0(r)}{f0(r)} for all \eqn{r}.
  If \code{f} is a cumulative distribution function
  (such as the result of \code{\link{Fest}} or \code{\link{Gest}})
  then this is a P--P plot, a plot of the observed versus theoretical
  probabilities for the distribution.
  The diagonal line \eqn{y=x} corresponds to perfect agreement
  between observed and theoretical distribution.

  The \emph{Q--Q version} of \code{f} is the function
  \eqn{h(x) = f_0^{-1}(\widehat f(x))}{h(x) = f0^(-1)(fhat(x))}.
  If \code{f} is a cumulative distribution function,
  a plot of \eqn{h(x)} against \eqn{x}
  is a Q--Q plot, a plot of the observed versus theoretical quantiles
  of the distribution.
  The diagonal line \eqn{y=x} corresponds to perfect agreement
  between observed and theoretical distribution.
  Another straight line corresponds to the situation where the
  observed variable is a linear transformation of the theoretical
  variable.
  For a point pattern \code{X}, the Q--Q version of \code{Kest(X)} is
  essentially equivalent to \code{Lest(X)}.
}
\value{
  Another object of class \code{"fv"}.
}
\author{
  Tom Lawrence and Adrian Baddeley.
  Implemented by \spatstatAuthors.
}
\seealso{
  \code{\link{plot.fv}}
}
\examples{
  opa <- par(mar=0.1+c(5,5,4,2))
  G <- Gest(redwoodfull)
  plot(PPversion(G))
  plot(QQversion(G))
  par(opa)
}
\keyword{spatial}
\keyword{nonparametric}
\keyword{manip}
spatstat.core/man/Smoothfun.ppp.Rd0000644000176200001440000000370614141452520016672 0ustar liggesusers\name{Smoothfun.ppp}
\alias{Smoothfun}
\alias{Smoothfun.ppp}
\title{
  Smooth Interpolation of Marks as a Spatial Function
}
\description{
  Perform spatial smoothing of numeric values observed
  at a set of irregular locations, and return the result
  as a function of spatial location.
}
\usage{
Smoothfun(X, \dots)

\method{Smoothfun}{ppp}(X, sigma = NULL, \dots,
                        weights = NULL, edge = TRUE, diggle = FALSE)
}
\arguments{
  \item{X}{
    Marked point pattern (object of class \code{"ppp"}).
  }
  \item{sigma}{
    Smoothing bandwidth, or bandwidth selection function,
    passed to \code{\link{Smooth.ppp}}.
  }
  \item{\dots}{
    Additional arguments passed to \code{\link{Smooth.ppp}}.
  }
  \item{weights}{
    Optional vector of weights associated with the points of \code{X}.
  }
  \item{edge,diggle}{
    Logical arguments controlling the edge correction.
    Arguments passed to \code{\link{Smooth.ppp}}.
  }
}
\details{
  The commands \code{Smoothfun} and \code{\link{Smooth}}
  both perform kernel-smoothed spatial interpolation
  of numeric values observed at irregular spatial locations.
  The difference is that \code{\link{Smooth}} returns a pixel image,
  containing the interpolated values at a grid of locations, while
  \code{Smoothfun} returns a \code{function(x,y)} which can be used
  to compute the interpolated value at \emph{any} spatial location.
  For purposes such as model-fitting it is more accurate to
  use \code{Smoothfun} to interpolate data.
}
\value{
  A \code{function} with arguments \code{x,y}.
  The function also belongs to the class \code{"Smoothfun"} which has
  methods for \code{print} and \code{\link{as.im}}.
  It also belongs to the class \code{"funxy"} which has methods
  for \code{plot}, \code{contour} and \code{persp}.
}
\seealso{
  \code{\link{Smooth}}
}
\examples{
  f <- Smoothfun(longleaf)
  f
  f(120, 80)
  plot(f)
}
\author{
  \spatstatAuthors.
}
\keyword{spatial}
\keyword{methods}
\keyword{smooth}
spatstat.core/man/Gfox.Rd0000644000176200001440000001010714141452520015006 0ustar liggesusers\name{Gfox}
\alias{Gfox}
\alias{Jfox}
\title{
  Foxall's Distance Functions
}
\description{
  Given a point pattern \code{X} and a spatial object \code{Y},
  compute estimates of Foxall's \eqn{G} and \eqn{J} functions.
}
\usage{
Gfox(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W, \dots)
Jfox(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W, \dots,
     warn.trim=TRUE)
}
\arguments{
  \item{X}{
    A point pattern (object of class \code{"ppp"})
    from which distances will be measured.
  }
  \item{Y}{
    An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}
    to which distances will be measured. Alternatively a pixel image
    (class \code{"im"}) with logical values.
  }
  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
    at which \eqn{Gfox(r)} or \eqn{Jfox(r)} should be evaluated.
    There is a sensible default.
    First-time users are strongly advised not to specify this argument.
    See below for important conditions on \eqn{r}.
  }
  \item{breaks}{
    This argument is for internal use only.
  }
  \item{correction}{
    Optional.
    The edge correction(s) to be used to estimate
    \eqn{Gfox(r)} or \eqn{Jfox(r)}.
    A vector of character strings selected from
    \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"}, \code{"han"}
    and \code{"best"}.
    Alternatively \code{correction="all"} selects all options.
  }
  \item{W}{
    Optional. A window (object of class \code{"owin"})
    to be taken as the window of observation.
    The distribution function will be estimated
    from data inside \code{W}.
    The default is \code{W=Frame(Y)} when \code{Y} is a window,
    and \code{W=Window(Y)} otherwise.
  }
  \item{\dots}{
    Extra arguments affecting the discretisation of distances.
    These arguments are ignored by \code{Gfox},
    but \code{Jfox} passes them to \code{\link{Hest}} to determine
    the discretisation of the spatial domain.
  }
  \item{warn.trim}{
    Logical value indicating whether a warning should be issued
    by \code{Jfox} when the window of \code{X} had to be trimmed
    in order to be a subset of the frame of \code{Y}.
  }
}
\details{
  Given a point pattern \code{X} and another spatial object \code{Y},
  these functions compute two nonparametric measures of association
  between \code{X} and \code{Y}, introduced by Foxall
  (Foxall and Baddeley, 2002).

  Let the random variable \eqn{R} be the distance from a typical point
  of \code{X} to the object \code{Y}.
  Foxall's \eqn{G}-function is the cumulative distribution function
  of \eqn{R}:
  \deqn{G(r) = P(R \le r)}{G(r) = P(R <= r)}

  Let the random variable \eqn{S} be the distance from a \emph{fixed}
  point in space to the object \code{Y}.
  The cumulative distribution function of \eqn{S} is the
  (unconditional) spherical contact distribution function
  \deqn{H(r) = P(S \le r)}{H(r) = P(S <= r)}
  which is computed by \code{\link{Hest}}.

  Foxall's \eqn{J}-function is the ratio
  \deqn{
    J(r) = \frac{1-G(r)}{1-H(r)}
  }{
    J(r) = (1-G(r))/(1-H(r))
  }
  For further interpretation, see Foxall and Baddeley (2002).

  Accuracy of \code{Jfox} depends on the pixel resolution,
  which is controlled by the arguments \code{eps}, \code{dimyx} and
  \code{xy} passed to \code{\link{as.mask}}. For example, use
  \code{eps=0.1} to specify square pixels of side 0.1 units, and
  \code{dimyx=256} to specify a 256 by 256 grid of pixels.
}
\value{
  A function value table (object of class \code{"fv"})
  which can be printed, plotted, or converted to a data frame of values.
}
\references{
  Foxall, R. and Baddeley, A. (2002)
  Nonparametric measures of association between a
  spatial point process and a random set, with geological applications.
  \emph{Applied Statistics} \bold{51}, 165--182.
}
\seealso{
  \code{\link{Gest}},
  \code{\link{Hest}},
  \code{\link{Jest}},
  \code{\link{Fest}}
}
\examples{
  data(copper)
  X <- copper$SouthPoints
  Y <- copper$SouthLines
  G <- Gfox(X,Y)
  J <- Jfox(X,Y, correction="km")
  \testonly{
    J <- Jfox(X,Y, correction="km", eps=1)
  }
}
\author{Rob Foxall and \adrian }
\keyword{spatial}
\keyword{nonparametric}
spatstat.core/man/pseudoR2.Rd0000644000176200001440000000400414141452520015605 0ustar liggesusers\name{pseudoR2}
\alias{pseudoR2}
\alias{pseudoR2.ppm}
\alias{pseudoR2.slrm}
\title{
  Calculate Pseudo-R-Squared for Point Process Model
}
\description{
  Given a fitted point process model, calculate the pseudo-R-squared
  value, which measures the fraction of variation in the data that is
  explained by the model.
}
\usage{
  pseudoR2(object, \dots)

  \method{pseudoR2}{ppm}(object, \dots, keepoffset=TRUE)

  \method{pseudoR2}{slrm}(object, \dots, keepoffset=TRUE)
}
\arguments{
  \item{object}{
    Fitted point process model.
    An object of class \code{"ppm"} or \code{"slrm"}.
  }
  \item{keepoffset}{
    Logical value indicating whether to retain offset terms in the
    model when computing the deviance difference. See Details.
  }
  \item{\dots}{
    Additional arguments passed to \code{\link{deviance.ppm}}
    or \code{\link{deviance.slrm}}.
  }
}
\details{
  The function \code{pseudoR2} is generic, with methods
  for fitted point process models of class \code{"ppm"} and
  \code{"slrm"}.

  This function computes McFadden's pseudo-R-squared
  \deqn{
    R^2 = 1 - \frac{D}{D_0}
  }{
    R^2 = 1 - D/D0
  }
  where \eqn{D} is the deviance of the fitted model \code{object},
  and \eqn{D_0}{D0} is the deviance of the null model.
  Deviance is defined as twice the negative log-likelihood
  or log-pseudolikelihood.

  The null model is usually obtained by re-fitting the model
  using the trend formula \code{~1}.
  However, if the original model formula included \code{offset} terms,
  and if \code{keepoffset=TRUE} (the default),
  then the null model formula consists of these offset terms. This
  ensures that the \code{pseudoR2} value is non-negative.
}
\value{
  A single numeric value.
}
\author{
  \spatstatAuthors.
}
\seealso{
  \code{\link{deviance.ppm}},
  \code{\link{deviance.slrm}}.
}
\examples{
  fit <- ppm(swedishpines ~ x+y)
  pseudoR2(fit)
  xcoord <- as.im(function(x,y) x, Window(swedishpines))
  fut <- ppm(swedishpines ~ offset(xcoord/200) + y)
  pseudoR2(fut)
}
\keyword{spatial}
\keyword{models}
spatstat.core/man/pcf3est.Rd0000644000176200001440000001073314141452520015457 0ustar liggesusers\name{pcf3est}
\Rdversion{1.1}
\alias{pcf3est}
\title{
  Pair Correlation Function of a Three-Dimensional Point Pattern
}
\description{
  Estimates the pair correlation function from a three-dimensional
  point pattern.
}
\usage{
   pcf3est(X, \dots, rmax = NULL, nrval = 128,
           correction = c("translation", "isotropic"),
           delta=NULL, adjust=1, biascorrect=TRUE)
}
\arguments{
  \item{X}{
    Three-dimensional point pattern (object of class \code{"pp3"}).
  }
  \item{\dots}{
    Ignored.
  }
  \item{rmax}{
    Optional. Maximum value of argument \eqn{r} for which
    \eqn{g_3(r)}{g3(r)} will be estimated.
  }
  \item{nrval}{
    Optional. Number of values of \eqn{r} for which
    \eqn{g_3(r)}{g3(r)} will be estimated.
  }
  \item{correction}{
    Optional. Character vector specifying the edge correction(s)
    to be applied. See Details.
  }
  \item{delta}{
    Optional. Half-width of the Epanechnikov smoothing kernel.
  }
  \item{adjust}{
    Optional. Adjustment factor for the default value of \code{delta}.
  }
  \item{biascorrect}{
    Logical value. Whether to correct for underestimation due to
    truncation of the kernel near \eqn{r=0}.
  }
}
\details{
  For a stationary point process \eqn{\Phi}{Phi} in three-dimensional
  space, the pair correlation function is
  \deqn{
    g_3(r) = \frac{K_3'(r)}{4\pi r^2}
  }{
    g3(r) = K3'(r)/(4 * pi * r^2)
  }
  where \eqn{K_3'}{K3'} is the derivative of the three-dimensional
  \eqn{K}-function (see \code{\link{K3est}}).

  The three-dimensional point pattern \code{X} is assumed to be a
  partial realisation of a stationary point process \eqn{\Phi}{Phi}.
  The distance between each pair of distinct points is computed.
  Kernel smoothing is applied to these distance values (weighted by
  an edge correction factor) and the result is renormalised to give the
  estimate of \eqn{g_3(r)}{g3(r)}.

  The available edge corrections are:
  \describe{
    \item{\code{"translation"}:}{
      the Ohser translation correction estimator
      (Ohser, 1983; Baddeley et al, 1993)
    }
    \item{\code{"isotropic"}:}{
      the three-dimensional counterpart of
      Ripley's isotropic edge correction
      (Ripley, 1977; Baddeley et al, 1993).
    }
  }

  Kernel smoothing is performed using the Epanechnikov kernel
  with half-width \code{delta}. If \code{delta} is missing, the
  default is to use the rule-of-thumb
  \eqn{\delta = 0.26/\lambda^{1/3}}{delta = 0.26/lambda^(1/3)} where
  \eqn{\lambda = n/v}{lambda = n/v} is the estimated intensity,
  computed from the number \eqn{n} of data points and the volume
  \eqn{v} of the enclosing box. This default value of \code{delta} is
  multiplied by the factor \code{adjust}.

  The smoothing estimate of the pair correlation \eqn{g_3(r)}{g3(r)}
  is typically an underestimate when \eqn{r} is small, due to
  truncation of the kernel at \eqn{r=0}.
  If \code{biascorrect=TRUE}, the smoothed estimate is
  approximately adjusted for this bias. This is advisable whenever
  the dataset contains a sufficiently large number of points.
}
\value{
  A function value table (object of class \code{"fv"}) that can be
  plotted, printed or coerced to a data frame containing the function
  values.

  Additionally the value of \code{delta} is returned as an attribute
  of this object.
}
\references{
  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
  Analysis of a three-dimensional point pattern with replication.
  \emph{Applied Statistics} \bold{42}, 641--668.

  Ohser, J. (1983)
  On estimators for the reduced second moment measure of
  point processes.
  \emph{Mathematische Operationsforschung und Statistik,
    series Statistics}, \bold{14}, 63 -- 71.

  Ripley, B.D. (1977)
  Modelling spatial patterns (with discussion).
  \emph{Journal of the Royal Statistical Society, Series B},
  \bold{39}, 172 -- 212.
}
\author{
  \adrian and Rana Moyeed.
}
\seealso{
  \code{\link{pp3}} to create a three-dimensional point
  pattern (object of class \code{"pp3"}).

  \code{\link{F3est}},
  \code{\link{G3est}},
  \code{\link{K3est}} for other summary functions of
  a three-dimensional point pattern.

  \code{\link{pcf}} to estimate the pair correlation function of
  point patterns in two dimensions or other spaces.
}
\examples{
  X <- rpoispp3(250)
  Z <- pcf3est(X)
  Zbias <- pcf3est(X, biascorrect=FALSE)
  if(interactive()) {
    opa <- par(mfrow=c(1,2))
    plot(Z,     ylim.covers=c(0, 1.2))
    plot(Zbias, ylim.covers=c(0, 1.2))
    par(opa)
  }
  attr(Z, "delta")
}
\keyword{spatial}
\keyword{nonparametric}
spatstat.core/man/model.matrix.mppm.Rd0000644000176200001440000000401614141452520017460 0ustar liggesusers\name{model.matrix.mppm}
\alias{model.matrix.mppm}
\title{Extract Design Matrix of Point Process Model for Several Point Patterns}
\description{
  Given a point process model fitted to a list of point patterns,
  this function extracts the design matrix.
}
\usage{
   \method{model.matrix}{mppm}(object, ..., keepNA=TRUE, separate=FALSE)
}
\arguments{
  \item{object}{
    A point process model fitted to several point patterns.
    An object of class \code{"mppm"}.
  }
  \item{\dots}{
    Other arguments (such as \code{na.action}) passed to
    \code{\link{model.matrix.lm}}.
  }
  \item{keepNA}{
    Logical. Determines whether rows containing \code{NA} values will
    be deleted or retained.
  }
  \item{separate}{
    Logical value indicating whether to split the model matrix into
    sub-matrices corresponding to each of the original point patterns.
  }
}
\details{
  This command is a method for the generic function
  \code{\link{model.matrix}}. It extracts the design matrix of a
  point process model fitted to several point patterns.

  The argument \code{object} must be a fitted point process model
  (object of class \code{"mppm"}) produced by the fitting algorithm
  \code{\link{mppm}}. This represents a point process model that has
  been fitted to a list of several point pattern datasets. See
  \code{\link{mppm}} for information.

  The result is a matrix with one column for every constructed
  covariate in the model, and one row for every quadrature point.

  If \code{separate=TRUE} this matrix will be split into sub-matrices
  corresponding to the original point patterns, and the result will be
  a list containing these matrices.
}
\value{
  A matrix (or list of matrices).

  Columns of the matrix are canonical covariates in the model.
}
\author{
  \spatstatAuthors.
}
\seealso{
  \code{\link{model.matrix}},
  \code{\link{mppm}}.
}
\examples{
   fit <- mppm(Points ~ Image + x, demohyper)
   head(model.matrix(fit))
   # matrix with three columns: '(Intercept)', 'x' and 'Image'
}
\keyword{spatial}
\keyword{models}
spatstat.core/man/emend.ppm.Rd0000644000176200001440000000776414141452520016005 0ustar liggesusers\name{emend.ppm}
\alias{emend.ppm}
\alias{project.ppm}
\title{
  Force Point Process Model to be Valid
}
\description{
  Ensures that a fitted point process model
  satisfies the integrability conditions for
  existence of the point process.
}
\usage{
project.ppm(object, \dots, fatal=FALSE, trace=FALSE)

\method{emend}{ppm}(object, \dots, fatal=FALSE, trace=FALSE)
}
\arguments{
\item{object}{
Fitted point process model (object of class \code{"ppm"}).
}
\item{\dots}{Ignored.}
\item{fatal}{
Logical value indicating whether to generate an error
if the model cannot be projected to a valid model.
}
\item{trace}{
Logical value indicating whether to print a trace
of the decision process.
}
}
\details{
The functions \code{emend.ppm} and \code{project.ppm} are identical:
\code{emend.ppm} is a method for the generic \code{\link{emend}},
while \code{project.ppm} is an older name for the same function.

The purpose of the function is to ensure that a fitted model
is valid.

The model-fitting function \code{\link{ppm}}
fits Gibbs point process models to point pattern data.
By default, the fitted model returned by \code{\link{ppm}}
may not actually exist as a point process.

First, some of the fitted coefficients of the model
may be \code{NA} or infinite values.
This usually occurs when the data are insufficient to estimate
all the parameters. The model is said to be
\emph{unidentifiable} or \emph{confounded}.

Second, unlike a regression model, which is well-defined for any finite
values of the fitted regression coefficients, a Gibbs point process
model is only well-defined if the fitted interaction parameters satisfy
some constraints.
A famous example is the Strauss process (see \code{\link{Strauss}})
which exists only when the interaction parameter \eqn{\gamma}{gamma}
is less than or equal to 1.
For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not
integrable and the process does not exist (and cannot be simulated).

By default, \code{\link{ppm}} does not enforce the constraint that
a fitted Strauss process (for example) must satisfy
\eqn{\gamma \le 1}{gamma <= 1}.
This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1}
could be useful information for data analysis, as it indicates that
the Strauss model is not appropriate, and suggests a clustered model
should be fitted.

The function \code{emend.ppm} or \code{project.ppm}
modifies the model \code{object} so that the model is valid.
It identifies the terms in the model \code{object}
that are associated with illegal parameter values
(i.e. parameter values which are either \code{NA}, infinite,
or outside their permitted range).
It considers all possible sub-models of \code{object}
obtained by deleting one or more of these terms.
It identifies which of these submodels are valid,
and chooses the valid submodel with the largest pseudolikelihood.
The result of \code{emend.ppm} or \code{project.ppm} is the
true maximum pseudolikelihood fit to the data.

For large datasets or complex models, the algorithm used in
\code{emend.ppm} or \code{project.ppm} may be time-consuming,
because it takes time to compute all the sub-models.
A faster, approximate algorithm can be applied by setting
\code{spatstat.options(project.fast=TRUE)}.
This produces a valid submodel,
which may not be the maximum pseudolikelihood submodel.

Use the function \code{\link{valid.ppm}} to check whether a fitted
model object specifies a well-defined point process.

Use the expression \code{all(is.finite(coef(object)))} to determine
whether all parameters are identifiable.
}
\value{
Another point process model (object of class \code{"ppm"}).
}
\author{\adrian and \rolf}
\seealso{
\code{\link{ppm}},
\code{\link{valid.ppm}},
\code{\link{emend}},
\code{\link{spatstat.options}}
}
\examples{
fit <- ppm(redwood ~1, Strauss(0.1))
coef(fit)
fit2 <- emend(fit)
coef(fit2)
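## A further check (a sketch, using valid.ppm as described in Details):
## the original fit is typically invalid here, and the emended fit is valid
valid.ppm(fit)
valid.ppm(fit2)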
}
\keyword{spatial}
\keyword{models}
spatstat.core/man/is.ppm.Rd0000644000176200001440000000145014141452520015312 0ustar liggesusers\name{is.ppm}
\alias{is.ppm}
\alias{is.lppm}
\alias{is.kppm}
\alias{is.slrm}
\title{Test Whether An Object Is A Fitted Point Process Model}
\description{
Checks whether its argument is a fitted point process model
(object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}
or \code{"slrm"}).
}
\usage{
is.ppm(x)
is.kppm(x)
is.lppm(x)
is.slrm(x)
}
\arguments{
\item{x}{Any object.}
}
\details{
These functions test whether the object \code{x}
is a fitted point process model object of the specified class.

The result of \code{is.ppm(x)} is \code{TRUE} if \code{x}
has \code{"ppm"} amongst its classes, and otherwise \code{FALSE}.
Similarly for the other functions.
}
\value{
A single logical value.
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{manip}
\keyword{models}
spatstat.core/man/logLik.dppm.Rd0000644000176200001440000000554514141452520016275 0ustar liggesusers\name{logLik.dppm}
\alias{logLik.dppm}
\alias{AIC.dppm}
\alias{extractAIC.dppm}
\alias{nobs.dppm}
\title{Log Likelihood and AIC for Fitted Determinantal Point Process Model}
\description{
Extracts the log Palm likelihood, deviance, and AIC
of a fitted determinantal point process model.
}
\usage{
\method{logLik}{dppm}(object, ...)
\method{AIC}{dppm}(object, \dots, k=2)
\method{extractAIC}{dppm}(fit, scale=0, k=2, \dots)
\method{nobs}{dppm}(object, ...)
}
\arguments{
\item{object,fit}{Fitted point process model.
An object of class \code{"dppm"}.
}
\item{\dots}{Ignored.}
\item{scale}{Ignored.}
\item{k}{Numeric value specifying the weight of the
equivalent degrees of freedom in the AIC. See Details.
}
}
\details{
These functions are methods for the generic commands
\code{\link{logLik}},
\code{\link{extractAIC}}  and
\code{\link{nobs}}
for the class \code{"dppm"}.

An object of class \code{"dppm"} represents a fitted
determinantal point process model.
It is obtained from the model-fitting function \code{\link{dppm}}.

These methods apply only when the model was fitted
by maximising the Palm likelihood (Tanaka et al, 2008)
by calling \code{\link{dppm}} with the argument \code{method="palm"}.

The method \code{logLik.dppm} computes the
maximised value of the log Palm likelihood
for the fitted model \code{object}.

The methods \code{AIC.dppm} and \code{extractAIC.dppm} compute the
Akaike Information Criterion AIC for the fitted model
based on the Palm likelihood (Tanaka et al, 2008)
\deqn{
AIC = -2 \log(PL) + k \times \mbox{edf}
}{
AIC = -2 * log(PL) + k * edf
}
where \eqn{PL} is the maximised Palm likelihood of the fitted model,
and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom
of the model.

The method \code{nobs.dppm} returns the number of points
in the original data point pattern to which the model was fitted.

The \R function \code{\link{step}} normally uses these methods,
but it does not yet work for determinantal models,
because a method \code{update.dppm} has not yet been implemented.
}
\value{
\code{logLik} returns a numerical value, belonging to the class
\code{"logLik"}, with an attribute \code{"df"} giving the degrees of
freedom.

\code{AIC} returns a numerical value.

\code{extractAIC} returns a numeric vector of length 2
containing the degrees of freedom and the AIC value.
\code{nobs} returns an integer value. } \references{ Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. } \seealso{ \code{\link{dppm}}, \code{\link{logLik.ppm}} } \author{\adrian \rolf and \ege } \examples{ fit <- dppm(swedishpines ~ x, dppGauss(), method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) } \keyword{spatial} \keyword{models} spatstat.core/man/residuals.ppm.Rd0000644000176200001440000002007714141452520016700 0ustar liggesusers\name{residuals.ppm} \alias{residuals.ppm} \title{ Residuals for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{ppm}(object, type="raw", \dots, check=TRUE, drop=FALSE, fittedvalues=NULL, new.coef=NULL, dropcoef=FALSE, quad=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which residuals should be calculated. } \item{type}{ String indicating the type of residuals to be calculated. Current options are \code{"raw"}, \code{"inverse"}, \code{"pearson"} and \code{"score"}. A partial match is adequate. } \item{\dots}{ Ignored. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. See \code{\link{quad.ppm}} for explanation. } \item{fittedvalues}{ Vector of fitted values for the conditional intensity at the quadrature points, from which the residuals will be computed. For expert use only. } \item{new.coef}{ Optional. Numeric vector of coefficients for the model, replacing \code{coef(object)}. See the section on Modified Residuals below. } \item{dropcoef}{ Internal use only. } \item{quad}{ Optional. Data specifying how to re-fit the model. A list of arguments passed to \code{\link{quadscheme}}. See the section on Modified Residuals below. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function computes several kinds of residuals for the fit of a point process model to a spatial point pattern dataset (Baddeley et al, 2005). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This fitted model object contains complete information about the original data pattern. Residuals are attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals over all (data and dummy) points in a spatial region \eqn{B} has mean zero. For further explanation, see Baddeley et al (2005). The type of residual is chosen by the argument \code{type}. 
Current options are \describe{ \item{\code{"raw"}:}{ the raw residuals \deqn{ r_j = z_j - w_j \lambda_j }{ r[j] = z[j] - w[j] lambda[j] } at the quadrature points \eqn{u_j}{u[j]}, where \eqn{z_j}{z[j]} is the indicator equal to 1 if \eqn{u_j}{u[j]} is a data point and 0 if \eqn{u_j}{u[j]} is a dummy point; \eqn{w_j}{w[j]} is the quadrature weight attached to \eqn{u_j}{u[j]}; and \deqn{\lambda_j = \hat\lambda(u_j,x)}{lambda[j] = lambda(u[j],x)} is the conditional intensity of the fitted model at \eqn{u_j}{u[j]}. These are the spatial analogue of the martingale residuals of a one-dimensional counting process. } \item{\code{"inverse"}:}{ the `inverse-lambda' residuals (Baddeley et al, 2005) \deqn{ r^{(I)}_j = \frac{r_j}{\lambda_j} = \frac{z_j}{\lambda_j} - w_j }{ rI[j] = r[j]/lambda[j] = z[j]/lambda[j] - w[j] } obtained by dividing the raw residuals by the fitted conditional intensity. These are a counterpart of the exponential energy marks (see \code{\link{eem}}). } \item{\code{"pearson"}:}{ the Pearson residuals (Baddeley et al, 2005) \deqn{ r^{(P)}_j = \frac{r_j}{\sqrt{\lambda_j}} = \frac{z_j}{\sqrt{\lambda_j}} - w_j \sqrt{\lambda_j} }{ rP[j] = r[j]/sqrt(lambda[j]) = z[j]/sqrt(lambda[j]) - w[j] sqrt(lambda[j]) } obtained by dividing the raw residuals by the square root of the fitted conditional intensity. The Pearson residuals are standardised, in the sense that if the model (true and fitted) is Poisson, then the sum of the Pearson residuals in a spatial region \eqn{B} has variance equal to the area of \eqn{B}. } \item{\code{"score"}:}{ the score residuals (Baddeley et al, 2005) \deqn{ r_j = (z_j - w_j \lambda_j) x_j }{ r[j] = (z[j] - w[j] lambda[j]) * x[j,] } obtained by multiplying the raw residuals \eqn{r_j}{r[j]} by the covariates \eqn{x_j}{x[j,]} for quadrature point \eqn{j}. The score residuals always sum to zero. } } The result of \code{residuals.ppm} is a measure (object of class \code{"msr"}). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. Use \code{\link{integral.msr}} to compute the total residual. By default, the window of the measure is the same as the original window of the data. If \code{drop=TRUE} then the window is the domain of integration of the pseudolikelihood or composite likelihood. This only matters when the model \code{object} was fitted using the border correction: in that case, if \code{drop=TRUE} the window of the residuals is the erosion of the original data window by the border correction distance \code{rbord}. } \section{Modified Residuals}{ Sometimes we want to modify the calculation of residuals by using different values for the model parameters. This capability is provided by the arguments \code{new.coef} and \code{quad}. If \code{new.coef} is given, then the residuals will be computed by taking the model parameters to be \code{new.coef}. This should be a numeric vector of the same length as the vector of fitted model parameters \code{coef(object)}. If \code{new.coef} is missing and \code{quad} is given, then the model parameters will be determined by re-fitting the model using a new quadrature scheme specified by \code{quad}. Residuals will be computed for the original model \code{object} using these new parameter values. The argument \code{quad} should normally be a list of arguments in \code{name=value} format that will be passed to \code{\link{quadscheme}} (together with the original data points) to determine the new quadrature scheme. 
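For example, \code{quad=list(nd=64)} would cause the model to be
re-fitted using a quadrature scheme based on a
\eqn{64 \times 64}{64 x 64} grid of dummy points
(the value \code{64} is illustrative only).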
It may also be a quadrature scheme (object of class \code{"quad"}) to which the model should be fitted, or a point pattern (object of class \code{"ppp"}) specifying the \emph{dummy points} in a new quadrature scheme. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \seealso{ \code{\link{msr}}, \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells, ~x, Strauss(r=0.15)) # Pearson residuals rp <- residuals(fit, type="pe") rp # simulated data X <- rStrauss(100,0.7,0.05) # fit Strauss model fit <- ppm(X, ~1, Strauss(0.05)) res.fit <- residuals(fit) # check that total residual is 0 integral.msr(residuals(fit, drop=TRUE)) # true model parameters truecoef <- c(log(100), log(0.7)) res.true <- residuals(fit, new.coef=truecoef) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/densityfun.Rd0000644000176200001440000000657714141452520016313 0ustar liggesusers\name{densityfun.ppp} \alias{densityfun} \alias{densityfun.ppp} \title{ Kernel Estimate of Intensity as a Spatial Function } \description{ Compute a kernel estimate of intensity for a point pattern, and return the result as a function of spatial location. } \usage{ densityfun(X, \dots) \method{densityfun}{ppp}(X, sigma = NULL, \dots, weights = NULL, edge = TRUE, diggle = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth, or bandwidth selection function, passed to \code{\link{density.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}}. } \item{weights}{ Optional vector of weights associated with the points of \code{X}. } \item{edge,diggle}{ Logical arguments controlling the edge correction. Arguments passed to \code{\link{density.ppp}}. } } \details{ The commands \code{densityfun} and \code{\link{density}} both perform kernel estimation of the intensity of a point pattern. The difference is that \code{\link{density}} returns a pixel image, containing the estimated intensity values at a grid of locations, while \code{densityfun} returns a \code{function(x,y)} which can be used to compute the intensity estimate at \emph{any} spatial locations with coordinates \code{x,y}. For purposes such as model-fitting it is more accurate to use \code{densityfun}. } \section{Using the result of \code{densityfun}}{ If \code{f <- densityfun(X)}, where \code{X} is a two-dimensional point pattern, the resulting object \code{f} is a \code{function} in the \R language. By calling this function, the user can evaluate the estimated intensity at any desired spatial locations. Additionally \code{f} belongs to other classes which allow it to be printed and plotted easily. The function \code{f} has arguments \code{x,y,drop}. \itemize{ \item The arguments \code{x,y} of \code{f} specify the query locations. They can be numeric vectors of coordinates. Alternatively \code{x} can be a point pattern (or data acceptable to \code{\link{as.ppp}}) and \code{y} is omitted. The result of \code{f(x,y)} is a numeric vector giving the values of the intensity. 
\item The argument \code{drop} of \code{f} specifies how to handle query locations which are outside the window of the original data. If \code{drop=TRUE} (the default), such locations are ignored. If \code{drop=FALSE}, a value of \code{NA} is returned for each such location. } Note that the smoothing parameters, such as the bandwidth \code{sigma}, are assigned when \code{densityfun} is executed. Smoothing parameters are fixed inside the function \code{f} and cannot be changed by arguments of \code{f}. } \value{ A \code{function} with arguments \code{x,y,drop}. The function also belongs to the class \code{"densityfun"} which has methods for \code{print} and \code{\link{as.im}}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{density}}. To interpolate values observed at the points, use \code{\link{Smoothfun}}. } \examples{ f <- densityfun(swedishpines) f f(42, 60) X <- runifpoint(2, Window(swedishpines)) f(X) plot(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/matclust.estK.Rd0000644000176200001440000001444214141452520016652 0ustar liggesusers\name{matclust.estK} \alias{matclust.estK} \title{Fit the Matern Cluster Point Process by Minimum Contrast} \description{ Fits the \Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast. } \usage{ matclust.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the \Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the \Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the \Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the \Matern Cluster point process to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical \eqn{K} function of the \Matern Cluster process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The \Matern Cluster point process is described in \Moller and Waagepetersen (2003, p. 62). 
It is a cluster process formed by taking a pattern of parent points,
generated according to a Poisson process with intensity
\eqn{\kappa}{kappa}, and around each parent point, generating a random
number of offspring points, such that the number of offspring of each
parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the
locations of the offspring points of one parent are independent and
uniformly distributed inside a circle of radius \eqn{R}{R} centred on
the parent point, where \eqn{R}{R} is equal to the parameter
\code{scale}.

The named vector of starting values can use either \code{R} or
\code{scale} as the name of the second component, but the latter is
recommended for consistency with other cluster models.

The theoretical \eqn{K}-function of the \Matern Cluster process is
\deqn{
K(r) = \pi r^2 + \frac{1}{\kappa} h(\frac{r}{2R})
}{
K(r) = pi r^2 + h(r/(2*R))/kappa
}
where the radius \eqn{R} is the parameter \code{scale} and
\deqn{
h(z) = 2 + \frac{1}{\pi} [
( 8 z^2 - 4 ) \mbox{arccos}(z) - 2 \mbox{arcsin}(z)
+ 4 z \sqrt{(1 - z^2)^3} - 6 z \sqrt{1 - z^2}
]
}{
h(z) = 2 + (1/pi) * ((8 * z^2 - 4) * arccos(z) - 2 * arcsin(z)
+ 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2))
}
for \eqn{z \le 1}{z <= 1}, and \eqn{h(z) = 1} for \eqn{z > 1}.

The theoretical intensity of the \Matern Cluster process
is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}.

In this algorithm, the Method of Minimum Contrast is first used to find
optimal values of the parameters \eqn{\kappa}{kappa}
and \eqn{R}{R}. Then the remaining parameter
\eqn{\mu}{mu} is inferred from the estimated intensity
\eqn{\lambda}{lambda}.

If the argument \code{lambda} is provided, then this is used
as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
point pattern, then \eqn{\lambda}{lambda}
will be estimated from \code{X}.
If \code{X} is a summary statistic and \code{lambda} is missing,
then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
the parameter \eqn{\mu}{mu} will be returned as \code{NA}.

The remaining arguments \code{rmin,rmax,q,p} control the
method of minimum contrast; see \code{\link{mincontrast}}.

The \Matern Cluster process can be simulated, using
\code{\link{rMatClust}}.

Homogeneous or inhomogeneous \Matern Cluster models can also be
fitted using the function \code{\link{kppm}}.

The optimisation algorithm can be controlled through the
additional arguments \code{"..."} which are passed to the
optimisation function \code{\link[stats]{optim}}. For example,
to constrain the parameter values to a certain range,
use the argument \code{method="L-BFGS-B"} to select an optimisation
algorithm that respects box constraints, and use the arguments
\code{lower} and \code{upper} to specify (vectors of) minimum and
maximum values for each parameter.
}
\value{
An object of class \code{"minconfit"}. There are methods for printing
and plotting this object. It contains the following main components:
\item{par }{Vector of fitted parameter values.}
\item{fit }{Function value table (object of class \code{"fv"})
containing the observed values of the summary statistic
(\code{observed}) and the theoretical values of the summary statistic
computed from the fitted model parameters.
}
}
\references{
\Moller, J. and Waagepetersen, R. (2003).
Statistical Inference and Simulation for Spatial Point Processes.
Chapman and Hall/CRC, Boca Raton.

Waagepetersen, R. (2007)
An estimating function approach to inference
for inhomogeneous Neyman-Scott processes.
\emph{Biometrics} \bold{63}, 252--258.
} \author{Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat.core/man/logLik.kppm.Rd0000644000176200001440000000725214141452520016301 0ustar liggesusers\name{logLik.kppm} \alias{logLik.kppm} \alias{AIC.kppm} \alias{extractAIC.kppm} \alias{nobs.kppm} \title{Log Likelihood and AIC for Fitted Cox or Cluster Point Process Model} \description{ Extracts the log composite likelihood, deviance, and AIC of a fitted Cox or cluster point process model. } \usage{ \method{logLik}{kppm}(object, ...) \method{AIC}{kppm}(object, \dots, k=2) \method{extractAIC}{kppm}(fit, scale=0, k=2, \dots) \method{nobs}{kppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"kppm"}. } \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted Cox or cluster point process model. It is obtained from the model-fitting function \code{\link{kppm}}. These methods apply only when the model was fitted by maximising a composite likelihood: either the Palm likelihood (Tanaka et al, 2008) or the second order composite likelihood (Guan, 2006), by calling \code{\link{kppm}} with the argument \code{method="palm"} or \code{method="clik2"} respectively. The method \code{logLik.kppm} computes the maximised value of the log composite likelihood for the fitted model \code{object}. The methods \code{AIC.kppm} and \code{extractAIC.kppm} compute the Akaike Information Criterion AIC for the fitted model based on the composite likelihood \deqn{ AIC = -2 \log(CL) + k \times \mbox{edf} }{ AIC = -2 * log(CL) + k * edf } where \eqn{CL} is the maximised composite likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.kppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods. } \section{Model comparison}{ The values of log-likelihood and AIC returned by these functions are based on the \emph{composite likelihood} of the cluster process or Cox process model. They are available only when the model was fitted using \code{method="palm"} or \code{method="clik2"}. For model comparison and model selection, it is valid to compare the \code{logLik} values, or to compare the \code{AIC} values, but only when all the models are of class \code{"kppm"} and were fitted using the same \code{method}. For \code{method="palm"} some theoretical justification was provided by Tanaka et al (2008). } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Guan, Y. 
(2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. } \seealso{ \code{\link{kppm}}, \code{\link{logLik.ppm}} } \author{ \spatstatAuthors. } \examples{ fit <- kppm(redwood ~ x, "Thomas", method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat.core/man/envelope.pp3.Rd0000644000176200001440000002226114141452520016425 0ustar liggesusers\name{envelope.pp3} \alias{envelope.pp3} \title{Simulation Envelopes of Summary Function for 3D Point Pattern} \description{ Computes simulation envelopes of a summary function for a three-dimensional point pattern. } \usage{ \method{envelope}{pp3}(Y, fun=K3est, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A three-dimensional point pattern (object of class \code{"pp3"}). } \item{fun}{ Function that computes the desired summary statistic for a 3D point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. 
} \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ A function value table (object of class \code{"fv"}) which can be plotted directly. See \code{\link{envelope}} for further details. 
} \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described in the help file for \code{\link{envelope}}. This function \code{envelope.pp3} is the method for three-dimensional point patterns (objects of class \code{"pp3"}). For the most basic use, if you have a 3D point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, K3est,nsim=39))} to see the three-dimensional \eqn{K} function for \code{X} plotted together with the envelopes of the three-dimensional \eqn{K} function for 39 simulations of CSR. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. See \code{\link{envelope}} for details. } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{K3est}, \code{G3est}, \code{F3est} or \code{pcf3est}. It may also be a character string containing the name of one of these functions. For further information, see the documentation for \code{\link{envelope}}. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. } \seealso{ \code{\link{pp3}}, \code{\link{rpoispp3}}, \code{\link{K3est}}, \code{\link{G3est}}, \code{\link{F3est}}, \code{\link{pcf3est}}. } \examples{ X <- rpoispp3(20, box3()) if(interactive()) { plot(envelope(X, nsim=39)) } \testonly{ plot(envelope(X, nsim=4)) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.core/man/bc.ppm.Rd0000644000176200001440000000363514141452520015272 0ustar liggesusers\name{bc.ppm} \alias{bc} \alias{bc.ppm} \title{ Bias Correction for Fitted Model } \description{ Applies a first-order bias correction to a fitted model. } \usage{ bc(fit, \dots) \method{bc}{ppm}(fit, \dots, nfine = 256) } \arguments{ \item{fit}{ A fitted point process model (object of class \code{"ppm"}) or a model of some other class. } \item{\dots}{ Additional arguments are currently ignored. } \item{nfine}{ Grid dimensions for fine grid of locations. An integer, or a pair of integers. See Details. } } \details{ This command applies the first order Newton-Raphson bias correction method of Baddeley and Turner (2014, sec 4.2) to a fitted model. The function \code{bc} is generic, with a method for fitted point process models of class \code{"ppm"}. A fine grid of locations, of dimensions \code{nfine * nfine} or \code{nfine[2] * nfine[1]}, is created over the original window of the data, and the intensity or conditional intensity of the fitted model is calculated on this grid. 
The result is used to update the fitted model parameters once by a
Newton-Raphson update. This is only useful if the quadrature points
used to fit the original model \code{fit} are coarser than the grid of
points specified by \code{nfine}.
}
\value{
A numeric vector, of the same length as \code{coef(fit)},
giving updated values for the fitted model coefficients.
}
\references{
Baddeley, A. and Turner, R. (2014)
Bias correction for parameter estimates of spatial point process models.
\emph{Journal of Statistical Computation and Simulation}
\bold{84}, 1621--1643.
DOI: 10.1080/00949655.2012.755976
}
\author{
\adrian and \rolf.
}
\seealso{
\code{\link{rex}}
}
\examples{
fit <- ppm(cells ~ x, Strauss(0.07))
coef(fit)
if(!interactive()) {
  bc(fit, nfine=64)
} else {
  bc(fit)
}
}
\keyword{spatial}
\keyword{models}
\keyword{math}
\keyword{optimize}
spatstat.core/man/emend.slrm.Rd0000644000176200001440000000526314141452520016156 0ustar liggesusers\name{emend.slrm}
\alias{emend.slrm}
\title{
Force Spatial Logistic Regression Model to be Valid
}
\description{
Ensures that a fitted spatial logistic regression model
specifies a well-defined model.
}
\usage{
\method{emend}{slrm}(object, \dots, fatal=FALSE, trace=FALSE)
}
\arguments{
\item{object}{
Fitted spatial logistic regression model
(object of class \code{"slrm"}).
}
\item{\dots}{Ignored.}
\item{fatal}{
Logical value indicating whether to generate an error
if the model cannot be projected to a valid model.
}
\item{trace}{
Logical value indicating whether to print a trace
of the decision process.
}
}
\details{
\code{emend.slrm} is a method for the generic \code{\link{emend}}.

The purpose of the function is to ensure that a fitted model is valid.

The model-fitting function \code{\link{slrm}} fits spatial logistic
regression models to point pattern data.

In some circumstances, the fitted model returned by \code{\link{slrm}}
may not specify a well-defined model, because some of the fitted
coefficients of the model may be \code{NA} or infinite values. This
usually occurs when the data are insufficient to estimate all the
parameters. The model is said to be \emph{unidentifiable} or
\emph{confounded}.

The function \code{emend.slrm} modifies the model \code{object} so that
the model is valid. It identifies the terms in the model \code{object}
that are associated with illegal parameter values (i.e. parameter
values which are either \code{NA}, infinite, or outside their permitted
range). It considers all possible sub-models of \code{object} obtained
by deleting one or more of these terms. It identifies which of these
submodels are valid, and chooses the valid submodel with the largest
likelihood. The result of \code{emend.slrm} is the true maximum
likelihood fit to the data.

For large datasets or complex models, the algorithm used in
\code{emend.slrm} may be time-consuming, because it takes time to
compute all the sub-models. A faster, approximate algorithm can be
applied by setting \code{spatstat.options(project.fast=TRUE)}. This
produces a valid submodel, which may not be the maximum likelihood
submodel.

Use the function \code{\link{valid.slrm}} to check whether a fitted
model object specifies a well-defined model.
}
\value{
Another fitted spatial logistic regression model
(object of class \code{"slrm"}).
}
\author{
\spatstatAuthors.
}
\seealso{
\code{\link{slrm}},
\code{\link{valid.slrm}},
\code{\link{emend}},
\code{\link{spatstat.options}}
}
\examples{
fit <- slrm(redwood ~ x + I(x))
coef(fit)
fit2 <- emend(fit)
coef(fit2)
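## A further check (a sketch, using valid.slrm as described in Details):
## the original fit is typically invalid here, and the emended fit is valid
valid.slrm(fit)
valid.slrm(fit2)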
}
\keyword{spatial}
\keyword{models}
spatstat.core/man/as.layered.msr.Rd0000644000176200001440000000162114141452520016733 0ustar liggesusers\name{as.layered.msr}
\alias{as.layered.msr}
\title{Convert Measure To Layered Object}
\description{
Converts a measure into a layered object.
}
\usage{
\method{as.layered}{msr}(X)
}
\arguments{
\item{X}{
A measure (object of class \code{"msr"}).
}
}
\value{
An object of class \code{"layered"} (see \code{\link{layered}}).
}
\details{
This function converts the object \code{X}
into an object of class \code{"layered"}.

It is a method for the generic \code{\link{as.layered}}
for the class of measures.

If \code{X} is a vector-valued measure,
then \code{as.layered(X)} consists of several layers,
each containing a scalar-valued measure.
}
\seealso{
\code{\link{as.layered}},
\code{\link{msr}}.
}
\examples{
P <- rpoispp(100)
fit <- ppm(P ~ x+y)
rs <- residuals(fit, type="score")
as.layered(rs)
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{manip}
spatstat.core/man/rmh.ppm.Rd0000644000176200001440000002116614141452520015473 0ustar liggesusers\name{rmh.ppm}
\alias{rmh.ppm}
\title{Simulate from a Fitted Point Process Model}
\description{
Given a point process model fitted to data,
generate a random simulation of the model,
using the Metropolis-Hastings algorithm.
}
\usage{
\method{rmh}{ppm}(model, start=NULL,
control=default.rmhcontrol(model, w=w),
\dots,
w = NULL,
project=TRUE,
nsim=1, drop=TRUE, saveinfo=TRUE,
verbose=TRUE, new.coef=NULL)
}
\arguments{
\item{model}{A fitted point process model
(object of class \code{"ppm"}; see \code{\link{ppm.object}})
to be simulated.
This fitted model is usually the result of a call to
\code{\link{ppm}}. See \bold{Details} below.
}
\item{start}{Data determining the initial state
of the Metropolis-Hastings algorithm. See
\code{\link{rmhstart}} for description of these arguments.
Defaults to \code{list(x.start=data.ppm(model))}.
}
\item{control}{Data controlling the iterative behaviour
of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}}
for description of these arguments.
}
\item{\dots}{
Further arguments passed to \code{\link{rmhcontrol}},
or to \code{\link{rmh.default}},
or to covariate functions in the model.
}
\item{w}{
Optional. Window in which the simulations should be generated.
Default is the window of the original data.
}
\item{project}{
Logical flag indicating what to do if the fitted model is
invalid (in the sense that the values of the fitted coefficients do not
specify a valid point process).
If \code{project=TRUE} the closest valid model will be simulated;
if \code{project=FALSE} an error will occur.
}
\item{nsim}{
Number of simulated point patterns that should be generated.
}
\item{drop}{
Logical. If \code{nsim=1} and \code{drop=TRUE} (the default),
the result will be a point pattern, rather than a list
containing a single point pattern.
}
\item{saveinfo}{
Logical value indicating whether to save auxiliary information.
}
\item{verbose}{
Logical flag indicating whether to print progress reports.
}
\item{new.coef}{
New values for the canonical parameters of the model.
A numeric vector of the same length as \code{coef(model)}.
}
}
\value{
A point pattern (an object of class \code{"ppp"}; see
\code{\link{ppp.object}}) or a list of point patterns.
} \details{ This function generates simulated realisations from a point process model that has been fitted to point pattern data. It is a method for the generic function \code{\link{rmh}} for the class \code{"ppm"} of fitted point process models. To simulate other kinds of point process models, see \code{\link{rmh}} or \code{\link{rmh.default}}. The argument \code{model} describes the fitted model. It must be an object of class \code{"ppm"} (see \code{\link{ppm.object}}), and will typically be the result of a call to the point process model fitting function \code{\link{ppm}}. The current implementation enables simulation from any fitted model involving the interactions \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Softcore}}, including nonstationary models. See the examples. It is also possible to simulate \emph{hybrids} of several such models. See \code{\link{Hybrid}} and the examples. It is possible that the fitted coefficients of a point process model may be ``illegal'', i.e. that there may not exist a mathematically well-defined point process with the given parameter values. For example, a Strauss process with interaction parameter \eqn{\gamma > 1}{gamma > 1} does not exist, but the model-fitting procedure used in \code{\link{ppm}} will sometimes produce values of \eqn{\gamma}{gamma} greater than 1. In such cases, if \code{project=FALSE} then an error will occur, while if \code{project=TRUE} then \code{rmh.ppm} will find the nearest legal model and simulate this model instead. (The nearest legal model is obtained by projecting the vector of coefficients onto the set of valid coefficient vectors. The result is usually the Poisson process with the same fitted intensity.) The arguments \code{start} and \code{control} are lists of parameters determining the initial state and the iterative behaviour, respectively, of the Metropolis-Hastings algorithm. The argument \code{start} is passed directly to \code{\link{rmhstart}}. See \code{\link{rmhstart}} for details of the parameters of the initial state, and their default values. The argument \code{control} is first passed to \code{\link{rmhcontrol}}. Then if any additional arguments \code{\dots} are given, \code{\link{update.rmhcontrol}} is called to update the parameter values. See \code{\link{rmhcontrol}} for details of the iterative behaviour parameters, and \code{\link{default.rmhcontrol}} for their default values. Note that if you specify expansion of the simulation window using the parameter \code{expand} (so that the model will be simulated on a window larger than the original data window) then the model must be capable of extrapolation to this larger window. This is usually not possible for models which depend on external covariates, because the domain of a covariate image is usually the same as the domain of the fitted model. After extracting the relevant information from the fitted model object \code{model}, \code{rmh.ppm} invokes the default \code{rmh} algorithm \code{\link{rmh.default}}, unless the model is Poisson. If the model is Poisson then the Metropolis-Hastings algorithm is not needed, and the model is simulated directly, using one of \code{\link{rpoispp}}, \code{\link{rmpoispp}}, \code{\link{rpoint}} or \code{\link{rmpoint}}. 
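As a minimal sketch (the argument values are illustrative only, and are
drawn from the Examples below), a fitted Strauss model could be
re-simulated with a specified run length on a slightly expanded window,
with periodic boundary conditions, as follows:
\preformatted{
fit <- ppm(swedishpines ~ 1, Strauss(r=7))
Xsim <- rmh(fit, nrep=1e5, expand=1.1, periodic=TRUE)
}
Here the arguments \code{nrep}, \code{expand} and \code{periodic}
are passed to \code{\link{rmhcontrol}}.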
See \code{\link{rmh.default}} for further information about the implementation, or about the Metropolis-Hastings algorithm. } \section{Warnings}{ See Warnings in \code{\link{rmh.default}}. } \seealso{ \code{\link{simulate.ppm}}, \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhcontrol}}, \code{\link{default.rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{rmh.default}}, \code{\link{ppp.object}}, \code{\link{ppm}}, Interactions: \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}} } \examples{ live <- interactive() op <- spatstat.options() spatstat.options(rmh.nrep=1e5) Nrep <- 1e5 X <- swedishpines if(live) plot(X, main="Swedish Pines data") # Poisson process fit <- ppm(X, ~1, Poisson()) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Poisson model") # Strauss process fit <- ppm(X, ~1, Strauss(r=7)) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Strauss model") if(live) { # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) Xsim <- rmh(fit, nrep=Nrep, expand=2, periodic=TRUE) } if(live) { X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) } # marked point pattern Y <- amacrine if(live) { # marked Poisson models fit <- ppm(Y) fit <- ppm(Y,~marks) fit <- ppm(Y,~polynom(x,2)) fit <- ppm(Y,~marks+polynom(x,2)) fit <- ppm(Y,~marks*polynom(x,y,2)) Ysim <- rmh(fit) } # multitype Strauss models MS <- MultiStrauss(radii=matrix(0.07, ncol=2, nrow=2), types = levels(Y$marks)) if(live) { fit <- ppm(Y ~marks, MS) Ysim <- rmh(fit) } fit <- ppm(Y ~ marks*polynom(x,y,2), MS) Ysim <- rmh(fit) if(live) plot(Ysim, main="simulation from fitted inhomogeneous Multitype Strauss") spatstat.options(op) if(live) { # Hybrid model fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) Y <- rmh(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{datagen} spatstat.core/man/rSSI.Rd0000644000176200001440000001111314141452520014721 0ustar liggesusers\name{rSSI} \alias{rSSI} \title{Simulate Simple Sequential Inhibition} \description{ Generate a random point pattern, a realisation of the Simple Sequential Inhibition (SSI) process. } \usage{ rSSI(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ..., f=NULL, fmax=NULL, nsim=1, drop=TRUE) } \arguments{ \item{r}{ Inhibition distance. } \item{n}{ Maximum number of points allowed. If \code{n} is finite, stop when the \emph{total} number of points in the point pattern reaches \code{n}. If \code{n} is infinite (the default), stop only when it is apparently impossible to add any more points. See \bold{Details}. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. The default window is the unit square, unless \code{x.init} is specified, when the default window is the window of \code{x.init}. } \item{giveup}{ Number of rejected proposals after which the algorithm should terminate. } \item{x.init}{ Optional. Initial configuration of points. A point pattern (object of class \code{"ppp"}). 
The pattern returned by \code{rSSI} consists of this pattern together with the points added via simple sequential inhibition. See \bold{Details}. } \item{\dots}{Ignored.} \item{f,fmax}{ Optional arguments passed to \code{\link{rpoint}} to specify a non-uniform probability density for the random points. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This algorithm generates one or more realisations of the Simple Sequential Inhibition point process inside the window \code{win}. Starting with an empty window (or with the point pattern \code{x.init} if specified), the algorithm adds points one-by-one. Each new point is generated uniformly in the window and independently of preceding points. If the new point lies closer than \code{r} units from an existing point, then it is rejected and another random point is generated. The algorithm terminates when either \describe{ \item{(a)}{ the desired number \code{n} of points is reached, or } \item{(b)}{ the current point configuration has not changed for \code{giveup} iterations, suggesting that it is no longer possible to add new points. } } If \code{n} is infinite (the default) then the algorithm terminates only when (b) occurs. The result is sometimes called a \emph{Random Sequential Packing}. Note that argument \code{n} specifies the maximum permitted \bold{total} number of points in the pattern returned by \code{rSSI()}. If \code{x.init} is not \code{NULL} then the number of points that are \emph{added} is at most \code{n - npoints(x.init)} if \code{n} is finite. Thus if \code{x.init} is not \code{NULL} then argument \code{n} must be at least as large as \code{npoints(x.init)}, otherwise an error is given. If \code{n==npoints(x.init)} then a warning is given and the call to \code{rSSI()} has no real effect; \code{x.init} is returned. There is no requirement that the points of \code{x.init} be at a distance at least \code{r} from each other. All of the \emph{added} points will be at a distance at least \code{r} from each other and from any point of \code{x.init}. The points will be generated inside the window \code{win} and the result will be a point pattern in the same window. The default window is the unit square, \code{win = square(1)}, unless \code{x.init} is specified, when the default is \code{win=Window(x.init)}, the window of \code{x.init}. If both \code{win} and \code{x.init} are specified, and if the two windows are different, then a warning will be issued. Any points of \code{x.init} lying outside \code{win} will be removed, with a warning. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}. } \examples{ Vinf <- rSSI(0.07) V100 <- rSSI(0.07, 100) X <- runifpoint(100) Y <- rSSI(0.03,142,x.init=X) # Y consists of X together with # 42 added points. plot(Y, main="rSSI") plot(X,add=TRUE,chars=20,cols="red") ## inhomogeneous Z <- rSSI(0.07, 50, f=function(x,y){x}) plot(Z) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{datagen} spatstat.core/man/summary.ppm.Rd0000644000176200001440000000507214141452520016400 0ustar liggesusers\name{summary.ppm} \alias{summary.ppm} \alias{print.summary.ppm} \title{Summarizing a Fitted Point Process Model} \description{ \code{summary} method for class \code{"ppm"}. } \usage{ \method{summary}{ppm}(object, \dots, quick=FALSE, fine=FALSE) \method{print}{summary.ppm}(x, \dots) } \arguments{ \item{object}{A fitted point process model.} \item{\dots}{Ignored.} \item{quick}{Logical flag controlling the scope of the summary.} \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} determining whether to compute the quick, coarse estimate of variance (\code{fine=FALSE}, the default) or the slower, finer estimate (\code{fine=TRUE}). } \item{x}{Object of class \code{"summary.ppm"} as returned by \code{summary.ppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}}) for details of this class. \code{summary.ppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. (If \code{quick=TRUE} then only the information about the type of model is extracted.) \code{print.summary.ppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.ppm} is invoked implicitly when the user calls \code{summary.ppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.ppm} returns an object of class \code{"summary.ppm"}, while \code{print.summary.ppm} returns \code{NULL}. } \examples{ # invent some data X <- rpoispp(42) # fit a model to it fit <- ppm(X ~ x, Strauss(r=0.1)) # summarize the fitted model summary(fit) # `quick' option summary(fit, quick=TRUE) # coefficients with standard errors and CI coef(summary(fit)) coef(summary(fit, fine=TRUE)) # save the full summary s <- summary(fit) # print it print(s) s # extract stuff names(s) coef(s) s$args$correction s$name s$trend$value # multitype pattern # data(demopat) # fit <- ppm(demopat, ~marks, Poisson()) # summary(fit) # model with external covariates fitX <- ppm(X, ~Z, covariates=list(Z=function(x,y){x+y})) summary(fitX) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/ippm.Rd0000644000176200001440000001340114141452520015050 0ustar liggesusers\name{ippm} \alias{ippm} \title{ Fit Point Process Model Involving Irregular Trend Parameters } \description{ Experimental extension to \code{ppm} which finds optimal values of the irregular trend parameters in a point process model. } \usage{ ippm(Q, \dots, iScore=NULL, start=list(), covfunargs=start, nlm.args=list(stepmax=1/2), silent=FALSE, warn.unused=TRUE) } \arguments{ \item{Q,\dots}{ Arguments passed to \code{\link{ppm}} to fit the point process model. } \item{iScore}{ Optional. A named list of \R functions that compute the partial derivatives of the logarithm of the trend, with respect to each irregular parameter. See Details. } \item{start}{ Named list containing initial values of the irregular parameters over which to optimise. } \item{covfunargs}{ Argument passed to \code{\link{ppm}}. 
A named list containing values for \emph{all} irregular parameters required by the covariates in the model. Must include all the parameters named in \code{start}. } \item{nlm.args}{ Optional list of arguments passed to \code{\link[stats]{nlm}} to control the optimisation algorithm. } \item{silent}{ Logical. Whether to print warnings if the optimisation algorithm fails to converge. } \item{warn.unused}{ Logical. Whether to print a warning if some of the parameters in \code{start} are not used in the model. } } \details{ This function is an experimental extension to the point process model fitting command \code{\link{ppm}}. The extension allows the trend of the model to include irregular parameters, which will be estimated by a Newton-type iterative optimisation method, using \code{\link[stats]{nlm}}. For the sake of explanation, consider a Poisson point process with intensity function \eqn{\lambda(u)}{lambda(u)} at location \eqn{u}. Assume that \deqn{ \lambda(u) = \exp(\alpha + \beta Z(u)) \, f(u, \gamma) }{ lambda(u) = exp(alpha + beta * Z(u)) * f(u, gamma) } where \eqn{\alpha,\beta,\gamma}{alpha, beta, gamma} are parameters to be estimated, \eqn{Z(u)} is a spatial covariate function, and \eqn{f} is some known function. Then the parameters \eqn{\alpha,\beta}{alpha, beta} are called \emph{regular} because they appear in a loglinear form; the parameter \eqn{\gamma}{gamma} is called \emph{irregular}. To fit this model using \code{ippm}, we specify the intensity using the \code{trend} formula in the same way as usual for \code{\link{ppm}}. The trend formula is a representation of the log intensity. In the above example the log intensity is \deqn{ \log\lambda(u) = \alpha + \beta Z(u) + \log f(u, \gamma) }{ log(lambda(u)) = alpha + beta * Z(u) + log(f(u, gamma)) } So the model above would be encoded with the trend formula \code{~Z + offset(log(f))}. Note that the irregular part of the model is an \emph{offset} term, which means that it is included in the log trend as it is, without being multiplied by another regular parameter. The optimisation runs faster if we specify the derivative of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to \eqn{\gamma}{gamma}. We call this the \emph{irregular score}. To specify this, the user must write an \R function that computes the irregular score for any value of \eqn{\gamma}{gamma} at any location \code{(x,y)}. Thus, to code such a problem, \enumerate{ \item The argument \code{trend} should define the log intensity, with the irregular part as an offset; \item The argument \code{start} should be a list containing initial values of each of the irregular parameters; \item The argument \code{iScore}, if provided, must be a list (with one entry for each entry of \code{start}) of functions with arguments \code{x,y,\dots}, that evaluate the partial derivatives of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to each irregular parameter. } The coded example below illustrates the model with two irregular parameters \eqn{\gamma,\delta}{gamma,delta} and irregular term \deqn{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta x^3) }{ f((x,y), (gamma, delta)) = 1 + exp(gamma - delta * x^3) } Arguments \code{\dots} passed to \code{\link{ppm}} may also include \code{interaction}. In this case the model is not a Poisson point process but a more general Gibbs point process; the trend formula \code{trend} determines the first-order trend of the model (the first order component of the conditional intensity), not the intensity.
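For example, a hypothetical sketch of a Gibbs version of the model fitted in the Examples below (it reuses the objects \code{X}, \code{Z}, \code{f} and \code{Dlogf} constructed there; the Strauss interaction radius \code{0.05} is an arbitrary illustrative choice, not part of the original example): \preformatted{
## same irregular trend as in the Examples, with a Strauss
## interaction passed through the "..." argument to ppm()
ippm(X ~ Z + offset(log(f)),
     covariates = list(Z=Z, f=f),
     iScore = Dlogf,
     start = list(gamma=1, delta=1),
     interaction = Strauss(0.05))
} Here the fitted trend describes the first-order component of the conditional intensity, as explained above.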
} \value{ A fitted point process model (object of class \code{"ppm"}) which also belongs to the special class \code{"ippm"}. } \author{\spatstatAuthors.} \seealso{ \code{\link{ppm}}, \code{\link{profilepl}} } \examples{
nd <- 32
\testonly{nd <- 10}
gamma0 <- 3
delta0 <- 5
POW <- 3
# Terms in intensity
Z <- function(x,y) { -2*y }
f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) }
# True intensity
lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) }
# Simulate realisation
lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0))
set.seed(42)
X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0)
# Partial derivatives of log f
DlogfDgamma <- function(x,y, gamma, delta) {
  topbit <- exp(gamma - delta * x^POW)
  topbit/(1 + topbit)
}
DlogfDdelta <- function(x,y, gamma, delta) {
  topbit <- exp(gamma - delta * x^POW)
  - (x^POW) * topbit/(1 + topbit)
}
# irregular score
Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta)
# fit model
ippm(X ~Z + offset(log(f)),
     covariates=list(Z=Z, f=f),
     iScore=Dlogf,
     start=list(gamma=1, delta=1),
     nlm.args=list(stepmax=1),
     nd=nd)
} \keyword{spatial} \keyword{models} spatstat.core/man/ssf.Rd0000644000176200001440000000316314141452520014702 0ustar liggesusers\name{ssf} \alias{ssf} \title{ Spatially Sampled Function } \description{ Create an object that represents a spatial function which has been evaluated or sampled at an irregular set of points. } \usage{ ssf(loc, val) } \arguments{ \item{loc}{ The spatial locations at which the function has been evaluated. A point pattern (object of class \code{"ppp"}). } \item{val}{ The function values at these locations. A numeric vector with one entry for each point of \code{loc}, or a data frame with one row for each point of \code{loc}. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. An example would be a spatial covariate that has only been measured at certain locations. An object of this class also inherits the class \code{"ppp"}, and is essentially the same as a marked point pattern, except for the class membership which enables it to be handled in a different way. There are methods for \code{plot}, \code{print} etc; see \code{\link{plot.ssf}} and \code{\link[spatstat.core:methods.ssf]{methods.ssf}}. Use \code{\link[spatstat.geom]{unmark}} to extract only the point locations, and \code{\link{marks.ssf}} to extract only the function values. } \value{ Object of class \code{"ssf"}. } \author{ \adrian } \seealso{ \code{\link{plot.ssf}}, \code{\link[spatstat.core:methods.ssf]{methods.ssf}}, \code{\link{Smooth.ssf}}, \code{\link{with.ssf}}, \code{\link{[.ssf}}. } \examples{ ssf(cells, nndist(cells, k=1:3)) } \keyword{spatial} \keyword{datagen} spatstat.core/man/Ops.msr.Rd0000644000176200001440000000300514141452520015443 0ustar liggesusers\name{Ops.msr} \alias{Ops.msr} \title{Arithmetic Operations on Measures} \description{ These group generic methods for the class \code{"msr"} allow the arithmetic operators \code{+}, \code{-}, \code{*} and \code{/} to be applied directly to measures. } \usage{
## S3 methods for group generics have prototypes:
\special{Ops(e1, e2)}
%NAMESPACE S3method("Ops", "msr")
} \arguments{ \item{e1, e2}{objects of class \code{"msr"}.} } \details{ Arithmetic operators on a measure \code{A} are only defined in some cases. The arithmetic operator is effectively applied to the value of \code{A(W)} for every spatial domain \code{W}.
If the result is a measure, then this operation is valid. If \code{A} is a measure (object of class \code{"msr"}) then the operations \code{-A} and \code{+A} are defined. If \code{A} and \code{B} are measures with the same dimension (i.e. both are scalar-valued, or both are \code{k}-dimensional vector-valued) then \code{A + B} and \code{A - B} are defined. If \code{A} is a measure and \code{z} is a numeric value, then \code{A * z} and \code{A / z} are defined, and \code{z * A} is defined. } \value{ Another measure (object of class \code{"msr"}). } \seealso{ \code{\link{with.msr}} } \examples{
X <- rpoispp(function(x,y) { exp(3+3*x) })
fit <- ppm(X, ~x+y)
rp <- residuals(fit, type="pearson")
rp
-rp
2 * rp
rp / 2
rp - rp
rr <- residuals(fit, type="raw")
rp - rr
} \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat.core/man/quadrat.test.splitppp.Rd0000644000176200001440000000362114141452520020377 0ustar liggesusers\name{quadrat.test.splitppp} \alias{quadrat.test.splitppp} \title{Dispersion Test of CSR for Split Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for each of the component patterns in a split point pattern, based on quadrat counts. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{
\method{quadrat.test}{splitppp}(X, ..., df=NULL, df.est=NULL, Xname=NULL)
} \arguments{ \item{X}{ A split point pattern (object of class \code{"splitppp"}), each component of which will be subjected to the goodness-of-fit test. } \item{\dots}{Arguments passed to \code{\link{quadrat.test.ppp}}.} \item{df,df.est,Xname}{Arguments passed to \code{\link{pool.quadrattest}}.} } \details{ The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}) and point process models (class \code{"ppm"}). If \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypothesis of Complete Spatial Randomness, then combine the results into a single test. The method \code{quadrat.test.ppp} is applied to each component point pattern. Then the results are pooled using \code{\link{pool.quadrattest}} to obtain a single test. } \seealso{ \code{\link{quadrat.test}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific Poisson alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"quadrattest"} which can be printed and plotted. } \examples{
data(humberside)
qH <- quadrat.test(split(humberside), 2, 3)
plot(qH)
qH
} \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat.core/man/markconnect.Rd0000644000176200001440000001460414141452520016415 0ustar liggesusers\name{markconnect} \alias{markconnect} \title{ Mark Connection Function } \description{ Estimate the mark connection function of a multitype point pattern. } \usage{
markconnect(X, i, j, r=NULL,
            correction=c("isotropic", "Ripley", "translate"),
            method="density", \dots, normalise=FALSE)
} \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured.
} \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. } \item{r}{Numeric vector. The values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to \code{\link{markcorr}}, or passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{TRUE}, normalise the mark connection function by dividing it by \eqn{p_i p_j}{p[i]*p[j]}, the estimated probability that randomly-selected points will have marks \eqn{i} and \eqn{j}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} has been estimated } \item{theo}{the theoretical value of \eqn{p_{ij}(r)}{p[i,j](r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{p_{ij}(r)}{p[i,j](r)} obtained by the edge corrections named. } \details{ The mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} of a multitype point process \eqn{X} is a measure of the dependence between the types of two points of the process a distance \eqn{r} apart. Informally \eqn{p_{ij}(r)}{p[i,j](r)} is defined as the conditional probability, given that there is a point of the process at a location \eqn{u} and another point of the process at a location \eqn{v} separated by a distance \eqn{||u-v|| = r}, that the first point is of type \eqn{i} and the second point is of type \eqn{j}. See Stoyan and Stoyan (1994). If the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{p_{ij}(r) \equiv p_i p_j}{p[i,j](r) = p[i]p[j]} where \eqn{p_i}{p[i]} denotes the probability that a point is of type \eqn{i}. Values larger than this, \eqn{p_{ij}(r) > p_i p_j}{p[i,j](r) > p[i]p[j]}, indicate positive association between the two types, while smaller values indicate negative association. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a multitype point pattern (a marked point pattern with factor-valued marks). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[i,j](r)} is estimated. There is a sensible default. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}.
The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks) and is slow for complicated polygons. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } \item{none}{No edge correction.} } The option \code{correction="none"} should only be used if the number of data points is extremely large (otherwise an edge correction is needed to correct bias). Note that the estimator assumes the process is stationary (spatially homogeneous). The mark connection function is estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the \pkg{stats} package; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ Multitype pair correlation \code{\link{pcfcross}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}. Use \code{\link{alltypes}} to compute the mark connection functions between all pairs of types. Mark correlation \code{\link{markcorr}} and mark variogram \code{\link{markvario}} for numeric-valued marks. } \examples{
# Hughes' amacrine data
# Cells marked as 'on'/'off'
data(amacrine)
M <- markconnect(amacrine, "on", "off")
plot(M)
# Compute for all pairs of types at once
plot(alltypes(amacrine, markconnect))
} \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Emark.Rd0000644000176200001440000001426014141452520015146 0ustar liggesusers\name{Emark} \alias{Emark} \alias{Vmark} \title{ Diagnostics for random marking } \description{ Estimate the summary functions \eqn{E(r)} and \eqn{V(r)} for a marked point pattern, proposed by Schlather et al (2004) as diagnostics for dependence between the points and the marks. } \usage{
Emark(X, r=NULL,
      correction=c("isotropic", "Ripley", "translate"),
      method="density", \dots, normalise=FALSE)
Vmark(X, r=NULL,
      correction=c("isotropic", "Ripley", "translate"),
      method="density", \dots, normalise=FALSE)
} \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. The pattern should have numeric marks. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}.
} \item{normalise}{ If \code{TRUE}, normalise the estimate of \eqn{E(r)} or \eqn{V(r)} so that it would have value equal to 1 if the marks are independent of the points. } } \value{ If \code{marks(X)} is a numeric vector, the result is an object of class \code{"fv"} (see \code{\link{fv.object}}). If \code{marks(X)} is a data frame, the result is a list of objects of class \code{"fv"}, one for each column of marks. An object of class \code{"fv"} is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} has been estimated } \item{theo}{the theoretical, constant value of \eqn{E(r)} or \eqn{V(r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{E(r)} or \eqn{V(r)} obtained by the edge corrections named. } \details{ For a marked point process, Schlather et al (2004) defined the functions \eqn{E(r)} and \eqn{V(r)} to be the conditional mean and conditional variance of the mark attached to a typical random point, given that there exists another random point at a distance \eqn{r} away from it. More formally, \deqn{ E(r) = E_{0u}[M(0)] }{ E(r) = E[0u] M(0) } and \deqn{ V(r) = E_{0u}[(M(0)-E(r))^2] }{ V(r) = E[0u]((M(0)-E(r))^2) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0)} denotes the mark attached to the point \eqn{0}. These functions may serve as diagnostics for dependence between the points and the marks. If the points and marks are independent, then \eqn{E(r)} and \eqn{V(r)} should be constant (not depending on \eqn{r}). See Schlather et al (2004). The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern with numeric marks. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{E(r)} and \eqn{V(r)} are estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The estimates of \eqn{E(r)} and \eqn{V(r)} are computed using density estimation techniques.
The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the \pkg{stats} package; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Schlather, M. and Ribeiro, P. and Diggle, P. (2004) Detecting dependence between marks and locations of marked point processes. \emph{Journal of the Royal Statistical Society, series B} \bold{66}, 79--83. } \seealso{ Mark correlation \code{\link{markcorr}}, mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{
plot(Emark(spruces))
E <- Emark(spruces, method="density", kernel="epanechnikov")
plot(Vmark(spruces))
plot(Emark(finpines))
V <- Vmark(finpines)
} \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/clusterfit.Rd0000644000176200001440000001320014141452520016264 0ustar liggesusers\name{clusterfit} \alias{clusterfit} \title{Fit Cluster or Cox Point Process Model via Minimum Contrast} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern by the Method of Minimum Contrast. } \usage{
clusterfit(X, clusters, lambda = NULL, startpar = NULL, \dots,
           q = 1/4, p = 2, rmin = NULL, rmax = NULL,
           ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax),
           statistic = NULL, statargs = NULL,
           algorithm="Nelder-Mead", verbose=FALSE, pspace=NULL)
} \arguments{ \item{X}{ Data to which the cluster or Cox model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{clusters}{ Character string determining the cluster or Cox model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. Either a single numeric specifying a constant intensity, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{startpar}{ Vector of initial values of the parameters of the point process model. If \code{X} is a point pattern, sensible defaults are used. Otherwise rather arbitrary values are used. } \item{q,p}{ Optional. Exponents for the contrast criterion. See \code{\link{mincontrast}}. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. See \code{\link{mincontrast}}. } \item{ctrl}{ Optional. Named list containing values of the parameters \code{q,p,rmin,rmax}. } \item{\dots}{ Additional arguments passed to \code{\link{mincontrast}.} } \item{statistic}{ Optional. Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details.
} \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. See the argument \code{method} of \code{\link[stats]{optim}}. } \item{verbose}{ Logical value indicating whether to print detailed progress reports for debugging purposes. } \item{pspace}{For internal use by package code only.} } \details{ This function fits the clustering parameters of a cluster or Cox point process model by the Method of Minimum Contrast, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. If \code{statistic="pcf"} (or \code{X} appears to be an estimated pair correlation function) then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function. If \code{X} is a point pattern of class \code{"ppp"}, an estimate of the summary statistic specified by \code{statistic} (defaults to \code{"K"}) is first computed before minimum contrast estimation is carried out as described above. In this case the argument \code{statargs} can be used for controlling the summary statistic estimation. The precise algorithm for computing the summary statistic depends on whether the intensity specification (\code{lambda}) is: \describe{ \item{homogeneous:}{ If \code{lambda} is \code{NULL} or a single numeric value, the pattern is considered homogeneous and either \code{\link{Kest}} or \code{\link{pcf}} is invoked. In this case \code{lambda} is \bold{not} used for anything when estimating the summary statistic. } \item{inhomogeneous:}{ If \code{lambda} is a pixel image (object of class \code{"im"}), a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)}, the pattern is considered inhomogeneous. In this case either \code{\link{Kinhom}} or \code{\link{pcfinhom}} is invoked with \code{lambda} as an argument. } } After the clustering parameters of the model have been estimated by minimum contrast, \code{lambda} (if non-null) is used to compute the additional model parameter \eqn{\mu}{mu}. The algorithm parameters \code{q,p,rmax,rmin} are described in the help for \code{\link{mincontrast}}. They may be provided either as individually-named arguments, or as entries in the list \code{ctrl}. The individually-named arguments \code{q,p,rmax,rmin} override the entries in the list \code{ctrl}. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. See \code{\link{mincontrast}}. } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007). An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \spatstatAuthors.
} \seealso{ \code{\link{kppm}} } \examples{
fit <- clusterfit(redwood, "Thomas")
fit
if(interactive()){
  plot(fit)
}
K <- Kest(redwood)
fit2 <- clusterfit(K, "MatClust")
} \keyword{spatial} \keyword{models} spatstat.core/man/localKinhom.Rd0000644000176200001440000001327514141452520016354 0ustar liggesusers\name{localKinhom} \alias{localKinhom} \alias{localLinhom} \title{Inhomogeneous Neighbourhood Density Function} \description{ Computes spatially-weighted versions of the local \eqn{K}-function or \eqn{L}-function. } \usage{
localKinhom(X, lambda, ..., rmax = NULL, correction = "Ripley",
            verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL,
            update=TRUE, leaveoneout=TRUE)
localLinhom(X, lambda, ..., rmax = NULL, correction = "Ripley",
            verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL,
            update=TRUE, leaveoneout=TRUE)
} \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambda}, if \code{lambda} is missing. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } } \details{ The functions \code{localKinhom} and \code{localLinhom} are inhomogeneous or weighted versions of the neighbourhood density function implemented in \code{\link{localK}} and \code{\link{localL}}.
Given a spatial point pattern \code{X}, the inhomogeneous neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ L[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the inhomogeneous L function (see \code{\link{Linhom}}). By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{
data(ponderosa)
X <- ponderosa
# compute all the local L functions
L <- localLinhom(X)
# plot all the local L functions against r
plot(L, main="local L functions for ponderosa", legend=FALSE)
# plot only the local L function for point number 7
plot(L, iso007 ~ r)
# compute the values of L(r) for r = 12 metres
L12 <- localL(X, rvalue=12)
} \author{ Mike Kuhn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rmhmodel.ppm.Rd0000644000176200001440000001102014141452520016500 0ustar liggesusers\name{rmhmodel.ppm} \alias{rmhmodel.ppm} \title{Interpret Fitted Model for Metropolis-Hastings Simulation.} \description{ Converts a fitted point process model into a format that can be used to simulate the model by the Metropolis-Hastings algorithm. } \usage{
\method{rmhmodel}{ppm}(model, w, ..., verbose=TRUE, project=TRUE,
                       control=rmhcontrol(),
                       new.coef=NULL)
} \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{w}{ Optional. Window in which the simulations should be generated. } \item{\dots}{Ignored.} \item{verbose}{ Logical flag indicating whether to print progress reports while the model is being converted.
} \item{project}{Logical flag indicating what to do if the fitted model does not correspond to a valid point process. See Details.} \item{control}{ Parameters determining the iterative behaviour of the simulation algorithm. Passed to \code{\link{rmhcontrol}}. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.ppm} is the method for the class \code{"ppm"} of fitted point process models. The argument \code{model} should be a fitted point process model (object of class \code{"ppm"}) typically obtained from the model-fitting function \code{\link{ppm}}. This will be converted into an object of class \code{"rmhmodel"}. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. Not all fitted point process models obtained from \code{\link{ppm}} can be simulated. We have not yet implemented simulation code for the \code{\link{LennardJones}} and \code{\link{OrdThresh}} models. It is also possible that a fitted point process model obtained from \code{\link{ppm}} may not correspond to a valid point process. For example a fitted model with the \code{\link{Strauss}} interpoint interaction may have any value of the interaction parameter \eqn{\gamma}{gamma}; however the Strauss process is not well-defined for \eqn{\gamma > 1}{gamma > 1} (Kelly and Ripley, 1976). The argument \code{project} determines what to do in such cases. If \code{project=FALSE}, a fatal error will occur. If \code{project=TRUE}, the fitted model parameters will be adjusted to the nearest values which do correspond to a valid point process. For example a Strauss process with \eqn{\gamma > 1}{gamma > 1} will be projected to a Strauss process with \eqn{\gamma = 1}{gamma = 1}, equivalent to a Poisson process. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. } \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.list}}, \code{\link{rmhmodel.default}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \rmhInteractionsList. 
} \examples{ fit1 <- ppm(cells ~1, Strauss(0.07)) mod1 <- rmhmodel(fit1) fit2 <- ppm(cells ~x, Geyer(0.07, 2)) mod2 <- rmhmodel(fit2) fit3 <- ppm(cells ~x, Hardcore(0.07)) mod3 <- rmhmodel(fit3) # Then rmh(mod1), etc } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/dclf.progress.Rd0000644000176200001440000001340614141452520016663 0ustar liggesusers\name{dclf.progress} \alias{dclf.progress} \alias{mad.progress} \alias{mctest.progress} \title{ Progress Plot of Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Diggle-Cressie-Loosmore-Ford test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.progress(X, \dots) mad.progress(X, \dots) mctest.progress(X, fun = Lest, \dots, exponent = 1, nrank = 1, interpolate = FALSE, alpha, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{mctest.progress} or to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify one-sided or two-sided envelopes, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{alpha}{ Optional. The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle-Cressie-Loosmore-Ford test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dclf.progress} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. Similarly \code{mad.progress} performs \code{\link{mad.test}} using all possible intervals and returns the test statistic and critical value. 
More generally, \code{mctest.progress} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the Monte Carlo acceptance region (grey shading). The significance level for the Monte Carlo test is \code{nrank/(nsim+1)}. Note that \code{nsim} defaults to 99, so if the values of \code{nrank} and \code{nsim} are not given, the default is a test with significance level 0.01. If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. } \author{ \adrian , Andrew Hardegen, Tom Lawrence, Gopal Nair and Robin Milne. } \seealso{ \code{\link{dclf.test}} and \code{\link{mad.test}} for the tests. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. } \examples{ plot(dclf.progress(cells, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat.core/man/Poisson.Rd0000644000176200001440000000344114141452520015540 0ustar liggesusers\name{Poisson} \alias{Poisson} \title{Poisson Point Process Model} \description{ Creates an instance of the Poisson point process model which can then be fitted to point pattern data. } \usage{ Poisson() } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Poisson point process (namely, there are no interactions). } \details{ The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument \code{interaction} of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Poisson process is provided by the value of the function \code{Poisson}. This works for all types of Poisson processes including multitype and nonstationary Poisson processes. 
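Since \code{Poisson()} is also the default interaction assumed by \code{\link{ppm}} when no \code{interaction} argument is given, the following two calls fit the same model: \preformatted{
fit1 <- ppm(nztrees ~ 1)             # interaction defaults to Poisson()
fit2 <- ppm(nztrees ~ 1, Poisson())  # the same model, stated explicitly
}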
} \seealso{ \code{\link{ppm}}, \code{\link{Strauss}} } \examples{
ppm(nztrees ~1, Poisson())
# fit the stationary Poisson process to 'nztrees'
# no edge correction needed

lon <- longleaf
\testonly{
  lon <- lon[seq(1, npoints(lon), by=50)]
}
longadult <- unmark(subset(lon, marks >= 30))
ppm(longadult ~ x, Poisson())
# fit the nonstationary Poisson process
# with intensity lambda(x,y) = exp( a + bx)

# trees marked by species
lans <- lansing
\testonly{
  lans <- lans[seq(1, npoints(lans), by=30)]
}
ppm(lans ~ marks, Poisson())
# fit stationary marked Poisson process
# with different intensity for each species

# ppm(lansing ~ marks * polynom(x,y,3), Poisson())
# fit nonstationary marked Poisson process
# with different log-cubic trend for each species

\testonly{
  # equivalent functionality - smaller dataset
  ppm(amacrine ~ marks * polynom(x,y,2), Poisson())
}
} \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/fasp.object.Rd0000644000176200001440000000621514141452520016306 0ustar liggesusers\name{fasp.object} \alias{fasp.object}
%DoNotExport
\title{Function Arrays for Spatial Patterns} \description{ A class \code{"fasp"} to represent a \dQuote{matrix} of functions, amenable to plotting as a matrix of plot panels. } \details{ An object of this class is a convenient way of storing (and later plotting, editing, etc) a set of functions \eqn{f_{i,j}(r)}{f[i,j](r)} of a real argument \eqn{r}, defined for each possible pair \eqn{(i,j)} of indices \eqn{1 \le i,j \le n}{1 <= i,j <= n}. We may think of this as a matrix or array of functions \eqn{f_{i,j}}{f[i,j]}. Function arrays are particularly useful in the analysis of a multitype point pattern (a point pattern in which the points are identified as belonging to separate types). We may want to compute a summary function for the points of type \eqn{i} only, for each of the possible types \eqn{i}. This produces a \eqn{1 \times m}{1 * m} array of functions. Alternatively we may compute a summary function for each possible pair of types \eqn{(i,j)}. This produces an \eqn{m \times m}{m * m} array of functions. For multitype point patterns the command \code{\link[spatstat.core]{alltypes}} will compute arrays of summary functions for each possible type or for each possible pair of types. The function \code{\link[spatstat.core]{alltypes}} returns an object of class \code{"fasp"}. An object of class \code{"fasp"} is a list containing at least the following components: \describe{ \item{fns}{ A list of data frames, each representing one of the functions. } \item{which}{ A matrix representing the spatial arrangement of the functions. If \code{which[i,j] = k} then the function represented by \code{fns[[k]]} should be plotted in the panel at position \eqn{(i,j)}. If \code{which[i,j] = NA} then nothing is plotted in that position. } \item{titles}{ A list of character strings, providing suitable plotting titles for the functions. } \item{default.formulae}{ A list of default formulae for plotting each of the functions. } \item{title}{ A character string, giving a default title for the array when it is plotted. } } } \section{Functions available}{ There are methods for \code{plot}, \code{print} and \code{"["} for this class. The plot method displays the entire array of functions. The method \code{\link{[.fasp}} selects a sub-array using the natural indices \code{i,j}. The command \code{\link{eval.fasp}} can be used to apply a transformation to each function in the array, and to combine two arrays.
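As a small illustration of how these components fit together (a sketch only, using the function array computed in the Examples below): \preformatted{
GG <- alltypes(amacrine, "G")
GG$which            # layout matrix: which[i,j] indexes the list GG$fns
k <- GG$which[1, 1]
f11 <- GG$fns[[k]]  # the function displayed in panel (1,1)
plot(f11)
}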
} \seealso{ \code{\link[spatstat.core]{alltypes}}, \code{\link{plot.fasp}}, \code{\link{[.fasp}}, \code{\link{eval.fasp}} } \examples{
GG <- alltypes(amacrine, 'G')
plot(GG)
# select the row corresponding to cells of type "on"
Gon <- GG["on", ]
plot(Gon)
# extract the G function for i = "on", j = "off"
Gonoff <- GG["on", "off", drop=TRUE]
# Fisher variance stabilising transformation
GGfish <- eval.fasp(asin(sqrt(GG)))
plot(GGfish)
} \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat.core/man/predict.kppm.Rd0000644000176200001440000000277114141452520016513 0ustar liggesusers\name{predict.kppm} \alias{predict.kppm} \alias{fitted.kppm} \title{Prediction from a Fitted Cluster Point Process Model} \description{ Given a fitted cluster point process model, these functions compute the fitted intensity. } \usage{
\method{fitted}{kppm}(object, ...)
\method{predict}{kppm}(object, ...)
} \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. The argument \code{object} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.kppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.kppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{vcov.kppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{
data(redwood)
fit <- kppm(redwood ~ x, "Thomas")
predict(fit)
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/rpoisline.Rd0000644000176200001440000000276014141452520016115 0ustar liggesusers\name{rpoisline} \alias{rpoisline} \title{Generate Poisson Random Line Process} \description{ Generate a random pattern of line segments obtained from the Poisson line process. } \usage{ rpoisline(lambda, win=owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \value{ A line segment pattern (an object of class \code{"psp"}). The result also has an attribute called \code{"lines"} (an object of class \code{"infline"} specifying the original infinite random lines) and an attribute \code{"linemap"} (an integer vector mapping the line segments to their parent lines). } \details{ This algorithm generates a realisation of the uniform Poisson line process, and clips it to the window \code{win}. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region.
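The second formula can be checked empirically, as in the following rough sketch (assuming \code{lengths_psp} from \pkg{spatstat.geom} is available to measure the segment lengths): \preformatted{
## average total length over simulations should be close to
## lambda * pi * area = 10 * pi * 1 = 31.4
tot <- replicate(100, sum(lengths_psp(rpoisline(10, win=square(1)))))
mean(tot)
}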
} \seealso{ \code{\link{psp}} } \examples{
# uniform Poisson line process with intensity 10,
# clipped to the unit square
rpoisline(10)
} \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/cdf.test.Rd0000644000176200001440000002553714141452520015630 0ustar liggesusers\name{cdf.test} \alias{cdf.test} \alias{cdf.test.ppm} \alias{cdf.test.ppp} \alias{cdf.test.slrm} \title{Spatial Distribution Test for Point Pattern or Point Process Model} \description{ Performs a test of goodness-of-fit of a point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov test, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or Anderson-Darling test. For non-Poisson models, a Monte Carlo test is used. } \usage{
cdf.test(...)

\method{cdf.test}{ppp}(X, covariate, test=c("ks", "cvm", "ad"), \dots,
                       interpolate=TRUE, jitter=TRUE)

\method{cdf.test}{ppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots,
                       interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE)

\method{cdf.test}{slrm}(model, covariate, test=c("ks", "cvm", "ad"), \dots,
                        modelname=NULL, covname=NULL)
} \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) or fitted spatial logistic regression (object of class \code{"slrm"}). } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link[stats]{ks.test}} (from the \pkg{stats} package) or \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package) to control the test. } \item{interpolate}{ Logical flag indicating whether to interpolate pixel images. If \code{interpolate=TRUE}, the value of the covariate at each point of \code{X} will be approximated by interpolating the nearby pixel values. If \code{interpolate=FALSE}, the nearest pixel value will be used. } \item{jitter}{ Logical flag. If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } \item{modelname,covname}{ Character strings giving alternative names for \code{model} and \code{covariate} to be used in labelling plot axes. } \item{nsim}{ Number of simulated realisations from the \code{model} to be used for the Monte Carlo test, when \code{model} is not a Poisson process. } \item{verbose}{ Logical value indicating whether to print progress reports when performing a Monte Carlo test. } } \details{ These functions perform a goodness-of-fit test of a Poisson or Gibbs point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test, the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is performed using these test statistics.
The function \code{cdf.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"}), then \code{cdf.test(X, \dots)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. \item If \code{model} is a fitted spatial logistic regression (object of class \code{"slrm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using a classical goodness-of-fit test. Thus, you must nominate a spatial covariate for this test. If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is then applied to these numbers using \code{stats::\link[stats]{ks.test}}, \code{goftest::\link[goftest]{cvm.test}} or \code{goftest::\link[goftest]{ad.test}}. This test was apparently first described (in the context of spatial data, and using Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005).
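As a rough illustration of this construction (a simplified sketch only: \code{cdf.test} performs these steps internally, with more careful handling of pixel weights, \code{NA} values and ties; here \code{fit} is assumed to be a fitted Poisson model and \code{Z} a covariate image): \preformatted{
lam  <- predict(fit)                  # fitted intensity image
zval <- as.numeric(as.matrix(Z))      # covariate values at pixels
wts  <- as.numeric(as.matrix(lam))    # intensity weights
ok   <- is.finite(zval) & is.finite(wts)
Fhat <- ewcdf(zval[ok], wts[ok]/sum(wts[ok]))  # predicted CDF of covariate
U    <- Fhat(Z[data.ppm(fit)])        # transformed covariate values at data points
ks.test(U, "punif")                   # compare with the uniform distribution
}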
If \code{model} is not a Poisson process, then a Monte Carlo test is performed, by generating \code{nsim} point patterns which are simulated realisations of the \code{model}, re-fitting the model to each simulated point pattern, and calculating the test statistic for each fitted model. The Monte Carlo \eqn{p} value is determined by comparing the simulated values of the test statistic with the value for the original data. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. The return value also belongs to the class \code{"cdftest"} for which there is a plot method \code{\link{plot.cdftest}}. The plot method displays the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, plotted against the value of the covariate. The argument \code{jitter} controls whether covariate values are randomly perturbed, in order to avoid ties. If the original data contains any ties in the covariate (i.e. points with equal values of the covariate), and if \code{jitter=FALSE}, then the Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}} will issue a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if \code{jitter=TRUE} each value of the covariate will be perturbed by adding a small random value. The perturbations are normally distributed with standard deviation equal to one hundredth of the range of values of the covariate. This prevents ties, and the \eqn{p}-value is still correct. There is a very slight loss of power. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link[stats]{ks.test}} for details. The return value can be printed to give an informative summary of the test. The value also belongs to the class \code{"cdftest"} for which there is a plot method. } \section{Warning}{ The outcome of the test involves a small amount of random variability, because (by default) the coordinates are randomly perturbed to avoid tied values. Hence, if \code{cdf.test} is executed twice, the \eqn{p}-values will not be exactly the same. To avoid this behaviour, set \code{jitter=FALSE}. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.cdftest}}, \code{\link{quadrat.test}}, \code{\link{berman.test}}, \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}}, \code{\link[goftest]{ad.test}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. 
} \examples{ op <- options(useFancyQuotes=FALSE) # test of CSR using x coordinate cdf.test(nztrees, "x") cdf.test(nztrees, "x", "cvm") cdf.test(nztrees, "x", "ad") # test of CSR using a function of x and y fun <- function(x,y){2* x + y} cdf.test(nztrees, fun) # test of CSR using an image covariate funimage <- as.im(fun, W=Window(nztrees)) cdf.test(nztrees, funimage) # fit inhomogeneous Poisson model and test model <- ppm(nztrees ~x) cdf.test(model, "x") if(interactive()) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X ~1) # fit correct nonuniform Poisson process fit1 <- ppm(X ~x) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") } # multitype point pattern cdf.test(amacrine, "x") yimage <- as.im(function(x,y){y}, W=Window(amacrine)) cdf.test(ppm(amacrine ~marks+y), yimage) options(op) } \keyword{htest} \keyword{spatial} spatstat.core/man/thomas.estK.Rd0000644000176200001440000001426114141452520016310 0ustar liggesusers\name{thomas.estK} \alias{thomas.estK} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the K function. } \usage{ thomas.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical \eqn{K} function of the Thomas process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). 
It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of starting values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical \eqn{K}-function of the Thomas process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa (1 - \exp(-\frac{r^2}{4\sigma^2})). }{ K(r) = pi r^2 + (1 - exp(-r^2/(4 sigma^2)))/kappa. } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258.
} \author{ Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ data(redwood) u <- thomas.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat.core/man/thresholdSelect.Rd0000644000176200001440000000477614141452520017256 0ustar liggesusers\name{thresholdSelect} \alias{thresholdSelect} \title{ Select Threshold to Convert Numerical Predictor to Binary Predictor } \description{ Given a point pattern and a spatial covariate that has some predictive value for the point pattern, determine the optimal value of the threshold for converting the covariate to a binary predictor. } \usage{ thresholdSelect(X, Z, method = c("Y", "LL", "AR", "t", "C"), Zname) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{Z}{ Spatial covariate with numerical values. Either a pixel image (object of class \code{"im"}), a distance function (object of class \code{"distfun"}) or a \code{function(x,y)} in the \R language. } \item{method}{ Character string (partially matched) specifying the method to be used to select the optimal threshold value. See Details. } \item{Zname}{ Optional character string giving a short name for the covariate. } } \details{ The spatial covariate \code{Z} is assumed to have some utility as a predictor of the point pattern \code{X}. This code chooses the best threshold value \eqn{v} for converting the numerical predictor \code{Z} to a binary predictor, for use in techniques such as Weights of Evidence. The best threshold is selected by maximising the criterion specified by the argument \code{method}. Options are: \itemize{ \item \code{method="Y"} (the default): the Youden criterion \item \code{method="LL"}: log-likelihood \item \code{method="AR"}: the Akman-Raftery criterion \item \code{method="t"}: the Studentised Weights-of-Evidence contrast \item \code{method="C"}: the Weights-of-Evidence contrast } These criteria are explained in Baddeley et al (2021). } \value{ A numerical value giving the selected threshold. The result also belongs to the class \code{"bw.optim"} which can be plotted (the plot shows the criterion used to select the threshold). } \references{ Baddeley, A., Brown, W., Milne, R.K., Nair, G., Rakshit, S., Lawrence, T., Phatak, A. and Fu, S.C. (2021) Optimal thresholding of predictors in mineral prospectivity analysis. \emph{Natural Resources Research} \bold{30} 923--969. } \author{ \adrian. } \seealso{ \code{\link{thresholdCI}} } \examples{ gold <- rescale(murchison$gold, 1000, "km") faults <- rescale(murchison$faults, 1000, "km") distfault <- distfun(faults) z <- thresholdSelect(gold, distfault) z plot(z, xlim=c(0, 20)) } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.core/man/formula.fv.Rd0000644000176200001440000000357614141452520016176 0ustar liggesusers\name{formula.fv} \alias{formula.fv} \alias{formula<-} \alias{formula<-.fv} \title{ Extract or Change the Plot Formula for a Function Value Table } \description{ Extract or change the default plotting formula for an object of class \code{"fv"} (function value table). } \usage{ \method{formula}{fv}(x, \dots) formula(x, \dots) <- value \method{formula}{fv}(x, \dots) <- value } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the values of several estimates of a function. 
} \item{\dots}{ Arguments passed to other methods. } \item{value}{ New value of the formula. Either a \code{formula} or a character string. } } \details{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) is a convenient way of storing and plotting several different estimates of the same function. The default behaviour of \code{plot(x)} for a function value table \code{x} is determined by a formula associated with \code{x} called its \emph{plot formula}. See \code{\link{plot.fv}} for explanation about these formulae. The function \code{formula.fv} is a method for the generic command \code{\link{formula}}. It extracts the plot formula associated with the object. The function \code{formula<-} is generic. It changes the formula associated with an object. The function \code{formula<-.fv} is the method for \code{formula<-} for the class \code{"fv"}. It changes the plot formula associated with the object. } \value{ The result of \code{formula.fv} is a character string containing the plot formula. The result of \code{formula<-.fv} is a new object of class \code{"fv"}. } \author{ \adrian and \rolf } \seealso{ \code{\link{fv}}, \code{\link{plot.fv}}, \code{\link[stats]{formula}}. } \examples{ K <- Kest(cells) formula(K) formula(K) <- (iso ~ r) } \keyword{spatial} \keyword{methods} spatstat.core/man/Kdot.Rd0000644000176200001440000001673414141452520015020 0ustar liggesusers\name{Kdot} \alias{Kdot} \title{ Multitype K Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of other points of the process within a given distance of a point of type \eqn{i}. } \usage{ Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE, from) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from}{An alternative way to specify \code{i}.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kdot} and its companions \code{\link{Kcross}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The ``type \eqn{i} to any type'' multitype \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The function \eqn{K_{i\bullet}}{Ki.} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points were independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between types. This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the chosen edge correction(s). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. 
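For example (a small sketch using the \code{amacrine} dataset, whose marks have levels \code{"off"} and \code{"on"}):
\preformatted{
  K <- Kdot(amacrine, "on", correction="isotropic")
  ## compare the estimate with the theoretical value pi * r^2
  plot(K, cbind(iso, theo) ~ r)
}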
The pair correlation function can also be applied to the result of \code{Kdot}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D. and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303. Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D., Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. } \seealso{ \code{\link{Kcross}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{woods <- woods[seq(1, npoints(woods), by=80)]} Kh. <- Kdot(woods, "hickory") # diagnostic plot for independence between hickories and other trees plot(Kh.) # synthetic example with two marks "a" and "b" # pp <- runifpoispp(50) # pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE)) # K <- Kdot(pp, "a") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Penttinen.Rd0000644000176200001440000000520014141452520016047 0ustar liggesusers\name{Penttinen} \alias{Penttinen} \title{Penttinen Interaction} \description{ Creates an instance of the Penttinen pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Penttinen(r) } \arguments{ \item{r}{circle radius} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance \eqn{d}. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{r} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. The scale of interaction is controlled by the disc radius \eqn{r}: two points interact if they are closer than \eqn{2 r}{2 * r} apart. The strength of interaction is controlled by the canonical parameter \eqn{\theta}{theta}, which must be less than or equal to zero, or equivalently by the parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)}, which must lie between 0 and 1. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. For \eqn{\gamma=0}{gamma=0} the model is a hard core process with hard core diameter \eqn{2 r}{2 * r}. For \eqn{\gamma=1}{gamma=1} the model is a Poisson process. The irregular parameter \eqn{r} must be given in the call to \code{Penttinen}, while the regular parameter \eqn{\theta}{theta} will be estimated.
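A suitable value of \eqn{r} can be chosen by maximising the profile pseudolikelihood (a sketch, not from the original documentation, using \code{\link{profilepl}} with a grid of candidate radii):
\preformatted{
  rr <- data.frame(r=seq(0.05, 0.12, by=0.01))
  p <- profilepl(rr, Penttinen, cells ~ 1)
  as.ppm(p)   # the model fitted with the optimal radius
}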
This model can be considered as a pairwise approximation to the area-interaction model \code{\link{AreaInter}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}}, \code{\link{AreaInter}}. } \examples{ fit <- ppm(cells ~ 1, Penttinen(0.07)) fit reach(fit) # interaction range is circle DIAMETER } \references{ Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla} Studies in Computer Science, Economics and Statistics \bold{7}, University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/pairs.im.Rd0000644000176200001440000000704714141452520015636 0ustar liggesusers\name{pairs.im} \alias{pairs.im} \title{ Scatterplot Matrix for Pixel Images } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images. } \usage{ \method{pairs}{im}(..., plot=TRUE, drop=TRUE) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image (object of class \code{"im"}) or a named argument to be passed to \code{\link{pairs.default}}. Alternatively, a single argument which is a list of pixel images. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } \item{drop}{ Logical value specifying whether pixel values that are \code{NA} should be removed from the data frame that is returned by the function. This does not affect the plot. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. First the pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. The return value of \code{pairs.im} is a data frame, returned invisibly. The data frame has one column for each image. Each row contains the pixel values of the different images for one pixel in the raster. If \code{drop=TRUE} (the default), any row which contains \code{NA} is deleted. The plot is not affected by the value of \code{drop}. } \section{Image or Contour Plots}{ Since the scatterplots may show very dense concentrations of points, it may be useful to set \code{panel=panel.image} or \code{panel=panel.contour} to draw a colour image or contour plot of the kernel-smoothed density of the scatterplot in each panel. The argument \code{panel} is passed to \code{\link{pairs.default}}. See the help for \code{\link{panel.image}} and \code{\link{panel.contour}}. 
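For example (a sketch along the lines of the Examples below):
\preformatted{
  X <- density(rpoispp(100))
  Y <- density(rpoispp(100))
  pairs(X, Y, panel=panel.image)
}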
} \section{Low Level Control of Graphics}{ To control the appearance of the individual scatterplot panels, see \code{\link{pairs.default}}, \code{\link{points}} or \code{\link{par}}. To control the plotting symbol for the points in the scatterplot, use the arguments \code{pch}, \code{col}, \code{bg} as described under \code{\link{points}} (because the default panel plotter is the function \code{\link{points}}). To suppress the tick marks on the plot axes, type \code{par(xaxt="n", yaxt="n")} before calling \code{pairs}. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. } \seealso{ \code{\link{pairs}}, \code{\link{pairs.default}}, \code{\link{panel.contour}}, \code{\link{panel.image}}, \code{\link{plot.im}}, \code{\link{cov.im}}, \code{\link{im}}, \code{\link{par}} } \examples{ X <- density(rpoispp(30)) Y <- density(rpoispp(40)) Z <- density(rpoispp(30)) p <- pairs(X,Y,Z) p plot(p) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat.core/man/Linhom.Rd0000644000176200001440000000574714141452520015347 0ustar liggesusers\name{Linhom} \alias{Linhom} \title{Inhomogeneous L-function} \description{ Calculates an estimate of the inhomogeneous version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Linhom(X, ..., correction) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kinhom}} to control the estimation procedure. } } \details{ This command computes an estimate of the inhomogeneous version of the \eqn{L}-function for a spatial point pattern. The original \eqn{L}-function is a transformation (proposed by Besag) of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the Ripley \eqn{K}-function of a spatially homogeneous point pattern, estimated by \code{\link{Kest}}. The inhomogeneous \eqn{L}-function is the corresponding transformation of the inhomogeneous \eqn{K}-function, estimated by \code{\link{Kinhom}}. It is appropriate when the point pattern clearly does not have a homogeneous intensity of points. It was proposed by Baddeley, \Moller and Waagepetersen (2000). The command \code{Linhom} first calls \code{\link{Kinhom}} to compute the estimate of the inhomogeneous K-function, and then applies the square root transformation. For a Poisson point pattern (homogeneous or inhomogeneous), the theoretical value of the inhomogeneous \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. 
These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Kinhom}}, \code{\link{pcf}} } \examples{ data(japanesepines) X <- japanesepines L <- Linhom(X, sigma=0.1) plot(L, main="Inhomogeneous L function for Japanese Pines") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/dfbetas.ppm.Rd0000644000176200001440000000735214141452520016316 0ustar liggesusers\name{dfbetas.ppm} \alias{dfbetas.ppm} \title{ Parameter Influence Measure } \description{ Computes the deletion influence measure for each parameter in a fitted point process model. } \usage{ \method{dfbetas}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored, except for the arguments \code{dimyx} and \code{eps} which are passed to \code{\link{as.mask}} to control the spatial resolution of the image of the density component. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process \code{model}, this function computes the influence measure for each parameter, as described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). This is a method for the generic function \code{\link[stats]{dfbetas}}. The influence measure for each parameter \eqn{\theta}{theta} is a signed measure in two-dimensional space. It consists of a discrete mass on each data point (i.e. each point in the point pattern to which the \code{model} was originally fitted) and a continuous density at all locations. The mass at a data point represents the change in the fitted value of the parameter \eqn{\theta}{theta} that would occur if this data point were to be deleted. The density at other non-data locations represents the effect (on the fitted value of \eqn{\theta}{theta}) of deleting these locations (and their associated covariate values) from the input to the fitting procedure. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ An object of class \code{"msr"} representing a signed or vector-valued measure. This object can be printed and plotted. } \references{ Baddeley, A., Chang, Y.M. and Song, Y.
(2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, 15--48. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{ppmInfluence}}. See \code{\link{msr}} for information on how to use a measure. } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) \testonly{fit <- ppm(X ~x+y, nd=16)} plot(dfbetas(fit)) plot(Smooth(dfbetas(fit))) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat.core/man/repul.Rd0000644000176200001440000000340014141452520015230 0ustar liggesusers\name{repul.dppm} \alias{repul} \alias{repul.dppm} \title{ Repulsiveness Index of a Determinantal Point Process Model } \description{ Computes a measure of the degree of repulsion between points in a determinantal point process model. } \usage{ repul(model, \dots) \method{repul}{dppm}(model, \dots) } \arguments{ \item{model}{ A fitted point process model of determinantal type (object of class \code{"dppm"}). } \item{\dots}{ Ignored. } } \details{ The repulsiveness index \eqn{\mu}{mu} of a determinantal point process model was defined by Lavancier, \Moller and Rubak (2015) as \deqn{ \mu = \lambda \int (1- g(x)) \, dx }{ mu = lambda * integral[(1-g(x))] } where \eqn{\lambda}{lambda} is the intensity of the model and \eqn{g(x)} is the pair correlation function, and the integral is taken over all two-dimensional vectors \eqn{x}. Values of \eqn{\mu}{mu} are dimensionless. Larger values of \eqn{\mu}{mu} indicate stronger repulsion between points. If the model is stationary, the result is a single number. If the model is not stationary, the result is a pixel image (obtained by multiplying the spatially-varying intensity by the integral defined above). } \value{ A numeric value or a pixel image. } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, \bold{77}, 853--877. } \author{ \adrian. } \seealso{ \code{\link{dppm}} } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } fit <- dppm(jpines ~ 1, dppGauss) repul(fit) } \keyword{spatial} \keyword{models} spatstat.core/man/plot.cdftest.Rd0000644000176200001440000000642614141452520016525 0ustar liggesusers\name{plot.cdftest} \alias{plot.cdftest} \title{Plot a Spatial Distribution Test} \description{ Plot the result of a spatial distribution test computed by \code{cdf.test}. } \usage{ \method{plot}{cdftest}(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"cdftest"} produced by a method for \code{\link{cdf.test}}. } \item{\dots}{ Extra arguments that will be passed to the plotting function \code{\link{plot.default}}. } \item{style}{ Style of plot. See Details. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical curve (the empirical distribution, or PP plot or QQ plot). } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the reference curve (the predicted distribution, or the diagonal).
} \item{do.legend}{ Logical value indicating whether to add an explanatory legend. Applies only when \code{style="cdf"}. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"cdftest"}. An object of this class represents the outcome of a spatial distribution test, computed by \code{\link{cdf.test}}, and based on either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test. If \code{style="cdf"} (the default), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, both plotted against the value of the covariate. The Kolmogorov-Smirnov test statistic (for example) is the maximum vertical separation between the two curves. If \code{style="PP"} then the P-P plot is drawn. The \eqn{x} coordinates of the plot are cumulative probabilities for the covariate under the model. The \eqn{y} coordinates are cumulative probabilities for the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic is the maximum vertical separation between the P-P plot and the diagonal reference line. If \code{style="QQ"} then the Q-Q plot is drawn. The \eqn{x} coordinates of the plot are quantiles of the covariate under the model. The \eqn{y} coordinates are quantiles of the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic cannot be read off the Q-Q plot. } \seealso{ \code{\link{cdf.test}} } \examples{ op <- options(useFancyQuotes=FALSE) # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- cdf.test(fit0, xcoord) # plot result of test plot(k, lwd0=3) plot(k, style="PP") plot(k, style="QQ") options(op) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat.core/man/bw.pplHeat.Rd0000644000176200001440000000444514141452520016117 0ustar liggesusers\name{bw.pplHeat} \alias{bw.pplHeat} \title{ Bandwidth Selection for Diffusion Smoother by Likelihood Cross-Validation } \description{ Selects an optimal bandwidth for diffusion smoothing by point process likelihood cross-validation. } \usage{ bw.pplHeat(X, \dots, srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose = TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{densityHeat.ppp}}. } \item{srange}{ Numeric vector of length 2 specifying a range of bandwidths to be considered. } \item{ns}{ Integer. Number of candidate bandwidths to be considered. } \item{sigma}{ Maximum smoothing bandwidth. A numeric value, or a pixel image, or a \code{function(x,y)}. Alternatively a numeric vector containing a sequence of candidate bandwidths. } \item{leaveoneout}{ Logical value specifying whether intensity values at data points should be estimated using the leave-one-out rule. } \item{verbose}{ Logical value specifying whether to print progress reports. } } \details{ This algorithm selects the optimal global bandwidth for kernel estimation of intensity for the dataset \code{X} using diffusion smoothing \code{\link{densityHeat.ppp}}. 
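For example (a minimal sketch; as noted under the argument \code{sigma}, a vector of candidate bandwidths may be supplied directly):
\preformatted{
  X <- rpoispp(50)
  b <- bw.pplHeat(X, sigma=seq(0.05, 0.3, length=8))
  plot(b)
}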
If \code{sigma} is a numeric value, the algorithm finds the optimal bandwidth \code{tau <= sigma}. If \code{sigma} is a pixel image or function, the algorithm finds the optimal fraction \code{0 < f <= 1} such that smoothing with \code{f * sigma} would be optimal. } \value{ A numerical value giving the selected bandwidth (if \code{sigma} was a numeric value) or the selected fraction of the maximum bandwidth (if \code{sigma} was a pixel image or function). The result also belongs to the class \code{"bw.optim"} which can be plotted. } \author{ Adrian Baddeley and Tilman Davies. } \seealso{ \code{\link{bw.CvLHeat}} for an alternative method. \code{\link{densityHeat.ppp}} } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32) f <- function(x,y) { dnorm(x, 2.3, 0.1) * dnorm(y, 2.0, 0.2) } X <- rpoint(15, f, win=letterR) plot(X) b <- bw.pplHeat(X, sigma=0.25) b plot(b) if(!online) spatstat.options(op) } \keyword{spatial} \keyword{smooth} spatstat.core/man/dclf.test.Rd0000644000176200001440000002576014141452520016004 0ustar liggesusers\name{dclf.test} \alias{dclf.test} \alias{mad.test} \title{ Diggle-Cressie-Loosmore-Ford and Maximum Absolute Deviation Tests } \description{ Perform the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) mad.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) } \arguments{ \item{X}{ Data for the test. Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class), a simulation envelope (object of class \code{"envelope"}) or a previous result of \code{dclf.test} or \code{mad.test}. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{verbose=FALSE} to turn off the messages, \code{savefuns} or \code{savepatterns} to save the simulation results, and \code{use.theory} described under Details. } \item{alternative}{ The alternative hypothesis. A character string. The default is a two-sided alternative. See Details. } \item{rinterval}{ Interval of values of the summary function argument \code{r} over which the maximum absolute deviation, or the integral, will be computed for the test. A numeric vector of length 2. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{scale}{ Optional. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the test statistic is computed. } \item{clamp}{ Logical value indicating how to compute deviations in a one-sided test. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. 
If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } } \details{ These functions perform hypothesis tests for goodness-of-fit of a point pattern dataset to a point process model, based on Monte Carlo simulation from the model. \code{dclf.test} performs the test advocated by Loosmore and Ford (2006) which is also described in Diggle (1986), Cressie (1991, page 667, equation (8.5.42)) and Diggle (2003, page 14). See Baddeley et al (2014) for detailed discussion. \code{mad.test} performs the \sQuote{global} or \sQuote{Maximum Absolute Deviation} test described by Ripley (1977, 1981). See Baddeley et al (2014). The type of test depends on the type of argument \code{X}. \itemize{ \item If \code{X} is some kind of point pattern, then a test of Complete Spatial Randomness (CSR) will be performed. That is, the null hypothesis is that the point pattern is completely random. \item If \code{X} is a fitted point process model, then a test of goodness-of-fit for the fitted model will be performed. The model object contains the data point pattern to which it was originally fitted. The null hypothesis is that the data point pattern is a realisation of the model. \item If \code{X} is an envelope object generated by \code{\link{envelope}}, then it should have been generated with \code{savefuns=TRUE} or \code{savepatterns=TRUE} so that it contains simulation results. These simulations will be treated as realisations from the null hypothesis. \item Alternatively \code{X} could be a previously-performed test of the same kind (i.e. the result of calling \code{dclf.test} or \code{mad.test}). The simulations used to perform the original test will be re-used to perform the new test (provided these simulations were saved in the original test, by setting \code{savefuns=TRUE} or \code{savepatterns=TRUE}). } The argument \code{alternative} specifies the alternative hypothesis, that is, the direction of deviation that will be considered statistically significant. If \code{alternative="two.sided"} (the default), both positive and negative deviations (between the observed summary function and the theoretical function) are significant. If \code{alternative="less"}, then only negative deviations (where the observed summary function is lower than the theoretical function) are considered. If \code{alternative="greater"}, then only positive deviations (where the observed summary function is higher than the theoretical function) are considered. In all cases, the algorithm will first call \code{\link{envelope}} to generate or extract the simulated summary functions. The number of simulations that will be generated or extracted, is determined by the argument \code{nsim}, and defaults to 99. 
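For instance (a brief sketch mirroring the Examples below; small values of \code{nsim} are used only to keep the computation short):
\preformatted{
  dclf.test(cells, Lest, nsim=39)
  mad.test(cells, Lest, nsim=39, rinterval=c(0, 0.1))
}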
The summary function that will be computed is determined by the argument \code{fun} (or the first unnamed argument in the list \code{\dots}) and defaults to \code{\link{Kest}} (except when \code{X} is an envelope object generated with \code{savefuns=TRUE}, in which case the saved summary functions will be used). The choice of summary function \code{fun} affects the power of the test. It is normally recommended to apply a variance-stabilising transformation (Ripley, 1981). If you are using the \eqn{K} function, the normal practice is to replace this by the \eqn{L} function (Besag, 1977) computed by \code{\link{Lest}}. If you are using the \eqn{F} or \eqn{G} functions, the recommended practice is to apply Fisher's variance-stabilising transformation \eqn{\sin^{-1}\sqrt x}{asin(sqrt(x))} using the argument \code{transform}. See the Examples. The argument \code{rinterval} specifies the interval of distance values \eqn{r} which will contribute to the test statistic (either maximising over this range of values for \code{mad.test}, or integrating over this range of values for \code{dclf.test}). This affects the power of the test. General advice and experiments in Baddeley et al (2014) suggest that the maximum \eqn{r} value should be slightly larger than the maximum possible range of interaction between points. The \code{dclf.test} is quite sensitive to this choice, while the \code{mad.test} is relatively insensitive. It is also possible to specify a pointwise test (i.e. taking a single, fixed value of distance \eqn{r}) by specifying \code{rinterval = c(r,r)}. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \section{Handling Ties}{ If the observed value of the test statistic is equal to one or more of the simulated values (called a \emph{tied value}), then the tied values will be assigned a random ordering, and a message will be printed. } \value{ An object of class \code{"htest"}. Printing this object gives a report on the result of the test. The \eqn{p}-value is contained in the component \code{p.value}. } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3), 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2015) \emph{Pushing the envelope}. In preparation. Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Diggle, P. J. (1986).
Displaced amacrine cells in the retina of a rabbit: analysis of a bivariate spatial point pattern. \emph{J. Neuroscience Methods} \bold{18}, 115--125. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Loosmore, N.B. and Ford, E.D. (2006) Statistical inference using the \emph{G} or \emph{K} point pattern spatial statistics. \emph{Ecology} \bold{87}, 1925--1931. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172--212. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \author{ \adrian, Andrew Hardegen and Suman Rakshit. } \seealso{ \code{\link{envelope}}, \code{\link{dclf.progress}} } \examples{ dclf.test(cells, Lest, nsim=39) m <- mad.test(cells, Lest, verbose=FALSE, rinterval=c(0, 0.1), nsim=19) m # extract the p-value m$p.value # variance stabilised G function dclf.test(cells, Gest, transform=expression(asin(sqrt(.))), verbose=FALSE, nsim=19) ## one-sided test ml <- mad.test(cells, Lest, verbose=FALSE, nsim=19, alternative="less") ## scaled mad.test(cells, Kest, verbose=FALSE, nsim=19, rinterval=c(0.05, 0.2), scale=function(r) { r }) } \keyword{spatial} \keyword{htest} spatstat.core/man/bw.pcf.Rd0000644000176200001440000001150614141452520015266 0ustar liggesusers\name{bw.pcf} \alias{bw.pcf} \title{ Cross Validated Bandwidth Selection for Pair Correlation Function } \description{ Uses composite likelihood or generalized least squares cross-validation to select a smoothing bandwidth for the kernel estimation of the pair correlation function. } \usage{ bw.pcf(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, \dots, verbose=FALSE, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{rmax}{ Numeric. Maximum value of the spatial lag distance \eqn{r} for which \eqn{g(r)} should be evaluated. } \item{lambda}{ Optional. Values of the estimated intensity function. A vector giving the intensity values at the points of the pattern \code{X}. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{pcf.ppp}. } \item{kernel}{ Choice of smoothing kernel, passed to \code{density}; see \code{\link{pcf}} and \code{\link{pcfinhom}}. } \item{nr}{ Integer. Number of subintervals for discretization of [0, rmax] to use in computing numerical integrals. } \item{bias.correct}{ Logical. Whether to use bias corrected version of the kernel estimate. See Details. } \item{cv.method}{ Choice of cross validation method: either \code{"compLik"} or \code{"leastSQ"} (partially matched). } \item{simple}{ Logical. Whether to use simple removal of spatial lag distances. See Details. } \item{srange}{ Optional. Numeric vector of length 2 giving the range of bandwidth values that should be searched to find the optimum bandwidth. } \item{\dots}{ Other arguments, passed to \code{\link{pcf}} or \code{\link{pcfinhom}}. } \item{verbose}{ Logical value indicating whether to print progress reports during the optimization procedure. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the optimum value of the cross-validation criterion occurs at one of the ends of the search interval.
} } \details{ This function selects an appropriate bandwidth \code{bw} for the kernel estimator of the pair correlation function of a point process, computed by \code{\link{pcf.ppp}} (homogeneous case) or \code{\link{pcfinhom}} (inhomogeneous case). With \code{cv.method="leastSQ"}, the bandwidth \eqn{h} is chosen to minimise an unbiased estimate of the integrated mean-square error criterion \eqn{M(h)} defined in equation (4) of Guan (2007b). The code implements the fast algorithm of Jalilian and Waagepetersen (2018). With \code{cv.method="compLik"}, the bandwidth \eqn{h} is chosen to maximise a composite likelihood cross-validation criterion \eqn{CV(h)} defined in equation (6) of Guan (2007a). The result is a numerical value giving the selected bandwidth. } \section{Definition of bandwidth}{ The bandwidth \code{bw} returned by \code{bw.pcf} is the standard deviation of the smoothing kernel, following the standard convention in \R. As mentioned in the documentation for \code{\link{density.default}} and \code{\link{pcf.ppp}}, this differs from other definitions of bandwidth that can be found in the literature. The scale parameter \code{h}, which is called the bandwidth in some literature, is defined differently. For example for the Epanechnikov kernel, \code{h} is the half-width of the kernel, and \code{bw=h/sqrt(5)}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}} } \examples{ b <- bw.pcf(redwood) plot(pcf(redwood, bw=b)) } \references{ Guan, Y. (2007a). A composite likelihood cross-validation approach in selecting bandwidth for the estimation of the pair correlation function. \emph{Scandinavian Journal of Statistics}, \bold{34}(2), 336--346. Guan, Y. (2007b). A least-squares cross-validation bandwidth selection approach in pair correlation function estimations. \emph{Statistics & Probability Letters}, \bold{77}(18), 1722--1729. Jalilian, A. and Waagepetersen, R. (2018) Fast bandwidth selection for estimation of the pair correlation function. \emph{Journal of Statistical Computation and Simulation}, \bold{88}(10), 2001--2011. \url{https://www.tandfonline.com/doi/full/10.1080/00949655.2018.1428606} } \author{ Rasmus Waagepetersen and Abdollah Jalilian. Adapted for \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/harmonise.msr.Rd0000644000176200001440000000173314141452520016675 0ustar liggesusers\name{harmonise.msr} \alias{harmonise.msr} \title{Make Measures Compatible} \description{ Convert several measures to a common quadrature scheme. } \usage{ \method{harmonise}{msr}(\dots) } \arguments{ \item{\dots}{ Any number of measures (objects of class \code{"msr"}). } } \details{ This function makes any number of measures compatible, by converting them all to a common quadrature scheme. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"msr"}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are measures. } \author{ \spatstatAuthors.
} \examples{ fit1 <- ppm(cells ~ x) fit2 <- ppm(rpoispp(ex=cells) ~ x) m1 <- residuals(fit1) m2 <- residuals(fit2) harmonise(m1, m2) s1 <- residuals(fit1, type="score") s2 <- residuals(fit2, type="score") harmonise(s1, s2) } \seealso{ \code{\link{harmonise}}, \code{\link{msr}} } \keyword{spatial} \keyword{manip} spatstat.core/man/Gres.Rd0000644000176200001440000000536014141452520015010 0ustar liggesusers\name{Gres} \Rdversion{1.1} \alias{Gres} \title{ Residual G Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{G} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Gres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Gcom}}. } \item{\dots}{ Arguments passed to \code{\link{Gcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{G} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Gres} first calls \code{\link{Gcom}} to compute both the nonparametric estimate of the \eqn{G} function and its model compensator. Then \code{Gres} computes the difference between them, which is the residual \eqn{G}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Gcom}}. Then \code{Gres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Gcom}}, \code{\link{Gest}}. Alternative functions: \code{\link{Kres}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model-fitting: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gres(fit0) plot(G0) # Hanisch correction estimate plot(G0, hres ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gres(fit1), hres ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Gres(cells, interaction=Strauss(0.12))) # How to make envelopes if(interactive()) { E <- envelope(fit1, Gres, model=fit1, nsim=39) plot(E) } # For computational efficiency Gc <- Gcom(fit1) G1 <- Gres(Gc) } \keyword{spatial} \keyword{models} spatstat.core/man/density.splitppp.Rd0000644000176200001440000000615014141452520017437 0ustar liggesusers\name{density.splitppp} \alias{density.splitppp} \alias{density.ppplist} \title{Kernel Smoothed Intensity of Split Point Pattern} \description{ Compute a kernel smoothed intensity function for each of the components of a split point pattern, or each of the point patterns in a list. 
} \usage{ \method{density}{splitppp}(x, \dots, weights=NULL, se=FALSE) \method{density}{ppplist}(x, \dots, weights=NULL, se=FALSE) } \arguments{ \item{x}{ Split point pattern (object of class \code{"splitppp"} created by \code{\link{split.ppp}}) to be smoothed. Alternatively a list of point patterns, of class \code{"ppplist"}. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} to control the smoothing, pixel resolution, edge correction etc. } \item{weights}{ Numerical weights for the points. See Details. } \item{se}{ Logical value indicating whether to compute standard errors as well. } } \value{ A list of pixel images (objects of class \code{"im"}) which can be plotted or printed; or a list of numeric vectors giving the values at specified points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. The argument \code{x} should be a list of point patterns, and should belong to one of the classes \code{"ppplist"} or \code{"splitppp"}. Typically \code{x} is obtained by applying the function \code{\link{split.ppp}} to a point pattern \code{y} by calling \code{split(y)}. This splits the points of \code{y} into several sub-patterns. A kernel estimate of the intensity function of each of the point patterns is computed using \code{\link{density.ppp}}. The return value is usually a list, each of whose entries is a pixel image (object of class \code{"im"}). The return value also belongs to the class \code{"solist"} and can be plotted or printed. If the argument \code{at="points"} is given, the result is a list of numeric vectors giving the intensity values at the data points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. The argument \code{weights} specifies numerical case weights for the data points. Normally it should be a list, with the same length as \code{x}. The entry \code{weights[[i]]} will determine the case weights for the pattern \code{x[[i]]}, and may be given in any format acceptable to \code{\link{density.ppp}}. For example, \code{weights[[i]]} can be a numeric vector of length equal to \code{npoints(x[[i]])}, a single numeric value, a numeric matrix, a pixel image (object of class \code{"im"}), or an \code{expression}. For convenience, \code{weights} can also be a single \code{expression} or a single pixel image (object of class \code{"im"}). } \seealso{ \code{\link{ppp.object}}, \code{\link{im.object}} } \examples{ Z <- density(split(amacrine), 0.05) plot(Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/summary.dppm.Rd0000644000176200001440000000414214141452520016541 0ustar liggesusers\name{summary.dppm} \alias{summary.dppm} \alias{print.summary.dppm} \title{Summarizing a Fitted Determinantal Point Process Model} \description{ \code{summary} method for class \code{"dppm"}. } \usage{ \method{summary}{dppm}(object, \dots, quick=FALSE) \method{print}{summary.dppm}(x, \dots) } \arguments{ \item{object}{ A fitted determinantal point process model (object of class \code{"dppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.dppm"} as returned by \code{summary.dppm}. 
} } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"dppm"}. An object of class \code{"dppm"} describes a fitted determinantal point process model. See \code{\link{dppm}}. \code{summary.dppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.dppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.dppm} is invoked implicitly when the user calls \code{summary.dppm} without assigning its value to anything. See the examples. } \value{ \code{summary.dppm} returns an object of class \code{"summary.dppm"}, while \code{print.summary.dppm} returns \code{NULL}. The result of \code{summary.dppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{trend}{Object of class \code{summary.ppm} summarising the trend} \item{repul}{Repulsiveness index} } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } fit <- dppm(jpines ~ 1, dppGauss) summary(fit) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.core/man/Lcross.inhom.Rd0000644000176200001440000000775414141452520016477 0ustar liggesusers\name{Lcross.inhom} \alias{Lcross.inhom} \title{ Inhomogeneous Cross Type L Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross-type \eqn{L} function. } \usage{ Lcross.inhom(X, i, j, \dots, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kcross.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{ij}(r)}{Lij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{ij}(r)}{Lij(r)} for a marked Poisson process, identically equal to \code{r} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}(r)}{Lij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Lcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kcross.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{ij}(r)}{Kij(r)} for the point pattern. 
The resulting values are then transformed by taking \eqn{L_{ij}(r) = \sqrt{K_{ij}(r)/\pi}}{Lij(r) = sqrt(Kij(r)/pi)}. } \references{ \Moller, J. and Waagepetersen, R. (2003) \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Lcross}}, \code{\link{Linhom}}, \code{\link{Kcross.inhom}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") L <- Lcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): fit parametric intensity model fit <- ppm(woods ~marks * polynom(x,y,2)) # evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) L <- Lcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Lcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/influence.ppm.Rd0000644000176200001440000000706114141452520016653 0ustar liggesusers\name{influence.ppm} \alias{influence.ppm} \title{ Influence Measure for Spatial Point Process Model } \description{ Computes the influence measure for a fitted spatial point process model. } \usage{ \method{influence}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process model \code{model}, this function computes the influence measure described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). The function \code{\link[stats]{influence}} is generic, and \code{influence.ppm} is the method for objects of class \code{"ppm"} representing point process models. The influence of a point process model is a value attached to each data point (i.e. each point of the point pattern to which the \code{model} was fitted). The influence value \eqn{s(x_i)}{s(x[i])} at a data point \eqn{x_i}{x[i]} represents the change in the maximised log (pseudo)likelihood that occurs when the point \eqn{x_i}{x[i]} is deleted. A relatively large value of \eqn{s(x_i)}{s(x[i])} indicates a data point with a large influence on the fitted model.
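For example, a minimal sketch of a typical calculation (an illustration only, using the \code{cells} dataset; any model fitted by \code{\link{ppm}} could be substituted):
\preformatted{
  fit <- ppm(cells ~ x + y)   # fit a log-linear Poisson model
  s <- influence(fit)         # influence value at each data point
  plot(s)                     # display the influence values
  as.ppp(s)                   # convert to a marked point pattern
}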
If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{influence.ppm} is an object of class \code{"influence.ppm"}. It can be printed and plotted. It can be converted to a marked point pattern by \code{as.ppp} (see \code{\link{as.ppp.influence.ppm}}). There are also methods for \code{[}, \code{\link{as.owin}}, \code{\link{domain}}, \code{\link{shift}}, \code{\link{integral}} and \code{\link{Smooth}}. } \value{ An object of class \code{"influence.ppm"}. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, 15--48. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat.core/man/fv.object.Rd0000644000176200001440000000316614141452520015772 0ustar liggesusers\name{fv.object} \alias{fv.object} %DoNotExport \title{Function Value Table} \description{ A class \code{"fv"} to support the convenient plotting of several estimates of the same function. } \details{ An object of this class is a convenient way of storing and plotting several different estimates of the same function. It is a data frame with extra attributes indicating the recommended way of plotting the function, and other information. There are methods for \code{print} and \code{plot} for this class. Objects of class \code{"fv"} are returned by \code{\link[spatstat.core]{Fest}}, \code{\link[spatstat.core]{Gest}}, \code{\link[spatstat.core]{Jest}} and \code{\link[spatstat.core]{Kest}}, along with many other functions. } \seealso{ See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}.
} \examples{ K <- Kest(cells) class(K) K # prints a sensible summary plot(K) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat.core/man/leverage.ppm.Rd0000644000176200001440000001016314141452520016472 0ustar liggesusers\name{leverage.ppm} \alias{leverage} \alias{leverage.ppm} \title{ Leverage Measure for Spatial Point Process Model } \description{ Computes the leverage measure for a fitted spatial point process model. } \usage{ leverage(model, \dots) \method{leverage}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored, except for the arguments \code{dimyx} and \code{eps} which are passed to \code{\link{as.mask}} to control the spatial resolution of the result. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ The function \code{leverage} is generic, and \code{leverage.ppm} is the method for objects of class \code{"ppm"}. Given a fitted spatial point process model \code{model}, the function \code{leverage.ppm} computes the leverage of the model, described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). The leverage of a spatial point process model is a function of spatial location, and is typically displayed as a colour pixel image. The leverage value \eqn{h(u)} at a spatial location \eqn{u} represents the change in the fitted trend of the model that would have occurred if a data point were to have occurred at the location \eqn{u}. A relatively large value of \eqn{h()} indicates a part of the space where the data have a \emph{potentially} strong effect on the fitted model (specifically, a strong effect on the intensity or conditional intensity of the fitted model) due to the values of the covariates. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the leverage calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{leverage.ppm} is an object of class \code{"leverage.ppm"}. It can be printed or plotted. It can be converted to a pixel image by \code{as.im} (see \code{\link{as.im.leverage.ppm}}). There are also methods for \code{contour}, \code{persp}, \code{[}, \code{as.function}, \code{\link{as.owin}}, \code{\link{domain}}, \code{\link{Smooth}}, \code{\link{integral}}, and \code{mean}. } \value{ An object of class \code{"leverage.ppm"}. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models.
\emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, 15--48. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.leverage.ppm}}, \code{\link{as.function.leverage.ppm}} } \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32, ndummy.min=16) X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) le <- leverage(fit) if(!offline) plot(le) mean(le) if(offline) spatstat.options(op) } \keyword{spatial} \keyword{models} \concept{diagnostics} spatstat.core/man/residuals.dppm.Rd0000644000176200001440000000215314141452520017037 0ustar liggesusers\name{residuals.dppm} \alias{residuals.dppm} \title{ Residuals for Fitted Determinantal Point Process Model } \description{ Given a determinantal point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{dppm}(object, \dots) } \arguments{ \item{object}{ The fitted determinantal point process model (an object of class \code{"dppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. } \seealso{ \code{\link{msr}}, \code{\link{dppm}} } \examples{ fit <- dppm(swedishpines ~ x, dppGauss, method="c") rr <- residuals(fit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.core/man/panel.contour.Rd0000644000176200001440000000442214141452520016675 0ustar liggesusers\name{panel.contour} \alias{panel.contour} \alias{panel.image} \alias{panel.histogram} \title{ Panel Plots using Colour Image or Contour Lines } \description{ These functions can be passed to \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}} to determine what kind of plotting is done in each panel of a multi-panel graphical display. } \usage{ panel.contour(x, y, ..., sigma = NULL) panel.image(x, y, ..., sigma = NULL) panel.histogram(x, ...) } \arguments{ \item{x,y}{ Coordinates of points in a scatterplot. } \item{\dots}{ Extra graphics arguments, passed to \code{\link{contour.im}}, \code{\link{plot.im}} or \code{\link[graphics]{rect}}, respectively, to control the appearance of the panel. } \item{sigma}{ Bandwidth of kernel smoother, on a scale where \eqn{x} and \eqn{y} range between 0 and 1. } } \details{ These functions can serve as one of the arguments \code{panel}, \code{lower.panel}, \code{upper.panel}, \code{diag.panel} passed to graphics commands like \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}}, to determine what kind of plotting is done in each panel of a multi-panel graphical display. In particular they work with \code{\link{pairs.im}}. The functions \code{panel.contour} and \code{panel.image} are suitable for the off-diagonal plots which involve two datasets \code{x} and \code{y}. They first rescale \code{x} and \code{y} to the unit square, then apply kernel smoothing with bandwidth \code{sigma} using \code{\link{density.ppp}}.
Then \code{panel.contour} draws a contour plot while \code{panel.image} draws a colour image. The function \code{panel.histogram} is suitable for the diagonal plots which involve a single dataset \code{x}. It displays a histogram of the data. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link{pairs.im}}, \code{\link{pairs.default}}, \code{\link{panel.smooth}} } \examples{ pairs(bei.extra, panel = panel.contour, diag.panel = panel.histogram) with(bei.extra, pairs(grad, elev, panel = panel.image, diag.panel = panel.histogram)) pairs(marks(finpines), panel=panel.contour, diag.panel=panel.histogram) } \keyword{spatial} \keyword{hplot} spatstat.core/man/methods.dppm.Rd0000644000176200001440000000303514141452520016507 0ustar liggesusers\name{methods.dppm} \alias{methods.dppm} %DoNotExport \alias{coef.dppm} \alias{formula.dppm} \alias{print.dppm} \alias{terms.dppm} \alias{labels.dppm} \title{ Methods for Determinantal Point Process Models } \description{ These are methods for the class \code{"dppm"}. } \usage{ \method{coef}{dppm}(object, \dots) \method{formula}{dppm}(x, \dots) \method{print}{dppm}(x, ...) \method{terms}{dppm}(x, \dots) \method{labels}{dppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"dppm"}, representing a fitted determinantal point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted determinantal point process model. It is obtained from \code{\link{dppm}}. The method \code{coef.dppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the interaction parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{as.ppm.dppm}}. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss, method="c") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat.core/man/markcorr.Rd0000644000176200001440000002663514141452520015734 0ustar liggesusers\name{markcorr} \alias{markcorr} \title{ Mark Correlation Function } \description{ Estimate the mark correlation function of a marked point pattern. } \usage{ markcorr(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL, internal=NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options.
} \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{weights}{ Optional. Numeric weights for each data point in \code{X}. A numeric vector, a pixel image, or a \code{function(x,y)}. Alternatively, an \code{expression} to be evaluated to yield the weights; the expression may involve the variables \code{x,y,marks} representing the coordinates and marks of \code{X}. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } \item{internal}{Do not use this argument.} } \value{ A function value table (object of class \code{"fv"}) or a list of function value tables, one for each column of marks. An object of class \code{"fv"} (see \code{\link{fv.object}}) is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{k_f(r)}{k[f](r)} when the marks attached to different points are independent, namely 1 } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark correlation function \eqn{k_f(r)}{k[f](r)} obtained by the edge corrections named. } \details{ By default, this command calculates an estimate of Stoyan's mark correlation \eqn{k_{mm}(r)}{k[mm](r)} for the point pattern. Alternatively if the argument \code{f} or \code{f1} is given, then it calculates Stoyan's generalised mark correlation \eqn{k_f(r)}{k[f](r)} with test function \eqn{f}. Theoretical definitions are as follows (see Stoyan and Stoyan (1994, p. 262)): \itemize{ \item For a point process \eqn{X} with numeric marks, Stoyan's mark correlation function \eqn{k_{mm}(r)}{k[mm](r)} is \deqn{ k_{mm}(r) = \frac{E_{0u}[M(0) M(u)]}{E[M M']} }{ k[mm](r) = E[0u](M(0) * M(u))/E(M * M') } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0),M(u)} denote the marks attached to these two points. On the denominator, \eqn{M,M'} are random marks drawn independently from the marginal distribution of marks, and \eqn{E} is the usual expectation. \item For a multitype point process \eqn{X}, the mark correlation is \deqn{ k_{mm}(r) = \frac{P_{0u}[M(0) = M(u)]}{P[M = M']} }{ k[mm](r) = P[0u](M(0) = M(u))/P(M = M') } where \eqn{P} and \eqn{P_{0u}}{P[0u]} denote the probability and conditional probability. \item The \emph{generalised} mark correlation function \eqn{k_f(r)}{k[f](r)} of a marked point process \eqn{X}, with test function \eqn{f}, is \deqn{ k_f(r) = \frac{E_{0u}[f(M(0),M(u))]}{E[f(M,M')]} }{ k[f](r) = E[0u](f(M(0),M(u)))/E(f(M,M')) } } The test function \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value.
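For instance, a user-supplied test function (see the discussion of the argument \code{f} below) might be the following sketch (\code{fdiff} is a hypothetical name, not part of the package):
\preformatted{
  fdiff <- function(m1, m2) { abs(m1 - m2) }  # nonnegative for any numeric marks
  md <- markcorr(spruces, f=fdiff)            # spruces has numeric marks
  plot(md)
}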
Common choices of \eqn{f} are: for continuous nonnegative real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2)}. Note that \eqn{k_f(r)}{k[f](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_f(r) \equiv 1}{k[f](r) = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern. The argument \code{f} determines the function to be applied to pairs of marks. It has a sensible default, which depends on the kind of marks in \code{X}. If the marks are numeric values, then \code{f <- function(m1, m2) { m1 * m2}} computes the product of two marks. If the marks are a factor (i.e. if \code{X} is a multitype point pattern) then \code{f <- function(m1, m2) { m1 == m2}} yields the value 1 when the two marks are equal, and 0 when they are unequal. These are the conventional definitions for numerical marks and multitype points respectively. The argument \code{f} may be specified by the user. It must be an \R function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). (It may also take additional arguments, passed through \code{fargs}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative, and \code{NA} values are not permitted. Alternatively the user may specify the argument \code{f1} instead of \code{f}. This indicates that the test function \eqn{f} should take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)} where \eqn{f_1(u)}{f1(u)} is given by the argument \code{f1}. The argument \code{f1} should be an \R function with at least one argument. (It may also take additional arguments, passed through \code{fargs}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. 
The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{\link[stats]{loess}} in the package \pkg{stats}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } If \code{normalise=FALSE} then the algorithm will compute only the numerator \deqn{ c_f(r) = E_{0u} f(M(0),M(u)) }{ c[f](r) = E[0u] f(M(0),M(u)) } of the expression for the mark correlation function. } \references{ Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ Mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. Mark cross-correlation function \code{\link{markcrosscorr}} for point patterns with several columns of marks. \code{\link{Kmark}} to estimate a cumulative function related to the mark correlation function. } \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- markcorr(spruces) plot(ms) # (2) simulated data with independent marks # X <- rpoispp(100) # X <- X \%mark\% runif(npoints(X)) # Xc <- markcorr(X) # plot(Xc) # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' X <- if(interactive()) amacrine else amacrine[c(FALSE, TRUE)] # (3) Kernel density estimate with Epanechnikov kernel # (as proposed by Stoyan & Stoyan) M <- markcorr(X, function(m1,m2) {m1==m2}, correction="translate", method="density", kernel="epanechnikov") # Note: kernel="epanechnikov" comes from help(density) # (4) Same again with explicit control over bandwidth # M <- markcorr(X, # correction="translate", method="density", # kernel="epanechnikov", bw=0.02) # see help(density) for correct interpretation of 'bw' \testonly{ niets <- markcorr(X, function(m1,m2){m1 == m2}, method="loess") if(require(sm)) niets <- markcorr(X, correction="isotropic", method="smrep", hmult=2) } # weighted mark correlation X <- if(interactive()) betacells else betacells[c(TRUE,FALSE)] Y <- subset(X, select=type) a <- marks(X)$area v <- markcorr(Y, weights=a) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/ragsAreaInter.Rd0000644000176200001440000000565214141452520016643 0ustar liggesusers\name{ragsAreaInter} \alias{ragsAreaInter} \title{ Alternating Gibbs Sampler for Area-Interaction Process } \description{ Generate a realisation of the area-interaction process using the alternating Gibbs sampler. Applies only when the interaction parameter \eqn{\eta}{eta} is greater than 1. } \usage{ ragsAreaInter(beta, eta, r, \dots, win = NULL, bmax = NULL, periodic = FALSE, ncycles = 100) } \arguments{ \item{beta}{ First order trend. A number, a pixel image (object of class \code{"im"}), or a \code{function(x,y)}. } \item{eta}{ Interaction parameter (canonical form) as described in the help for \code{\link{AreaInter}}. A number greater than 1. } \item{r}{ Disc radius in the model. A positive number. } \item{\dots}{ Additional arguments for \code{beta} if it is a function. } \item{win}{ Simulation window.
An object of class \code{"owin"}. (Ignored if \code{beta} is a pixel image.) } \item{bmax}{ Optional. The maximum possible value of \code{beta}, or a number larger than this. } \item{periodic}{ Logical value indicating whether to treat opposite sides of the simulation window as being the same, so that points close to one side may interact with points close to the opposite side. Feasible only when the window is a rectangle. } \item{ncycles}{ Number of cycles of the alternating Gibbs sampler to be performed. } } \details{ This function generates a simulated realisation of the area-interaction process (see \code{\link{AreaInter}}) using the alternating Gibbs sampler (see \code{\link{rags}}). It exploits a mathematical relationship between the (unmarked) area-interaction process and the two-type hard core process (Baddeley and Van Lieshout, 1995; Widom and Rowlinson, 1970). This relationship only holds when the interaction parameter \code{eta} is greater than 1 so that the area-interaction process is clustered. The parameters \code{beta,eta} are the canonical parameters described in the help for \code{\link{AreaInter}}. The first order trend \code{beta} may be a constant, a function, or a pixel image. The simulation window is determined by \code{beta} if it is a pixel image, and otherwise by the argument \code{win} (the default is the unit square). } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684. } \author{ \adrian. } \seealso{ \code{\link{rags}}, \code{\link{ragsMultiHard}} \code{\link{AreaInter}} } \examples{ plot(ragsAreaInter(100, 2, 0.07, ncycles=15)) } \keyword{spatial} \keyword{datagen} spatstat.core/man/clarkevans.test.Rd0000644000176200001440000000675114141452520017224 0ustar liggesusers\name{clarkevans.test} \alias{clarkevans.test} \title{Clark and Evans Test} \description{ Performs the Clark-Evans test of aggregation for a spatial point pattern. } \usage{ clarkevans.test(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), nsim=999) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string. The type of edge correction to be applied. See \code{\link{clarkevans}} } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See \code{\link{clarkevans}} } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } } \details{ This command uses the Clark and Evans (1954) aggregation index \eqn{R} as the basis for a crude test of clustering or ordering of a point pattern. The Clark-Evans index is computed by the function \code{\link{clarkevans}}. See the help for \code{\link{clarkevans}} for information about the Clark-Evans index \eqn{R} and about the arguments \code{correction} and \code{clipregion}. This command performs a hypothesis test of clustering or ordering of the point pattern \code{X}. The null hypothesis is Complete Spatial Randomness, i.e.\ a uniform Poisson process. 
The alternative hypothesis is specified by the argument \code{alternative}: \itemize{ \item \code{alternative="less"} or \code{alternative="clustered"}: the alternative hypothesis is that \eqn{R < 1} corresponding to a clustered point pattern; \item \code{alternative="greater"} or \code{alternative="regular"}: the alternative hypothesis is that \eqn{R > 1} corresponding to a regular or ordered point pattern; \item \code{alternative="two.sided"}: the alternative hypothesis is that \eqn{R \neq 1}{R != 1} corresponding to a clustered or regular pattern. } The Clark-Evans index \eqn{R} is computed for the data as described in \code{\link{clarkevans}}. If \code{correction="none"} and \code{nsim} is missing, the \eqn{p}-value for the test is computed by standardising \eqn{R} as proposed by Clark and Evans (1954) and referring the statistic to the standard Normal distribution. Otherwise, the \eqn{p}-value for the test is computed by Monte Carlo simulation of \code{nsim} realisations of Complete Spatial Randomness conditional on the observed number of points. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations. \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In \emph{Simulation methods in archaeology}, Cambridge University Press, pp 91--95. } \author{ \adrian } \seealso{ \code{\link{clarkevans}}, \code{\link{hopskel.test}} } \examples{ # Redwood data - clustered clarkevans.test(redwood) clarkevans.test(redwood, alternative="clustered") clarkevans.test(redwood, correction="cdf", nsim=39) } \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat.core/man/as.interact.Rd0000644000176200001440000000364114141452520016323 0ustar liggesusers\name{as.interact} \alias{as.interact} \alias{as.interact.fii} \alias{as.interact.interact} \alias{as.interact.ppm} \title{Extract Interaction Structure} \description{ Extracts the interpoint interaction structure from a point pattern model. } \usage{ as.interact(object) \method{as.interact}{fii}(object) \method{as.interact}{interact}(object) \method{as.interact}{ppm}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}) or an interpoint interaction structure (object of class \code{"interact"}). } } \details{ The function \code{as.interact} extracts the interpoint interaction structure from a suitable object. An object of class \code{"interact"} describes an interpoint interaction structure, before it has been fitted to point pattern data. The irregular parameters of the interaction (such as the interaction range) are fixed, but the regular parameters (such as interaction strength) are undetermined. Objects of this class are created by the functions \code{\link{Poisson}}, \code{\link{Strauss}} and so on. The main use of such objects is in a call to \code{\link{ppm}}. The function \code{as.interact} is generic, with methods for the classes \code{"ppm"}, \code{"fii"} and \code{"interact"}. The result is an object of class \code{"interact"} which can be printed. } \section{Note on parameters}{ This function does \bold{not} extract the fitted coefficients of the interaction. To extract the fitted interaction including the fitted coefficients, use \code{\link{fitin}}. } \value{ An object of class \code{"interact"} representing the interpoint interaction. 
This object can be printed and plotted. } \author{\adrian and \rolf } \seealso{ \code{\link{fitin}}, \code{\link{ppm}}. } \examples{ data(cells) model <- ppm(cells, ~1, Strauss(0.07)) f <- as.interact(model) f } \keyword{spatial} \keyword{models} spatstat.core/man/deriv.fv.Rd0000644000176200001440000000730614141452520015635 0ustar liggesusers\name{deriv.fv} \alias{deriv.fv} \title{ Calculate Derivative of Function Values } \description{ Applies numerical differentiation to the values in selected columns of a function value table. } \usage{ \method{deriv}{fv}(expr, which = "*", ..., method=c("spline", "numeric"), kinks=NULL, periodic=FALSE, Dperiodic=periodic) } \arguments{ \item{expr}{ Function values to be differentiated. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be differentiated. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} to control the differentiation algorithm, if \code{method="spline"}. } \item{method}{ Differentiation method. A character string, partially matched to either \code{"spline"} or \code{"numeric"}. } \item{kinks}{ Optional vector of \eqn{x} values where the derivative is allowed to be discontinuous. } \item{periodic}{ Logical value indicating whether the function \code{expr} is periodic. } \item{Dperiodic}{ Logical value indicating whether the resulting derivative should be a periodic function. } } \details{ This command performs numerical differentiation on the function values in a function value table (object of class \code{"fv"}). The differentiation is performed either by \code{\link[stats]{smooth.spline}} or by a naive numerical difference algorithm. The command \code{\link{deriv}} is generic. This is the method for objects of class \code{"fv"}. Differentiation is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding derivatives. The optional argument \code{which} specifies which of the columns of function values in \code{expr} will be differentiated. The default (indicated by the wildcard \code{which="*"}) is to differentiate all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{expr}. If the argument \code{kinks} is given, it should be a numeric vector giving the discontinuity points of the function: the value or values of the function argument at which the function is not differentiable. Differentiation will be performed separately on intervals between the discontinuity points. If \code{periodic=TRUE} then the function \code{expr} is taken to be periodic, with period equal to the range of the function argument in \code{expr}. The resulting derivative is periodic. If \code{periodic=FALSE} but \code{Dperiodic=TRUE}, then the \emph{derivative} is assumed to be periodic. This would be appropriate if \code{expr} is the cumulative distribution function of an angular variable, for example. } \value{ Another function value table (object of class \code{"fv"}) of the same format. 
} \author{\adrian and \rolf } \seealso{ \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}} } \examples{ G <- Gest(cells) plot(deriv(G, which=".", spar=0.5)) A <- pairorient(redwood, 0.05, 0.15) DA <- deriv(A, spar=0.6, Dperiodic=TRUE) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat.core/man/ic.kppm.Rd0000644000176200001440000000570214141452520015451 0ustar liggesusers\name{ic.kppm} \alias{ic} \alias{ic.ppm} \alias{ic.kppm} \title{Model selection criteria for the intensity function of a point process} \description{ Information criteria for selecting the intensity function model of a Poisson, cluster or Cox point process. } \usage{ ic(object) \method{ic}{ppm}(object) \method{ic}{kppm}(object) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } } \details{ This function returns information criteria for selecting the intensity function model of a Poisson, Cox or cluster point process fitted by first order composite likelihood (i.e. using the Poisson likelihood function). Degrees of freedom \eqn{df} for the information criteria are given by the trace of \eqn{S^{-1} \Sigma} where \eqn{S} is the sensitivity matrix and \eqn{\Sigma} is the variance matrix for the log composite likelihood score function. In case of a Poisson process, \eqn{df} is the number of parameters in the model for the intensity function. The composite Bayesian information criterion (cbic) is \eqn{-2\ell + \log(n) df}{-2 * ll + log(n) * df} where \eqn{\ell}{ll} is the maximal log first-order composite likelihood (Poisson loglikelihood for the intensity function) and \eqn{n} is the observed number of points. It reduces to the BIC criterion in case of a Poisson process. The composite information criterion (cic) is \eqn{-2\ell + 2 df}{- 2 * ll + 2 * df} and reduces to the AIC in case of a Poisson process. NOTE: the information criteria are for selecting the intensity function model (a set of covariates) within a given model class. They cannot be used to choose among different types of cluster or Cox point process models (e.g. they cannot be used to choose between Thomas and LGCP models). } \value{ A list with entries \code{loglike}, \code{cbic}, \code{cic} and \code{df}. Here \code{loglike} is the fitted log first-order composite likelihood, \code{cbic} is the composite Bayesian information criterion, \code{cic} is the composite likelihood criterion and \code{df} is the adjusted degrees of freedom for the fitted intensity function model. } \seealso{ \code{\link{kppm}} } \references{ Choiruddin, A., Coeurjolly, J.F. and Waagepetersen, R. (2020) Information criteria for inhomogeneous spatial point processes. \emph{Australian and New Zealand Journal of Statistics}. To appear. } \examples{ if(interactive()) { # model with one covariate fit1 <- kppm(bei~elev,data=bei.extra) ic1 <- ic(fit1) # model with two covariates fit2 <- kppm(bei~elev+grad,data=bei.extra) ic2 <- ic(fit2) # smallest cbic for fit1 but smallest cic for fit2 } } \author{ Achmad Choiruddin, Jean-Francois Coeurjolly and Rasmus Waagepetersen.
} \keyword{spatial} \keyword{models} \concept{point process model} \concept{Cox point process} \concept{cluster process} \concept{Neyman-Scott cluster process} spatstat.core/man/dppm.Rd0000644000176200001440000003562214145332773015063 0ustar liggesusers\name{dppm} \alias{dppm} \concept{point process model} \concept{determinantal point process} \title{Fit Determinantal Point Process Model} \description{ Fit a determinantal point process model to a point pattern. } \usage{ dppm(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm", "adapcl"), weightfun = NULL, control = list(), algorithm, statistic = "K", statargs = list(), rmax = NULL, epsilon = 0.01, covfunargs = NULL, use.gam = FALSE, nd = NULL, eps = NULL) } \arguments{ \item{formula}{ A \code{formula} in the \R language specifying the data (on the left side) and the form of the model to be fitted (on the right side). For a stationary model it suffices to provide a point pattern without a formula. See Details. } \item{family}{ Information specifying the family of point processes to be used in the model. Typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively a character string giving the name of a family function, or the result of calling one of the family functions. See Details. } \item{data}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{startpar}{ Named vector of starting parameter values for the optimization. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, \code{"adapcl"} for adaptive second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihoods or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{algorithm}{ Character string determining the mathematical algorithm to be used to solve the fitting problem. If \code{method="mincon", "clik2"} or \code{"palm"} this argument is passed to the generic optimization function \code{\link[stats]{optim}} (renamed as the argument \code{method}) with default \code{"Nelder-Mead"}. If \code{method="adapcl"} the argument is passed to the equation solver \code{\link[nleqslv]{nleqslv}}, with default \code{"Broyden"}. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{epsilon}{ Tuning parameter for the adaptive composite likelihood. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a determinantal point process model to a point pattern dataset as described in Lavancier et al. (2015). The model to be fitted is specified by the arguments \code{formula} and \code{family}. The argument \code{formula} should normally be a \code{formula} in the \R language.
The left hand side of the formula specifies the point pattern dataset to which the model should be fitted. This should be a single argument which may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The right hand side of the formula is called the \code{trend} and specifies the form of the \emph{logarithm of the intensity} of the process. Alternatively the argument \code{formula} may be a point pattern or quadrature scheme, and the trend formula is taken to be \code{~1}. The argument \code{family} specifies the family of point processes to be used in the model. It is typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively it may be a character string giving the name of a family function, or the result of calling one of the family functions. A family function belongs to class \code{"detpointprocfamilyfun"}. The result of calling a family function is a point process family, which belongs to class \code{"detpointprocfamily"}. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. If the trend formula is \code{~1} (the default if a point pattern or quadrature scheme is given rather than a \code{"formula"}) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation). The interaction parameters of the model are then fitted either by minimum contrast estimation, or by a composite likelihood method (maximum composite likelihood, maximum Palm likelihood, or by solving the adaptive composite likelihood estimating equation). \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) interaction parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the interaction parameters of the model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the interaction parameters of the model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is heavily inspired by Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to \code{\link{clusterfit}} to control the minimum contrast fitting algorithm. 
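For illustration, a hedged sketch of a minimum contrast fit using the pair correlation function, with the smoothing controlled through \code{statargs} (this mirrors the Examples for \code{dppm}):
\preformatted{
  fit <- dppm(swedishpines ~ 1, dppGauss,
              statistic="pcf", statargs=list(stoyan=0.2))
  fit
}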
} \item{Composite likelihood:}{ If \code{method = "clik2"} the interaction parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta)) - (sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{rho(d; theta)} is the pair correlation function of the model with interaction parameters \eqn{\theta}{theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d \le R)}, where \eqn{R} is \code{rmax/2}. } \item{Palm likelihood:}{ If \code{method = "palm"} the interaction parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ sum[i,j] w(x[i], x[j]) log(lambda[P](x[j] | x[i]; theta)) - integral[D] w(x[i], u) lambda[P](u | x[i]; theta) du } with the same notation as above. Here \eqn{\lambda_P(u \mid v; \theta)}{lambda[P](u | v; theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}. } \item{Adaptive Composite likelihood:}{ If \code{method = "adapcl"} the clustering parameters of the model will be fitted by solving the adaptive second order composite likelihood estimating equation (Lavancier et al, 2021). The estimating function is \deqn{ \sum_{u, v} w(\epsilon \frac{|g(0; \theta) - 1|}{g(\|u-v\|; \theta)-1}) \frac{\nabla_\theta g(\|u-v\|;\theta)}{g(\|u-v\|;\theta)} - \int_D \int_D w(\epsilon \frac{|g(0; \theta) - 1|}{g(\|u-v\|; \theta)-1}) \nabla_\theta g(\|u-v\|; \theta) \rho(u) \rho(v)\, du\, dv }{ sum[u,v] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1)) g'(||u-v||; theta) / g(||u-v||; theta) - integral[D,D] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1)) g'(||u-v||; theta) rho(u) rho(v) du dv } where the sum is taken over all distinct pairs of points. Here \eqn{g(d;\theta)}{g(d; theta)} is the pair correlation function with parameters \eqn{\theta}{theta}. The partial derivative with respect to \eqn{\theta}{theta} is \eqn{g'(d; \theta)}{g'(d; theta)}, and \eqn{\rho(u)}{rho(u)} denotes the fitted intensity function of the model. The tuning parameter \eqn{\epsilon}{epsilon} is independent of the data. It can be specified by the argument \code{epsilon} and has default value \eqn{0.01}. The function \eqn{w} in the estimating function is a weighting function of bounded support \eqn{[-1,1]}. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is \eqn{ w(d) = 1(\|d\| \le 1) \exp(1/(d^2-1))}{w(d) = 1(||d|| \le 1) exp(1/(d^2-1))}. The estimating equation is solved using the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. The package \pkg{nleqslv} must be installed in order to use this option.
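For example (a sketch only, assuming the package \pkg{nleqslv} is installed and \code{X} is a point pattern dataset), a fit with a smaller value of the tuning parameter could be requested by \code{dppm(X ~ 1, dppGauss, method="adapcl", epsilon=0.005)}.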
} } It is also possible to fix any parameters desired before the optimisation by specifying them as \code{name=value} in the call to the family function. See Examples. } \section{Optimization algorithm}{ The following details allow greater control over the fitting procedure. For the first three fitting methods (\code{method="mincon"}, \code{"clik2"} and \code{"palm"}), the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the arguments \code{control} and \code{algorithm}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}). For \code{method="adapcl"}, the estimating equation is solved using the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. Arguments available for controlling the solver are documented in the help for \code{\link[nleqslv]{nleqslv}}; they include \code{control}, \code{globStrat}, \code{startparm} for the initial estimates and \code{algorithm} for the method. The package \pkg{nleqslv} must be installed in order to use this option. } \value{ An object of class \code{"dppm"} representing the fitted model. There are methods for printing, plotting, predicting and simulating objects of this class. } \seealso{ methods for \code{dppm} objects: \code{\link{plot.dppm}}, \code{\link{fitted.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{methods.dppm}}, \code{\link{as.ppm.dppm}}, \code{\link{Kmodel.dppm}}, \code{\link{pcfmodel.dppm}}. Minimum contrast fitting algorithm: higher level interface \code{\link{clusterfit}}; low-level algorithm \code{\link{mincontrast}}. Determinantal point process models: \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}}, \code{\link{dppPowerExp}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}}. } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. Lavancier, F., Poinas, A., and Waagepetersen, R. (2021) Adaptive estimating function inference for nonstationary determinantal point processes. \emph{Scandinavian Journal of Statistics}, \bold{48} (1), 87--107. Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258.
} \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } dppm(jpines ~ 1, dppGauss) dppm(jpines ~ 1, dppGauss, method="c") dppm(jpines ~ 1, dppGauss, method="p") dppm(jpines ~ 1, dppGauss, method="a") if(interactive()) { # Fixing the intensity at lambda=2 rather than the Poisson MLE 2.04: dppm(jpines ~ 1, dppGauss(lambda=2)) # The following is quite slow (using K-function) dppm(jpines ~ x, dppMatern) } # much faster using pair correlation function dppm(jpines ~ x, dppMatern, statistic="pcf", statargs=list(stoyan=0.2)) # Fixing the Matern shape parameter at nu=2 rather than estimating it: dppm(jpines ~ x, dppMatern(nu=2)) } \author{ \spatstatAuthors. Adaptive composite likelihood method contributed by Chiara Fend and modified by Adrian Baddeley. } \keyword{spatial} \keyword{models} spatstat.core/man/intensity.slrm.Rd0000644000176200001440000000350314144333466017121 0ustar liggesusers\name{intensity.slrm} \alias{intensity.slrm} \title{ Intensity of Fitted Spatial Logistic Regression Model } \description{ Computes the intensity of a fitted spatial logistic regression model, treated as a point process model. } \usage{ \method{intensity}{slrm}(X, \dots) } \arguments{ \item{X}{ A fitted spatial logistic regression model (object of class \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{predict.slrm}} in some cases. See Details. } } \details{ This is a method for the generic function \code{\link{intensity}} for spatial logistic regression models (class \code{"slrm"}). The fitted spatial logistic regression model \code{X} is interpreted as a point process model. The intensity of a point process model is defined as the expected number of random points per unit area. The fitted probabilities of presence according to \code{X} are converted to intensity values. The result is a numerical value if \code{X} is stationary, and a pixel image if \code{X} is non-stationary. In the latter case, the resolution of the pixel image is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.slrm}}. } \value{ A numeric value (if the model is stationary) or a pixel image. } \references{ Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. \code{DOI: 10.1214/10-EJS581} } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppm}} } \examples{ fitS <- slrm(swedishpines ~ 1) intensity(fitS) fitX <- slrm(swedishpines ~ x) intensity(fitX) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.core/man/increment.fv.Rd0000644000176200001440000000166514141452520016512 0ustar liggesusers\name{increment.fv} \alias{increment.fv} \title{ Increments of a Function } \description{ Compute the change in the value of a function \code{f} when the function argument increases by \code{delta}. } \usage{ increment.fv(f, delta) } \arguments{ \item{f}{ Object of class \code{"fv"} representing a function. } \item{delta}{ Numeric. The increase in the value of the function argument. } } \details{ This command computes the new function \deqn{g(x) = f(x+h) - f(x-h)} where \code{h = delta/2}. The value of \eqn{g(x)} is the change in the value of \eqn{f} over an interval of length \code{delta} centred at \eqn{x}. } \value{ Another object of class \code{"fv"} compatible with \code{f}.
} \author{\adrian, \rolf and \ege } \seealso{ \code{\link{fv.object}}, \code{\link{deriv.fv}} } \examples{ plot(increment.fv(Kest(cells), 0.05)) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat.core/man/densityVoronoi.Rd0000644000176200001440000001417014141452520017142 0ustar liggesusers\name{densityVoronoi} \alias{densityVoronoi} \alias{densityVoronoi.ppp} \title{Intensity Estimate of Point Pattern Using Voronoi-Dirichlet Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern using the Dirichlet-Voronoi tessellation. } \usage{ densityVoronoi(X, \dots) \method{densityVoronoi}{ppp}(X, f = 1, \dots, counting=FALSE, fixed=FALSE, nrep = 1, verbose=TRUE) } \arguments{ \item{X}{Point pattern dataset (object of class \code{"ppp"}).} \item{f}{ Fraction (between 0 and 1 inclusive) of the data points that will be used to build a tessellation for the intensity estimate. } \item{\dots}{Arguments passed to \code{\link{as.im}} determining the pixel resolution of the result. } \item{counting}{ Logical value specifying the choice of estimation method. See Details. } \item{fixed}{ Logical. If \code{FALSE} (the default), the data points are independently randomly thinned, so the number of data points that are retained is random. If \code{TRUE}, the number of data points retained is fixed. See Details. } \item{nrep}{Number of independent repetitions of the randomised procedure.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity. If \code{f=1} (the default), the Voronoi estimate (Barr and Schoenberg, 2010) is computed: the point pattern \code{X} is used to construct a Voronoi/Dirichlet tessellation (see \code{\link{dirichlet}}); the areas of the Dirichlet tiles are computed; the estimated intensity in each tile is the reciprocal of the tile area. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{f=0}, the intensity estimate at every location is equal to the average intensity (number of points divided by window area). The result is a pixel image of intensity estimates which are constant. If \code{f} is strictly between 0 and 1, the estimation method is applied to a random subset of \code{X}. This randomised procedure is repeated \code{nrep} times, and the results are averaged. The subset is selected as follows: \itemize{ \item if \code{fixed=FALSE}, the dataset \code{X} is randomly thinned by deleting or retaining each point independently, with probability \code{f} of retaining a point. \item if \code{fixed=TRUE}, a random sample of fixed size \code{m} is taken from the dataset \code{X}, where \code{m} is the largest integer less than or equal to \code{f*n} and \code{n} is the number of points in \code{X}. } Then the intensity estimate is calculated as follows: \itemize{ \item if \code{counting = FALSE} (the default), the thinned pattern is used to construct a Dirichlet tessellation and form the Voronoi estimate (Barr and Schoenberg, 2010) which is then adjusted by a factor \code{1/f} or \code{n/m} as appropriate, to obtain an estimate of the intensity of \code{X} in the tile.
\item if \code{counting = TRUE}, the randomly selected subset \code{A} is used to construct a Dirichlet tessellation, while the complementary subset \code{B} (consisting of points that were not selected in the sample) is used for counting to calculate a quadrat count estimate of intensity. For each tile of the Dirichlet tessellation formed by \code{A}, we count the number of points of \code{B} falling in the tile, and divide by the area of the same tile, to obtain an estimate of the intensity of the pattern \code{B} in the tile. This estimate is adjusted by \code{1/(1-f)} or \code{n/(n-m)} as appropriate to obtain an estimate of the intensity of \code{X} in the tile. } Ogata et al. (2003) and Ogata (2004) estimated intensity using the Dirichlet-Voronoi tessellation in a modelling context. Baddeley (2007) proposed intensity estimation by subsampling with \code{0 < f < 1}, and used the technique described above with \code{fixed=TRUE} and \code{counting=TRUE}. Barr and Schoenberg (2010) described and analysed the Voronoi estimator (corresponding to \code{f=1}). Moradi et al. (2019) developed the subsampling technique with \code{fixed=FALSE} and \code{counting=FALSE} and called it the \emph{smoothed Voronoi estimator}. } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{adaptive.density}}, \code{\link{density.ppp}}, \code{\link{dirichlet}}, \code{\link{im.object}}. } \references{ Baddeley, A. (2007) Validation of statistical models for spatial point patterns. In J.G. Babu and E.D. Feigelson (eds.) \emph{SCMA IV: Statistical Challenges in Modern Astronomy IV}, volume 317 of Astronomical Society of the Pacific Conference Series, San Francisco, California USA, 2007. Pages 22--38. Barr, C., and Schoenberg, F.P. (2010). On the Voronoi estimator for the intensity of an inhomogeneous planar Poisson process. \emph{Biometrika} \bold{97} (4), 977--984. Moradi, M., Cronie, O., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators. \emph{Statistics and Computing} \bold{29} (5), 995--1010. Ogata, Y. (2004) Space-time model for regional seismicity and detection of crustal stress changes. \emph{Journal of Geophysical Research}, \bold{109}, 2004. Ogata, Y., Katsura, K. and Tanemura, M. (2003). Modelling heterogeneous space-time occurrences of earthquakes and its residual analysis. \emph{Applied Statistics} \bold{52}, 499--509. } \examples{ plot(densityVoronoi(nztrees, f=1), main="Voronoi estimate") nr <- if(interactive()) 100 else 5 plot(densityVoronoi(nztrees, f=0.5, nrep=nr), main="smoothed Voronoi estimate") } \author{ \spatstatAuthors and Mehdi Moradi. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/intensity.ppm.Rd0000644000176200001440000000570314144333466016744 0ustar liggesusers\name{intensity.ppm} \alias{intensity.ppm} \title{ Intensity of Fitted Point Process Model } \description{ Computes the intensity of a fitted point process model. } \usage{ \method{intensity}{ppm}(X, \dots) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed to \code{\link{predict.ppm}} in some cases. See Details. } } \details{ This is a method for the generic function \code{\link{intensity}} for fitted point process models (class \code{"ppm"}). The intensity of a point process model is the expected number of random points per unit area.
If \code{X} is a Poisson point process model, the intensity of the process is computed exactly. The result is a numerical value if \code{X} is a stationary Poisson point process, and a pixel image if \code{X} is non-stationary. (In the latter case, the resolution of the pixel image is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}.) If \code{X} is another Gibbs point process model, the intensity is computed approximately using the Poisson-saddlepoint approximation (Baddeley and Nair, 2012a, 2012b, 2016; Anderssen et al, 2014). The approximation is currently available for pairwise-interaction models (Baddeley and Nair, 2012a, 2012b) and for the area-interaction model and Geyer saturation model (Baddeley and Nair, 2016). For a non-stationary Gibbs model, the pseudostationary solution (Baddeley and Nair, 2012b; Anderssen et al, 2014) is used. The result is a pixel image, whose resolution is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}. } \value{ A numeric value (if the model is stationary) or a pixel image. } \references{ Anderssen, R.S., Baddeley, A., DeHoog, F.R. and Nair, G.M. (2014) Solution of an integral equation arising in spatial point process theory. \emph{Journal of Integral Equations and Applications} \bold{26} (4) 437--453. Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. \code{DOI: 10.1002/sta4.5} Baddeley, A. and Nair, G. (2016) Poisson-saddlepoint approximation for spatial point processes with infinite order interaction. Submitted for publication. } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ fitP <- ppm(swedishpines ~ 1) intensity(fitP) fitS <- ppm(swedishpines ~ 1, Strauss(9)) intensity(fitS) fitSx <- ppm(swedishpines ~ x, Strauss(9)) lamSx <- intensity(fitSx) fitG <- ppm(swedishpines ~ 1, Geyer(9, 1)) lamG <- intensity(fitG) fitA <- ppm(swedishpines ~ 1, AreaInter(7)) lamA <- intensity(fitA) } \author{ \adrian and Gopalan Nair. } \keyword{spatial} \keyword{models} spatstat.core/man/plot.kppm.Rd0000644000176200001440000000563614141452520016042 0ustar liggesusers\name{plot.kppm} \alias{plot.kppm} \title{Plot a fitted cluster point process} \description{ Plots a fitted cluster point process model, displaying the fitted intensity and the fitted \eqn{K}-function. } \usage{ \method{plot}{kppm}(x, \dots, what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) } \arguments{ \item{x}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } \item{pause}{ Logical value specifying whether to pause between plots. } \item{xname}{ Optional. Character string. The name of the object \code{x} for use in the title of the plot. } } \details{ This is a method for the generic function \code{\link[graphics]{plot}} for the class \code{"kppm"} of fitted cluster point process models. The argument \code{x} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. 
The options (partially matched) are \code{"intensity"}, \code{"statistic"} and \code{"cluster"}. This command is capable of producing three different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} \item{what="cluster"}{specifies a fitted cluster, which is computed by \code{\link{clusterfield}} and plotted by \code{\link{plot.im}}. It is only meaningful for Poisson cluster (incl. Neyman-Scott) processes, and it is turned off for log-Gaussian Cox processes (LGCP). If the model is stationary (and non-LGCP) this option is turned on by default and shows a fitted cluster positioned at the centroid of the observation window. For non-stationary (and non-LGCP) models this option is invoked only if explicitly requested, and in that case an additional argument \code{locations} (see \code{\link{clusterfield}}) must be given to specify where to position the parent point(s).} } Alternatively \code{what="all"} selects all available options. } \value{ Null. } \examples{ data(redwood) fit <- kppm(redwood~1, "Thomas") plot(fit) } \seealso{ \code{\link{kppm}}, \code{\link{plot.ppm}} } \author{ \adrian, \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/alltypes.Rd0000644000176200001440000002341714141452520015750 0ustar liggesusers\name{alltypes} \alias{alltypes} \title{Calculate Summary Statistic for All Types in a Multitype Point Pattern} \description{ Given a marked point pattern, this computes the estimates of a selected summary function (\eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} etc) of the pattern, for all possible combinations of marks, and returns these functions in an array. } \usage{ alltypes(X, fun="K", \dots, dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) } \arguments{ \item{X}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"} or \code{"lpp"}. } \item{fun}{The summary function. Either an \R function, or a character string indicating the summary function required. Options for strings are \code{"F"}, \code{"G"}, \code{"J"}, \code{"K"}, \code{"L"}, \code{"pcf"}, \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}, \code{"Ldot"}. } \item{\dots}{ Arguments passed to the summary function (and to the function \code{\link{envelope}} if appropriate) } \item{dataname}{Character string giving an optional (alternative) name to the point pattern, different from what is given in the call. This name, if supplied, may be used by \code{\link{plot.fasp}()} in forming the title of the plot. If not supplied it defaults to the parsing of the argument supplied as \code{X} in the call. } \item{verb}{ Logical value. If \code{verb} is true then terse ``progress reports'' (just the values of the mark indices) are printed out when the calculations for that combination of marks are completed. } \item{envelope}{ Logical value. If \code{envelope} is true, then simulation envelopes of the summary function will also be computed. See Details.
} \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This routine is a convenient way to analyse the dependence between types in a multitype point pattern. It computes the estimates of a selected summary function of the pattern, for all possible combinations of marks. It returns these functions in an array (an object of class \code{"fasp"}) amenable to plotting by \code{\link{plot.fasp}()}. The argument \code{fun} specifies the summary function that will be evaluated for each type of point, or for each pair of types. It may be either an \R function or a character string. Suppose that the points have possible types \eqn{1,2,\ldots,m} and let \eqn{X_i}{X[i]} denote the pattern of points of type \eqn{i} only. If \code{fun="F"} then this routine calculates, for each possible type \eqn{i}, an estimate of the Empty Space Function \eqn{F_i(r)}{F[i](r)} of \eqn{X_i}{X[i]}. See \code{\link{Fest}} for explanation of the empty space function. The estimate is computed by applying \code{\link{Fest}} to \eqn{X_i}{X[i]} with the optional arguments \code{\dots}. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"}, the routine calculates, for each pair of types \eqn{(i,j)}, an estimate of the ``\code{i}-to-\code{j}'' cross-type function \eqn{G_{ij}(r)}{G[i,j](r)}, \eqn{J_{ij}(r)}{J[i,j](r)}, \eqn{K_{ij}(r)}{K[i,j](r)} or \eqn{L_{ij}(r)}{L[i,j](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X_j}{X[j]}. See \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gcross}} etc) to \code{X} using each possible value of the arguments \code{i,j}, together with the optional arguments \code{\dots}. If \code{fun} is \code{"pcf"} the routine calculates the cross-type pair correlation function \code{\link{pcfcross}} between each pair of types. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the routine calculates, for each type \eqn{i}, an estimate of the ``\code{i}-to-any'' dot-type function \eqn{G_{i\bullet}(r)}{G[i.](r)}, \eqn{J_{i\bullet}(r)}{J[i.](r)}, \eqn{K_{i\bullet}(r)}{K[i.](r)} or \eqn{L_{i\bullet}(r)}{L[i.](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X}{X}. See \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gdot}} etc) to \code{X} using each possible value of the argument \code{i}, together with the optional arguments \code{\dots}. The letters \code{"G"}, \code{"J"}, \code{"K"} and \code{"L"} are interpreted as abbreviations for \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} and \code{\link{Lcross}} respectively, assuming the point pattern is marked. If the point pattern is unmarked, the appropriate function \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} is invoked instead. If \code{envelope=TRUE}, then as well as computing the value of the summary function for each combination of types, the algorithm also computes simulation envelopes of the summary function for each combination of types.
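For example, a call of the form \code{alltypes(X, "K", envelope=TRUE, nsim=19)} (a sketch; compare the Examples below) would compute pointwise simulation envelopes of the \eqn{K} functions, based on 19 simulations, for each combination of types.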
The arguments \code{\dots} are passed to the function \code{\link{envelope}} to control the number of simulations, the random process generating the simulations, the construction of envelopes, and so on. When \code{envelope=TRUE} it is possible that errors could occur because the simulated point patterns do not satisfy the requirements of the summary function (for example, because the simulated pattern is empty and \code{fun} requires at least one point). If the number of such errors exceeds the maximum permitted number \code{maxnerr}, then the envelope algorithm will give up, and will return the empirical summary function for the data point pattern, \code{fun(X)}, in place of the envelope. } \value{ A function array (an object of class \code{"fasp"}, see \code{\link{fasp.object}}). This can be plotted using \code{\link{plot.fasp}}. If the pattern is not marked, the resulting ``array'' has dimensions \eqn{1 \times 1}{1 x 1}. Otherwise the following is true: If \code{fun="F"}, the function array has dimensions \eqn{m \times 1}{m * 1} where \eqn{m} is the number of different marks in the point pattern. The entry at position \code{[i,1]} in this array is the result of applying \code{\link{Fest}} to the points of type \code{i} only. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the function array again has dimensions \eqn{m \times 1}{m * 1}. The entry at position \code{[i,1]} in this array is the result of \code{Gdot(X, i)}, \code{Jdot(X, i)}, \code{Kdot(X, i)} or \code{Ldot(X, i)} respectively. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"} (or their abbreviations \code{"G"}, \code{"J"}, \code{"K"} or \code{"L"}), the function array has dimensions \eqn{m \times m}{m * m}. The \code{[i,j]} entry of the function array (for \eqn{i \neq j}{i != j}) is the result of applying the function \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} to the pair of types \code{(i,j)}. The diagonal \code{[i,i]} entry of the function array is the result of applying the univariate function \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} to the points of type \code{i} only. If \code{envelope=FALSE}, then each function entry \code{fns[[i]]} retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}}. The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J functions, and \code{cbind(trans,theo) ~ r} for K and L functions. If \code{envelope=TRUE}, then each function entry \code{fns[[i]]} has the same format as the output of the \code{\link{envelope}} command. } \note{ Sizeable amounts of memory may be needed during the calculation. } \seealso{ \code{\link{plot.fasp}}, \code{\link{fasp.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}}, \code{\link{envelope}}. } \examples{ # bramblecanes (3 marks).
bram <- bramblecanes \testonly{ bram <- bram[c(seq(1, 744, by=20), seq(745, 823, by=4))] } bF <- alltypes(bram,"F",verb=TRUE) plot(bF) if(interactive()) { plot(alltypes(bram,"G")) plot(alltypes(bram,"Gdot")) } # Swedishpines (unmarked). swed <- swedishpines \testonly{ swed <- swed[1:25] } plot(alltypes(swed,"K")) plot(alltypes(amacrine, "pcf"), ylim=c(0,1.3)) # A setting where you might REALLY want to use dataname: # xxx <- alltypes(ppp(Melvin$x,Melvin$y, # window=as.owin(c(5,20,15,50)),marks=clyde), # fun="F",verb=TRUE,dataname="Melvin") # envelopes bKE <- alltypes(bram,"K",envelope=TRUE,nsim=19) # global version: # bFE <- alltypes(bram,"F",envelope=TRUE,nsim=19,global=TRUE) # extract one entry as.fv(bKE[1,1]) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/pcf.fasp.Rd0000644000176200001440000001110014141452520015605 0ustar liggesusers\name{pcf.fasp} \alias{pcf.fasp} \title{Pair Correlation Function obtained from array of K functions} \description{ Estimates the (bivariate) pair correlation functions of a point pattern, given an array of (bivariate) K functions. } \usage{ \method{pcf}{fasp}(X, \dots, method="c") } \arguments{ \item{X}{ An array of multitype \eqn{K} functions (object of class \code{"fasp"}). } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. This can be thought of as a matrix \code{Y} each of whose entries \code{Y[i,j]} is a function value table (class \code{"fv"}) representing the pair correlation function between points of type \code{i} and points of type \code{j}. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an array of estimates of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivatives. It is a method for the generic function \code{\link{pcf}}. The argument \code{X} should be a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) containing several estimates of \eqn{K} functions. This should have been obtained from \code{\link{alltypes}} with the argument \code{fun="K"}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \pkg{stats} package.
Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D., Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # multitype point pattern KK <- alltypes(amacrine, "K") p <- pcf.fasp(KK, spar=0.5, method="b") plot(p) # strong inhibition between points of the same type } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/Geyer.Rd0000644000176200001440000001101214141452520015162 0ustar liggesusers\name{Geyer} \alias{Geyer} \title{Geyer's Saturation Point Process Model} \description{ Creates an instance of Geyer's saturation point process model which can then be fitted to point pattern data. } \usage{ Geyer(r,sat) } \arguments{ \item{r}{Interaction radius. A positive real number.} \item{sat}{Saturation threshold. A non-negative real number.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of Geyer's saturation point process with interaction radius \eqn{r} and saturation threshold \code{sat}. } \details{ Geyer (1999) introduced the \dQuote{saturation process}, a modification of the Strauss process (see \code{\link{Strauss}}) in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{s}. The interaction structure of this model is implemented in the function \code{\link{Geyer}()}. The saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma^{\min(s, t(x_i, X))} }{ beta gamma^min(s, t(x[i],X)) } to the probability density of the point pattern, where \eqn{t(x_i, X)}{t(x[i],X)} denotes the number of \sQuote{close neighbours} of \eqn{x_i}{x[i]} in the pattern \eqn{X}.
A close neighbour of \eqn{x_i}{x[i]} is a point \eqn{x_j}{x[j]} with \eqn{j \neq i}{j != i} such that the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]} is less than or equal to \eqn{r}. If the saturation threshold \eqn{s} is set to infinity, this model reduces to the Strauss process (see \code{\link{Strauss}}) with interaction parameter \eqn{\gamma^2}{gamma^2}. If \eqn{s = 0}, the model reduces to the Poisson point process. If \eqn{s} is a finite positive number, then the interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case of the Strauss process), with values \eqn{\gamma < 1}{gamma < 1} describing an \sQuote{ordered} or \sQuote{inhibitive} pattern, and values \eqn{\gamma > 1}{gamma > 1} describing a \sQuote{clustered} or \sQuote{attractive} pattern. The nonstationary saturation process is similar except that the value \eqn{\beta}{beta} is replaced by a function \eqn{\beta(x_i)}{beta(x[i])} of location. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the saturation process interaction is yielded by \code{Geyer(r, sat)} where the arguments \code{r} and \code{sat} specify the Strauss interaction radius \eqn{r} and the saturation threshold \eqn{s}, respectively. See the examples below. Note the only arguments are the interaction radius \code{r} and the saturation threshold \code{sat}. When \code{r} and \code{sat} are fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Geyer()}. } \section{Zero saturation}{ The value \code{sat=0} is permitted by \code{Geyer}, but this is not very useful. For technical reasons, when \code{\link{ppm}} fits a Geyer model with \code{sat=0}, the default behaviour is to return an \dQuote{invalid} fitted model in which the estimate of \eqn{\gamma}{gamma} is \code{NA}. In order to get a Poisson process model returned when \code{sat=0}, you would need to set \code{emend=TRUE} in the call to \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}. To make an interaction object like \code{\link{Geyer}} but having multiple interaction radii, see \code{\link{BadGey}} or \code{\link{Hybrid}}. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \examples{ ppm(cells, ~1, Geyer(r=0.07, sat=2)) # fit the stationary saturation process to `cells' } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/bw.frac.Rd0000644000176200001440000000435014141452520015430 0ustar liggesusers\name{bw.frac} \alias{bw.frac} \title{ Bandwidth Selection Based on Window Geometry } \description{ Select a smoothing bandwidth for smoothing a point pattern, based only on the geometry of the spatial window. The bandwidth is a specified quantile of the distance between two independent random points in the window. 
} \usage{ bw.frac(X, \dots, f=1/4) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or point pattern (object of class \code{"ppp"}) or other data which can be converted to a window using \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{distcdf}}. } \item{f}{ Probability value (between 0 and 1) determining the quantile of the distribution. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{sigma} is computed as a quantile of the distance between two independent random points in the window. The default is the lower quartile of this distribution. If \eqn{F(r)} is the cumulative distribution function of the distance between two independent random points uniformly distributed in the window, then the value returned is the quantile with probability \eqn{f}. That is, the bandwidth is the value \eqn{r} such that \eqn{F(r) = f}. The cumulative distribution function \eqn{F(r)} is computed using \code{\link{distcdf}}. We then compute the smallest number \eqn{r} such that \eqn{F(r) \ge f}{F(r) >= f}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.frac"} which can be plotted to show the cumulative distribution function and the selected quantile. } \seealso{ For estimating point process intensity, see \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}. For other smoothing purposes, see \code{\link{bw.stoyan}}, \code{\link{bw.smoothppp}}, \code{\link{bw.relrisk}}. } \examples{ h <- bw.frac(letterR) h plot(h, main="bw.frac(letterR)") } \author{ \spatstatAuthors.
} \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/spatstat.core-internal.Rd0000644000176200001440000007763414142420411020524 0ustar liggesusers\name{spatstat.core-internal} \title{Internal spatstat.core functions} \alias{[.localpcfmatrix} \alias{[.rat} \alias{accumulateStatus} \alias{active.interactions} \alias{adaptcoef} \alias{adjust.ratfv} \alias{affine.msr} \alias{ang2rad} \alias{areadelta2} \alias{as.data.frame.bw.optim} \alias{as.data.frame.fv} \alias{as.ppm.rppm} \alias{assemble.plot.objects} \alias{augment.msr} \alias{bandwidth.is.infinite} \alias{BartCalc} \alias{bermantestCalc} \alias{bermantestEngine} \alias{bigvaluerule} \alias{bind.ratfv} \alias{blankcoefnames} \alias{bt.frame} \alias{bw.optim} \alias{calc.DR} \alias{calc.NNIR} \alias{calc.SAVE} \alias{calc.SIR} \alias{calc.TSE} \alias{cannot.update} \alias{censtimeCDFest} \alias{change.default.expand} \alias{check.separable} \alias{check.testfun} \alias{circticks} \alias{clarkevansCalc} \alias{coef.summary.kppm} \alias{coef.summary.ppm} \alias{coef.summary.slrm} \alias{coef.vblogit} \alias{compatible.rat} \alias{compileCDF} \alias{condSimCox} \alias{conform.ratfv} \alias{CressieReadStatistic} \alias{CressieReadSymbol} \alias{CressieReadName} \alias{cutoff2Dkernel} \alias{CVforPCF} \alias{damaged.ppm} \alias{data.mppm} \alias{datagen.runifpointOnLines} \alias{datagen.runifpoisppOnLines} \alias{datagen.rpoisppOnLines} \alias{default.clipwindow} \alias{deltasuffstat} \alias{Deviation} \alias{dfbetas.ppmInfluence} \alias{densitycrossEngine} \alias{densitypointsEngine} \alias{diagnose.ppm.engine} \alias{digestCovariates} \alias{digital.volume} \alias{dim.fasp} \alias{dim.msr} \alias{dimnames.fasp} \alias{dimnames<-.fasp} \alias{dimnames.msr} \alias{distributecbind} \alias{doMultiStraussHard} \alias{dppDpcf} \alias{dppmFixAlgorithm} \alias{dppmFixIntensity} \alias{envelopeEngine} \alias{envelopeProgressData} \alias{envelopeTest} \alias{envelope.hasenvelope} \alias{envelope.matrix} \alias{equalpairs} \alias{evalCovar} \alias{evalCovar.ppm} \alias{evalCovar.slrm} \alias{evalCovariate} \alias{evalInteraction} \alias{evalInterEngine} \alias{evalPairPotential} \alias{evaluate2Dkernel} \alias{expandwinPerfect} \alias{ExpSmoothLog} \alias{extractAIC.slrm} \alias{extractAtomicQtests} \alias{fakeNeyScot} \alias{family.vblogit} \alias{fasp} \alias{f3engine} \alias{f3Cengine} \alias{fill.coefs} \alias{findCovariate} \alias{fii} \alias{findbestlegendpos} \alias{findcbind} \alias{flatfname} \alias{flipxy.msr} \alias{forbid.logi} \alias{FormatFaspFormulae} \alias{fvexprmap} \alias{fvlabels} \alias{fvlabels<-} \alias{fvlabelmap} \alias{fvlegend} \alias{g3engine} \alias{g3Cengine} \alias{getdataname} \alias{getglmdata} \alias{getglmfit} \alias{getglmsubset} \alias{getppmdatasubset} \alias{getppmOriginalCovariates} \alias{getRandomFieldsModelGen} \alias{getSumFun} \alias{geyercounts} \alias{geyerdelta2} \alias{GLMpredict} \alias{good.correction.K} %\alias{gridadjacencymatrix} %DoNotExport \alias{hackglmmPQL} \alias{hasenvelope} \alias{hasglmfit} \alias{HermiteCoefs} \alias{handle.rshift.args} \alias{hierarchicalordering} \alias{hiermat} \alias{ho.engine} \alias{illegal.iformula} \alias{implemented.for.K} \alias{impliedpresence} \alias{impliedcoefficients} \alias{influence.ppmInfluence} \alias{instantiate.interact} \alias{interactionfamilyname} \alias{intermaker} \alias{is.atomicQtest} \alias{is.cadlag} \alias{is.expandable} \alias{is.expandable.ppm} \alias{is.expandable.rmhmodel} \alias{is.interact} \alias{is.marked.mppm} 
\alias{is.marked.msr} \alias{is.marked.slrm} \alias{is.mppm} \alias{is.multitype.mppm} \alias{is.multitype.msr} \alias{is.multitype.slrm} \alias{is.poisson.mppm} \alias{is.scov} \alias{k3engine} \alias{Kborder.engine} \alias{Knone.engine} \alias{Krect.engine} \alias{Kount} \alias{Kwtsum} \alias{Kpcf.kppm} \alias{Kmodel.slrm} \alias{killinteraction} \alias{km.rs.opt} \alias{kppmComLik} \alias{kppmMinCon} \alias{kppmPalmLik} \alias{kppmCLadap} \alias{kraever} \alias{kraeverRandomFields} \alias{labels.ppm} \alias{leverage.ppmInfluence} \alias{localKengine} \alias{localKmultiEngine} \alias{localpcfengine} \alias{localpcfmatrix} \alias{logi.engine} \alias{logLik.vblogit} \alias{lookup2DkernelInfo} \alias{LurkEngine} \alias{makefvlabel} \alias{maskLaslett} \alias{match2DkernelName} \alias{match.kernel} \alias{mctestSigtraceEngine} \alias{meanlistfv} \alias{model.se.image} \alias{modelFrameGam} \alias{mpl.engine} \alias{mpl.get.covariates} \alias{mpl.prepare} \alias{mpl.usable} \alias{MultiPair.checkmatrix} \alias{names<-.fv} \alias{newformula} \alias{newstyle.coeff.handling} \alias{nncleanEngine} \alias{nndcumfun} \alias{no.trend.ppm} \alias{objsurfEngine} \alias{optimConverged} \alias{optimStatus} \alias{optimNsteps} \alias{outdated.interact} \alias{oversize.quad} \alias{pairs.listof} \alias{pairs.solist} \alias{PairPotentialType} \alias{partialModelMatrix} \alias{pcf3engine} \alias{pcfmulti.inhom} \alias{pcfmodel.slrm} \alias{ploterodewin} \alias{ploterodeimage} \alias{plot.addvar} \alias{plot.bw.frac} \alias{plot.bw.optim} \alias{plot.localpcfmatrix} \alias{plot.lurk} \alias{plot.minconfit} \alias{plot.parres} \alias{plot.plotpairsim} \alias{plot.qqppm} \alias{plot.spatialcdf} \alias{PoisSaddle} \alias{PoisSaddleArea} \alias{PoisSaddleGeyer} \alias{PoisSaddlePairwise} \alias{polyLaslett} \alias{PPMmodelmatrix} \alias{ppm.default} \alias{ppmCovariates} \alias{ppmDerivatives} \alias{ppmInfluenceEngine} \alias{predict.profilepl} \alias{predict.vblogit} \alias{prefixfv} \alias{printStatus} \alias{printStatusList} \alias{print.addvar} \alias{print.bt.frame} \alias{print.bw.frac} \alias{print.bw.optim} \alias{print.diagppm} \alias{print.densityfun} \alias{print.detpointprocfamily} \alias{print.detpointprocfamilyfun} \alias{print.envelope} \alias{print.fasp} \alias{print.fv} \alias{print.fvfun} \alias{print.hasenvelope} \alias{print.hierarchicalordering} \alias{print.influence.ppm} \alias{print.interact} \alias{print.intermaker} \alias{print.isf} \alias{print.laslett} \alias{print.leverage.ppm} \alias{print.localpcfmatrix} \alias{print.lurk} \alias{print.minconfit} \alias{print.mppm} \alias{print.msr} \alias{print.parres} \alias{print.plotpairsim} \alias{print.plotppm} \alias{print.profilepl} \alias{print.quadrattest} \alias{print.qqppm} \alias{print.rat} \alias{print.rmhcontrol} \alias{print.rmhexpand} \alias{print.rmhmodel} \alias{print.rmhstart} \alias{print.rmhInfoList} \alias{print.rppm} \alias{print.Smoothfun} \alias{print.summary.mppm} \alias{print.summary.rmhexpand} \alias{print.summary.ssf} \alias{print.summary.slrm} \alias{print.vblogit} \alias{quad.mppm} \alias{quadBlockSizes} \alias{quadrat.testEngine} \alias{ratfv} \alias{RandomFieldsSafe} \alias{reach.slrm} \alias{rebadge.as.crossfun} \alias{rebadge.as.dotfun} \alias{rebadge.fv} \alias{rebadgeLabels} \alias{reconcile.fv} \alias{reduceformula} \alias{reheat} \alias{reincarnate.interact} \alias{RelevantDeviation} \alias{rename.fv} \alias{rescale.msr} \alias{resid4plot} \alias{resid1plot} \alias{resid1panel} \alias{resolve.2D.kernel} 
\alias{resolveEinfo} \alias{resolve.foxall.window} \alias{resolve.lambda} \alias{resolve.lambda.cross} \alias{resolve.vargamma.shape} \alias{rhohatEngine} \alias{rhohatCalc} \alias{rMaternInhibition} \alias{rmax.Rigid} \alias{rmax.rule} \alias{RmhExpandRule} \alias{rmhsnoop} \alias{rmhResolveControl} \alias{rmhResolveExpansion} \alias{rmhResolveTypes} \alias{rmhSnoopEnv} \alias{rmhcontrol.rmhcontrol} \alias{rmhcontrol.list} \alias{rmhEngine} \alias{rmhmodel.rmhmodel} \alias{rmhstart.rmhstart} \alias{rmhstart.list} \alias{rmpoint.I.allim} \alias{rocData} \alias{rocModel} \alias{roseContinuous} \alias{rotate.msr} \alias{rpoint.multi} \alias{runifpoispp} \alias{runifpoisppOnLines} \alias{safeFiniteValue} \alias{safePositiveValue} \alias{scalardilate.msr} \alias{scanmeasure} \alias{scanmeasure.ppp} \alias{scanmeasure.im} \alias{scanBinomLRTS} \alias{scanPoisLRTS} \alias{second.moment.calc} \alias{second.moment.engine} \alias{sewpcf} \alias{sewsmod} \alias{shift.influence.ppm} \alias{shift.leverage.ppm} \alias{shift.quadrattest} \alias{shift.msr} \alias{signalStatus} \alias{simulate.profilepl} \alias{simulrecipe} \alias{slr.prepare} \alias{slrAssemblePixelData} \alias{slrmInfluence} \alias{Smooth.solist} \alias{smoothcrossEngine} \alias{smoothpointsEngine} \alias{spatstatClusterModelInfo} \alias{spatstatDPPModelInfo} \alias{spatstatRmhInfo} \alias{spatialCDFframe} \alias{spatialCDFtest} \alias{spatialCDFtestCalc} \alias{sphere.volume} \alias{splitHybridInteraction} \alias{sp.foundclass} \alias{sp.foundclasses} \alias{strausscounts} \alias{suffloc} \alias{suffstat.generic} \alias{suffstat.poisson} \alias{summarise.trend} \alias{summary.envelope} \alias{summary.mppm} \alias{summary.msr} \alias{summary.profilepl} \alias{summary.rmhexpand} \alias{summary.vblogit} \alias{thinjump} \alias{tweak.coefs} \alias{tweak.fv.entry} \alias{tweak.ratfv.entry} \alias{twostage.test} \alias{twostage.envelope} \alias{unitname.msr} \alias{unitname<-.msr} \alias{update.ippm} \alias{update.msr} \alias{update.rmhstart} \alias{validate2Dkernel} \alias{validate.angles} \alias{validate.weights} \alias{vanilla.fv} \alias{varcountEngine} %\alias{vblogit} %DoNotExport %\alias{vblogit.fmla} %DoNotExport \alias{versionstring.interact} \alias{versionstring.ppm} \alias{weightedclosepairs} \alias{windows.mppm} \alias{X2testEngine} %%%%%%% \description{ Internal spatstat.core functions. 
} \usage{ \method{[}{localpcfmatrix}(x, i, \dots) \method{[}{rat}(x, \dots) accumulateStatus(x, stats) active.interactions(object) adaptcoef(new.coef, fitcoef, drop) adjust.ratfv(f, columns, numfactor, denfactor) \method{affine}{msr}(X, mat, vec, \dots) ang2rad(ang, unit, start, clockwise) areadelta2(X, r, \dots, sparseOK) assemble.plot.objects(xlim, ylim, \dots, lines, polygon) \method{as.ppm}{rppm}(object) \method{as.data.frame}{bw.optim}(x, \dots) \method{as.data.frame}{fv}(x, \dots) augment.msr(x, \dots, sigma, recompute) bandwidth.is.infinite(sigma) BartCalc(fY, fK) bermantestCalc(fram, which, alternative, \dots) bermantestEngine(model, covariate, which, alternative, \dots, modelname, covname, dataname) bigvaluerule(objfun, objargs, startpar, \dots) bind.ratfv(x, numerator, denominator, labl, desc, preferred, ratio, quotient) blankcoefnames(x) bt.frame(Q, trend, interaction, \dots, covariates, correction, rbord, use.gam, allcovar) bw.optim(cv, h, iopt, \dots, cvname, hname, criterion, optimum, warnextreme, hargnames, unitname) calc.DR(COV, z, Dim) calc.NNIR(COV, z, pos, Dim) calc.SAVE(COV, z, Dim) calc.SIR(COV, z) calc.TSE(COV, z, pos, Dim1, Dim2) cannot.update(\dots) censtimeCDFest(o, cc, d, breaks, \dots, KM, RS, HAN, RAW, han.denom, tt, pmax) change.default.expand(x, newdefault) check.separable(dmat, covname, isconstant, fatal) check.testfun(f, f1, X) circticks(R, at, unit, start, clockwise, labels) clarkevansCalc(X, correction, clipregion, working) \method{coef}{summary.kppm}(object, \dots) \method{coef}{summary.ppm}(object, \dots) \method{coef}{summary.slrm}(object, \dots) \method{coef}{vblogit}(object, \dots) \method{compatible}{rat}(A, B, \dots) compileCDF(D, B, r, \dots, han.denom, check) condSimCox(object, nsim, \dots, window, n.cond, w.cond, giveup, maxchunk, verbose, drop) conform.ratfv(x) CressieReadStatistic(OBS,EXP,lambda,normalise,named) CressieReadSymbol(lambda) CressieReadName(lambda) cutoff2Dkernel(kernel, sigma, varcov, \dots, scalekernel, cutoff, fatal) CVforPCF(bw, stuff) damaged.ppm(object) data.mppm(x) datagen.runifpointOnLines(n, L) datagen.runifpoisppOnLines(lambda, L) datagen.rpoisppOnLines(lambda, L, lmax, \dots, check) default.clipwindow(object, epsilon) deltasuffstat(model, \dots, restrict, dataonly, sparseOK, quadsub, force, warn.forced, verbose, use.special) Deviation(x, ref, leaveout, n, xi) \method{dfbetas}{ppmInfluence}(model, \dots) densitycrossEngine(Xdata, Xquery, sigma, \dots, kernel, scalekernel, weights, edge, varcov, diggle, sorted, cutoff) densitypointsEngine(x, sigma, \dots, kernel, scalekernel, weights, edge, varcov, leaveoneout, diggle, sorted, spill, cutoff, debug) diagnose.ppm.engine(object, \dots, type, typename, opt, sigma, rbord, compute.sd, compute.cts, envelope, nsim, nrank, rv, oldstyle, splineargs, verbose) digestCovariates(\dots, W) digital.volume(range, nval, vside) \method{dim}{fasp}(x) \method{dim}{msr}(x) \method{dimnames}{fasp}(x) \method{dimnames}{fasp}(x) <- value \method{dimnames}{msr}(x) distributecbind(x) doMultiStraussHard(iradii, hradii, types) dppDpcf(model, \dots) dppmFixIntensity(DPP, lambda, po) dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) envelopeEngine(X, fun, simul, nsim, nrank, \dots, funargs, funYargs, verbose, clipdata, transform, global, ginterval, use.theory, alternative, scale, clamp, savefuns, savepatterns, saveresultof, weights, nsim2, VARIANCE, nSD, Yname, maxnerr, rejectNA, silent, maxerr.action, internal, cl, envir.user, expected.arg, do.pwrong, foreignclass, collectrubbish) 
envelopeProgressData(X, fun, \dots, exponent, alternative, leaveout, scale, clamp, normalize, deflate, rmin, save.envelope, savefuns, savepatterns) envelopeTest(X, \dots, exponent, alternative, rinterval, leaveout, scale, clamp, tie.rule, interpolate, save.interpolant, save.envelope, savefuns, savepatterns, Xname, badXfatal, verbose) \method{envelope}{hasenvelope}(Y, \dots, Yname) \method{envelope}{matrix}(Y, \dots, rvals, observed, theory, funX, nsim, nsim2, jsim, jsim.mean, type, alternative, scale, clamp, csr, use.theory, nrank, ginterval, nSD, savefuns, check, Yname, do.pwrong, weights, precomputed, gaveup) equalpairs(U, X, marked=FALSE) evalCovar(model, covariate, \dots) \method{evalCovar}{ppm}(model, covariate, \dots, lambdatype, dimyx, eps, interpolate, jitter, jitterfactor, modelname, covname, dataname, subset) \method{evalCovar}{slrm}(model, covariate, \dots, lambdatype, jitter, jitterfactor, modelname, covname, dataname, subset) evalCovariate(covariate, locations) evalInteraction(X,P,E,interaction,correction,splitInf,\dots, precomputed,savecomputed) evalInterEngine(X,P,E,interaction,correction,splitInf,\dots, Reach,precomputed,savecomputed) evalPairPotential(X,P,E,pairpot,potpars,R) evaluate2Dkernel(kernel, x, y, sigma, varcov, \dots, scalekernel) expandwinPerfect(W, expand, amount) ExpSmoothLog(X, \dots, at, weights) \method{extractAIC}{slrm}(fit, scale = 0, k = 2, \dots) extractAtomicQtests(x) fakeNeyScot(Y, lambda, win, saveLambda, saveparents) \method{family}{vblogit}(object, \dots) fasp(fns, which, formulae, dataname, title, rowNames, colNames, checkfv) f3engine(x, y, z, box, vside, range, nval, correction) f3Cengine(x, y, z, box, vside, rmax, nrval) fill.coefs(coefs, required) findCovariate(covname, scope, scopename=NULL) findbestlegendpos(\dots) findcbind(root, depth, maxdepth) fii(interaction, coefs, Vnames, IsOffset, vnameprefix) flatfname(x) \method{flipxy}{msr}(X) forbid.logi(object) FormatFaspFormulae(f, argname) fvexprmap(x) fvlabels(x, expand=FALSE) fvlabels(x) <- value fvlabelmap(x, dot=TRUE) fvlegend(object, elang) g3engine(x, y, z, box, rmax, nrval, correction) g3Cengine(x, y, z, box, rmax, nrval) getdataname(defaultvalue, \dots, dataname) getglmdata(object, drop=FALSE) getglmfit(object) getglmsubset(object) getppmdatasubset(object) getppmOriginalCovariates(object) getRandomFieldsModelGen(model) getSumFun(abbreviation, classname, ismarked, fatal) geyercounts(U,X,r,sat,Xcounts,EqualPairs) geyerdelta2(X,r,sat,\dots,sparseOK, correction) GLMpredict(fit, data, coefs, changecoef, type) good.correction.K(X) %gridadjacencymatrix(dims) hackglmmPQL(fixed, random, family, data, correlation, weights, control, niter, verbose, subset, \dots, reltol) hasenvelope(X, E) hasglmfit(object) HermiteCoefs(order) handle.rshift.args(W, \dots, radius, width, height, edge, clip, edgedefault) hierarchicalordering(i, s) hiermat(x, h) ho.engine(model, \dots, nsim, nrmh, start, control, verb) illegal.iformula(ifmla, itags, dfvarnames) implemented.for.K(correction, windowtype, explicit) impliedpresence(tags, formula, df, extranames=character(0)) impliedcoefficients(object, tag) \method{influence}{ppmInfluence}(model, \dots) instantiate.interact(x, par) interactionfamilyname(x) intermaker(f, blank) is.atomicQtest(x) is.cadlag(s) is.expandable(x) \method{is.expandable}{ppm}(x) \method{is.expandable}{rmhmodel}(x) is.interact(x) \method{is.marked}{mppm}(X, \dots) \method{is.marked}{msr}(X, \dots) \method{is.marked}{slrm}(X, \dots) is.mppm(x) \method{is.multitype}{mppm}(X, \dots) 
\method{is.multitype}{msr}(X, \dots) \method{is.multitype}{slrm}(X, \dots) \method{is.poisson}{mppm}(x) is.scov(x) k3engine(x, y, z, box, rmax, nrval, correction) Kborder.engine(X, rmax, nr, correction, weights, ratio) Knone.engine(X, rmax, nr, weights, ratio) Krect.engine(X, rmax, nr, correction, weights, ratio, fname, use.integers) Kount(dIJ, bI, b, breaks) Kwtsum(dIJ, bI, wIJ, b, w, breaks, fatal) Kpcf.kppm(model, what) \method{Kmodel}{slrm}(model, \dots) killinteraction(model) km.rs.opt(o, cc, d, breaks, KM, RS) kppmComLik(X, Xname, po, clusters, control, stabilize, weightfun, rmax, algorithm, DPP, \dots, pspace) kppmMinCon(X, Xname, po, clusters, control, stabilize, statistic, statargs, algorithm, DPP, \dots) kppmPalmLik(X, Xname, po, clusters, control, stabilize, weightfun, rmax, algorithm, DPP, \dots, pspace) kppmCLadap(X, Xname, po, clusters, control, weightfun, rmax, epsilon, DPP, algorithm, \dots, startpar, globStrat) kraever(package, fatal) kraeverRandomFields() \method{labels}{ppm}(object, \dots) \method{leverage}{ppmInfluence}(model, \dots) localKengine(X, \dots, wantL, lambda, rmax, correction, verbose, rvalue) localKmultiEngine(X, from, to, lambdaFrom, lambdaTo, \dots, rmax, wantL, correction, verbose, rvalue, sigma, varcov, lambdaX, update, leaveoneout, Iexplain, Jexplain, Ikey, Jkey, miss.update, miss.leave) localpcfengine(X, \dots, delta, rmax, nr, stoyan, lambda, rvalue) localpcfmatrix(X, i, \dots, lambda, delta, rmax, nr, stoyan) logi.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, correction, rbord, covfunargs, allcovar, vnamebase, vnameprefix, justQ, savecomputed, precomputed, VB) \method{logLik}{vblogit}(object, \dots) lookup2DkernelInfo(kernel) LurkEngine(object, type, cumulative, plot.sd, quadpoints, wts, Z, subQset, covvalues, resvalues, clip, clipwindow, cov.is.im, covrange, typename, covname, cl, clenv, oldstyle, check, verbose, nx, splineargs, envelope, nsim, nrank, Xsim, internal, checklength) makefvlabel(op, accent, fname, sub, argname) maskLaslett(X, \dots, eps, dimyx, xy, oldX, verbose, plotit) match.kernel(kernel) match2DkernelName(kernel) mctestSigtraceEngine(R, devdata, devsim, \dots, interpolate, confint, alpha, exponent, unitname) meanlistfv(z, \dots) model.se.image(fit, W, \dots, what) modelFrameGam(formula, \dots) mpl.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, covfunargs, correction, rbord, use.gam, gcontrol, GLM, GLMfamily, GLMcontrol, famille, forcefit, nd, eps, allcovar, callstring, precomputed, savecomputed, preponly, rename.intercept, justQ, weightfactor) mpl.get.covariates(covariates, locations, type, covfunargs, need.deriv) mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname, callstring, \dots, subsetexpr, covfunargs, allcovar, precomputed, savecomputed, vnamebase, vnameprefix, warn.illegal, warn.unidentifiable, weightfactor, skip.border, clip.interaction, splitInf) mpl.usable(x) MultiPair.checkmatrix(mat, n, matname, naok, zerook, asymmok) \method{names}{fv}(x) <- value newformula(old, change, eold, enew, expandpoly) newstyle.coeff.handling(object) nncleanEngine(kthNND, k, d, \dots, tol, maxit, plothist, lineargs, verbose, Xname) nndcumfun(X, \dots, r) no.trend.ppm(x) objsurfEngine(objfun, optpar, objargs, \dots, dotargs, objname, ngrid, xlim, ylim, ratio, verbose) optimConverged(x) optimStatus(x, call) optimNsteps(x) outdated.interact(object) oversize.quad(Q, \dots, nU, nX, p) PairPotentialType(pairpot) \method{pairs}{listof}(\dots, plot=TRUE) 
\method{pairs}{solist}(\dots, plot=TRUE) \method{plot}{localpcfmatrix}(x, \dots) PoisSaddle(beta, fi) PoisSaddleArea(beta, fi) PoisSaddleGeyer(beta, fi) PoisSaddlePairwise(beta, fi) polyLaslett(X, \dots, oldX, verbose, plotit) PPMmodelmatrix(object, data, \dots, subset, Q, keepNA, irregular, splitInf) printStatus(x, errors.only) printStatusList(stats) partialModelMatrix(X,D,model,callstring,\dots) pcf3engine(x, y, z, box, rmax, nrval, correction, delta) pcfmulti.inhom(X, I, J, lambdaI = NULL, lambdaJ = NULL, \dots, r = NULL, breaks = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), sigma = NULL, varcov = NULL, Iname = "points satisfying condition I", Jname = "points satisfying condition J") \method{pcfmodel}{slrm}(model, \dots) ploterodewin(W1, W2, col.edge, col.inside, do.plot, \dots) ploterodeimage(W, Z, \dots, Wcol, rangeZ, colsZ, do.plot) \method{plot}{addvar}(x, \dots, do.points=FALSE) \method{plot}{bw.frac}(x, \dots) \method{plot}{bw.optim}(x, \dots, showopt, optargs) \method{plot}{lurk}(x, \dots, shade) \method{plot}{minconfit}(x, \dots) \method{plot}{parres}(x, \dots) \method{plot}{plotpairsim}(x, \dots) \method{plot}{qqppm}(x, \dots, limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") \method{plot}{spatialcdf}(x, \dots, xlab, ylab) \method{ppm}{default}(Q, trend, interaction, \dots, covariates, data, covfunargs, subset, clipwin, correction, rbord, use.gam, method, forcefit, emend, project, prior.mean, prior.var, nd, eps, gcontrol, nsim, nrmh, start, control, verb, callstring) ppmCovariates(model) ppmDerivatives(fit, what, Dcovfun, loc, covfunargs) ppmInfluenceEngine(fit, what, \dots, iScore, iHessian, iArgs, drop, method, fine, precomputed, sparseOK, fitname, multitypeOK, entrywise, matrix.action, dimyx, eps, geomsmooth) \method{predict}{vblogit}(object, newdata, type, se.fit, dispersion, terms, na.action, \dots) \method{predict}{profilepl}(object, \dots) prefixfv(x, tagprefix, descprefix, lablprefix, whichtags) \method{print}{addvar}(x, \dots) \method{print}{bt.frame}(x, \dots) \method{print}{bw.frac}(x, \dots) \method{print}{bw.optim}(x, \dots) \method{print}{densityfun}(x, \dots) \method{print}{diagppm}(x, \dots) \method{print}{detpointprocfamily}(x, \dots) \method{print}{detpointprocfamilyfun}(x, \dots) \method{print}{envelope}(x, \dots) \method{print}{fasp}(x, \dots) \method{print}{fv}(x, \dots, tight) \method{print}{fvfun}(x, \dots) \method{print}{hasenvelope}(x, \dots) \method{print}{hierarchicalordering}(x, \dots) \method{print}{influence.ppm}(x, \dots) \method{print}{interact}(x, \dots, family, brief, banner) \method{print}{intermaker}(x, \dots) \method{print}{isf}(x, \dots) \method{print}{laslett}(x, \dots) \method{print}{leverage.ppm}(x, \dots) \method{print}{localpcfmatrix}(x, \dots) \method{print}{lurk}(x, \dots) \method{print}{minconfit}(x, \dots) \method{print}{mppm}(x, \dots) \method{print}{msr}(x, \dots) \method{print}{parres}(x, \dots) \method{print}{plotppm}(x, \dots) \method{print}{plotpairsim}(x, \dots) \method{print}{profilepl}(x, \dots) \method{print}{quadrattest}(x, \dots) \method{print}{qqppm}(x, \dots) \method{print}{rat}(x, \dots) \method{print}{rmhcontrol}(x, \dots) \method{print}{rmhexpand}(x, \dots, prefix=TRUE) \method{print}{rmhmodel}(x, \dots) \method{print}{rmhstart}(x, \dots) \method{print}{rmhInfoList}(x, \dots) \method{print}{rppm}(x, \dots) \method{print}{Smoothfun}(x, \dots) \method{print}{summary.mppm}(x, \dots, brief) 
\method{print}{summary.rmhexpand}(x, \dots) \method{print}{summary.slrm}(x, \dots) \method{print}{summary.ssf}(x, \dots) \method{print}{vblogit}(x, \dots) quad.mppm(x) quadBlockSizes(nX, nD, p, nMAX, announce) RandomFieldsSafe() ratfv(df, numer, denom, \dots, ratio) \method{reach}{slrm}(x, \dots) rebadge.as.crossfun(x, main, sub, i, j) rebadge.as.dotfun(x, main, sub, i) rebadge.fv(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp, new.dotnames, new.preferred, new.formula, new.tags) rebadgeLabels(x, new.fname) reconcile.fv(\dots) reduceformula(fmla, deletevar, verbose) reheat(model, invtemp) RelevantDeviation(x, alternative, clamp, scaling) rename.fv(x, fname, ylab, yexp) \method{rescale}{msr}(X, s, unitname) resolveEinfo(x, what, fallback, warn, atomic) resolve.foxall.window(X, Y, W, warn.trim) resolve.lambda(X, lambda, \dots, sigma, varcov, leaveoneout, update) resolve.lambda.cross(X, I, J, lambdaI, lambdaJ, \dots, lambdaX, sigma, varcov, leaveoneout, update, lambdaIJ, Iexplain, Jexplain, miss.update, miss.leave, caller) resolve.vargamma.shape(\dots, nu.ker, nu.pcf, default) roseContinuous(ang, rad, unit, \dots, start, clockwise, main, labels, at, do.plot) \method{rotate}{msr}(X, angle, \dots, centre) quadrat.testEngine(X, nx, ny, alternative, method, conditional, CR, \dots, nsim, Xcount, xbreaks, ybreaks, tess, fit, df.est, Xname, fitname) reincarnate.interact(object) resid4plot(RES, plot.neg, plot.smooth, spacing, outer, srange, monochrome, main, xlab, ylab, rlab, col.neg, col.smooth, \dots) resid1plot(RES, opt, plot.neg, plot.smooth, srange, monochrome, main, add, show.all, do.plot, col.neg, col.smooth, \dots) resid1panel(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab,ylab, \dots, do.plot) resolve.2D.kernel(\dots, sigma, varcov, x, mindist, adjust, bwfun, allow.zero) rhohatEngine(model, covariate, reference, volume, \dots, subset, weights, method, horvitz, smoother, resolution, evalCovarArgs, n, bw, adjust, from, to, bwref, covname, covunits, confidence, breaks, modelcall, callstring) rhohatCalc(ZX, Zvalues, lambda, denom, \dots, weights, lambdaX, method, horvitz, smoother, n, bw, adjust, from, to, bwref, covname, confidence, breaks, positiveCI, markovCI, covunits, modelcall, callstring, savestuff) rMaternInhibition(type, kappa, r, win, stationary, \dots, nsim, drop) rmax.Rigid(X, g) rmax.rule(fun, W, lambda) \method{rmhcontrol}{rmhcontrol}(\dots) \method{rmhcontrol}{list}(\dots) rmhEngine(InfoList, \dots, verbose, kitchensink, preponly, snoop, overrideXstart, overrideclip) RmhExpandRule(nama) rmhResolveControl(control, model) rmhResolveExpansion(win, control, imagelist, itype) rmhResolveTypes(model, start, control) rmhsnoop(\dots, Wsim, Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator, panel.only) rmhSnoopEnv(Xinit, Wclip, R) \method{rmhmodel}{rmhmodel}(model, \dots) \method{rmhstart}{rmhstart}(start, \dots) \method{rmhstart}{list}(start, \dots) rmpoint.I.allim(n, f, types) rocData(covariate, nullmodel, \dots, high) rocModel(lambda, nullmodel, \dots, high) rpoint.multi(n, f, fmax, marks, win, giveup, verbose, warn, nsim, drop) runifpoispp(lambda, win, \dots, nsim, drop) runifpoisppOnLines(lambda, L, nsim, drop) safeFiniteValue(x, default) safePositiveValue(x, default) \method{scalardilate}{msr}(X, f, \dots) scanmeasure(X, \dots) \method{scanmeasure}{ppp}(X, r, \dots, method) \method{scanmeasure}{im}(X, r, \dots) scanPoisLRTS(nZ, nG, muZ, muG, alternative) scanBinomLRTS(nZ, nG, muZ, 
muG, alternative) second.moment.calc(x, sigma, edge, what, \dots, varcov, expand, obswin, npts, debug) second.moment.engine(x, sigma, edge, what, \dots, kernel, scalekernel, obswin, varcov, npts, debug, fastgauss) sewpcf(d, w, denargs, lambda2area, divisor) sewsmod(d, ff, wt, Ef, rvals, method="smrep", \dots, nwtsteps=500) \method{shift}{influence.ppm}(X, \dots) \method{shift}{leverage.ppm}(X, \dots) \method{shift}{msr}(X, \dots) \method{shift}{quadrattest}(X, \dots) signalStatus(x, errors.only) \method{simulate}{profilepl}(object, \dots) simulrecipe(type, expr, envir, csr, pois, constraints) slr.prepare(CallInfo, envir, data, dataAtPoints, splitby, clip) slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, pixelarea) slrmInfluence(model, what, \dots) \method{Smooth}{solist}(X, \dots) smoothcrossEngine(Xdata, Xquery, values, sigma, \dots, weights, varcov, kernel, scalekernel, sorted, cutoff) smoothpointsEngine(x, values, sigma, \dots, kernel, scalekernel, weights, varcov, leaveoneout, sorted, cutoff, debug) spatstatClusterModelInfo(name, onlyPCP) spatstatDPPModelInfo(model) spatstatRmhInfo(cifname) spatialCDFframe(model, covariate, \dots, jitter) spatialCDFtest(model, covariate, test, \dots, dimyx, eps, interpolate, jitter, nsim, verbose, modelname, covname, dataname) spatialCDFtestCalc(fra, test, \dots, details) sphere.volume(range, nval = 10) splitHybridInteraction(coeffs, inte) sp.foundclass(cname, inlist, formalname, argsgiven) sp.foundclasses(cnames, inlist, formalname, argsgiven) strausscounts(U,X,r,EqualPairs) suffloc(object) suffstat.generic(model, X, callstring) suffstat.poisson(model, X, callstring) summarise.trend(trend, w, a) \method{summary}{envelope}(object,\dots) \method{summary}{mppm}(object, \dots, brief=FALSE) \method{summary}{msr}(object, \dots) \method{summary}{profilepl}(object, \dots) \method{summary}{rmhexpand}(object, \dots) \method{summary}{vblogit}(object, \dots) thinjump(n, p) tweak.coefs(model, new.coef) tweak.fv.entry(x, current.tag, new.labl, new.desc, new.tag) tweak.ratfv.entry(x, \dots) twostage.test(X, \dots, exponent, nsim, nsimsub, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, badXfatal, testblurb) twostage.envelope(X, \dots, nsim, nsimsub, nrank, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, badXfatal, testlabel) \method{unitname}{msr}(x) \method{unitname}{msr}(x) <- value \method{update}{ippm}(object, \dots, envir) \method{update}{msr}(object, \dots) \method{update}{rmhstart}(object, \dots) validate2Dkernel(kernel, fatal) validate.angles(angles, unit, guess) validate.weights(x, recip, how, allowzero, allowinf) vanilla.fv(x) varcountEngine(g, B, lambdaB, f, R, what) %vblogit(y, X, offset, eps, m0, S0, S0i, xi0, verb, maxiter, \dots) %vblogit.fmla(formula, offset, data, subset, weights, verbose, epsilon, \dots) versionstring.interact(object) versionstring.ppm(object) weightedclosepairs(X, r, correction, what) windows.mppm(x) X2testEngine(OBS, EXP, \dots, method, CR, df, nsim, conditional, alternative, testname, dataname) } \details{ These internal \pkg{spatstat.core} functions should not be called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat.core} to the next. } \value{ The return values of these functions are not documented, and may change without warning. 
} \keyword{internal} spatstat.core/man/rVarGamma.Rd0000644000176200001440000001473514141452520015773 0ustar liggesusers\name{rVarGamma} \alias{rVarGamma} \title{Simulate Neyman-Scott Point Process with Variance Gamma cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel. } \usage{ rVarGamma(kappa, nu, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{nu}{ Shape parameter for the cluster kernel. A number greater than -1. } \item{scale}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{thresh}{ Threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be treated as zero for simulation purposes. Will be overridden by argument \code{expand} if that is given. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. By default determined by calling \code{\link{clusterradius}} with the numeric threshold value given in \code{thresh}. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing or \code{NULL}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently around the parent, with displacements distributed according to the Variance Gamma kernel. The shape of the kernel is determined by the dimensionless index \code{nu}.
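For example, a pattern with an average of 25 parents per unit square and about 8 offspring per parent could be generated as follows (a minimal sketch; the parameter values are arbitrary illustrations, not recommended defaults):
\preformatted{
  X <- rVarGamma(kappa=25, nu=0.5, scale=0.03, mu=8)
  plot(X)
}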
The index \code{nu} is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). The scale of the kernel is determined by the argument \code{scale}, which is the parameter \eqn{\eta}{eta} appearing in equations (12) and (13) of Jalilian et al (2013). It is expressed in units of length (the same as the unit of length for the window \code{win}). In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, maximum composite likelihood or Palm likelihood using \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). } When the parents are homogeneous (\code{kappa} is a single number) and the offspring are inhomogeneous (\code{mu} is a function or pixel image), the model can be fitted to data using \code{\link{kppm}}, or using \code{\link{vargamma.estK}} or \code{\link{vargamma.estpcf}} applied to the inhomogeneous \eqn{K} function. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rNeymanScott}}, \code{\link{kppm}}. \code{\link{vargamma.estK}}, \code{\link{vargamma.estpcf}}. } \examples{ # homogeneous X <- rVarGamma(30, 2, 0.02, 5) # inhomogeneous ff <- function(x,y){ exp(2 - 3 * abs(x)) } Z <- as.im(ff, W= owin()) Y <- rVarGamma(30, 2, 0.02, Z) YY <- rVarGamma(ff, 2, 0.02, 3) } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \keyword{spatial} \keyword{datagen} spatstat.core/man/bw.stoyan.Rd0000644000176200001440000000355114141452520016034 0ustar liggesusers\name{bw.stoyan} \alias{bw.stoyan} \title{ Stoyan's Rule of Thumb for Bandwidth Selection } \description{ Computes a rough estimate of the appropriate bandwidth for kernel smoothing estimators of the pair correlation function and other quantities. } \usage{ bw.stoyan(X, co=0.15) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{co}{ Coefficient appearing in the rule of thumb. See Details. } } \details{ Estimation of the pair correlation function and other quantities by smoothing methods requires a choice of the smoothing bandwidth.
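The value returned by \code{bw.stoyan} is typically passed straight to the estimation command, as in the following sketch (the standard \code{cells} dataset is used purely for illustration):
\preformatted{
  b <- bw.stoyan(cells)  # rule-of-thumb bandwidth
  g <- pcf(cells, bw=b)  # pair correlation function using this bandwidth
}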
Stoyan and Stoyan (1995, equation (15.16), page 285) proposed a rule of thumb for choosing the smoothing bandwidth. For the Epanechnikov kernel, the rule of thumb is to set the kernel's half-width \eqn{h} to \eqn{0.15/\sqrt{\lambda}}{0.15/sqrt(lambda)} where \eqn{\lambda}{lambda} is the estimated intensity of the point pattern, typically computed as the number of points of \code{X} divided by the area of the window containing \code{X}. For a general kernel, the corresponding rule is to set the standard deviation of the kernel to \eqn{\sigma = 0.15/\sqrt{5\lambda}}{sigma = 0.15/sqrt(5 * lambda)}. The coefficient \eqn{0.15} can be tweaked using the argument \code{co}. To ensure the bandwidth is finite, an empty point pattern is treated as if it contained 1 point. } \value{ A finite positive numerical value giving the selected bandwidth (the standard deviation of the smoothing kernel). } \seealso{ \code{\link{pcf}}, \code{\link{bw.relrisk}} } \examples{ data(shapley) bw.stoyan(shapley) } \references{ Stoyan, D. and Stoyan, H. (1995) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/rtemper.Rd0000644000176200001440000000557714141452520015574 0ustar liggesusers\name{rtemper} \alias{rtemper} \title{ Simulated Annealing or Simulated Tempering for Gibbs Point Processes } \description{ Performs simulated annealing or simulated tempering for a Gibbs point process model using a specified annealing schedule. } \usage{ rtemper(model, invtemp, nrep, \dots, track=FALSE, start = NULL, verbose = FALSE) } \arguments{ \item{model}{ A Gibbs point process model: a fitted Gibbs point process model (object of class \code{"ppm"}), or any data acceptable to \code{\link{rmhmodel}}. } \item{invtemp}{ A numeric vector of positive numbers. The sequence of values of inverse temperature that will be used. } \item{nrep}{ An integer vector of the same length as \code{invtemp}. The value \code{nrep[i]} specifies the number of steps of the Metropolis-Hastings algorithm that will be performed at inverse temperature \code{invtemp[i]}. } \item{start}{ Initial starting state for the simulation. Any data acceptable to \code{\link{rmhstart}}. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } \item{\dots}{ Additional arguments passed to \code{\link{rmh.default}}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Metropolis-Hastings simulation algorithm \code{\link{rmh}} is run for \code{nrep[1]} steps at inverse temperature \code{invtemp[1]}, then for \code{nrep[2]} steps at inverse temperature \code{invtemp[2]}, and so on. Setting the inverse temperature to a value \eqn{\alpha}{alpha} means that the probability density of the Gibbs model, \eqn{f(x)}, is replaced by \eqn{g(x) = C\, f(x)^\alpha}{g(x) = C f(x)^alpha} where \eqn{C} is a normalising constant depending on \eqn{\alpha}{alpha}. Larger values of \eqn{\alpha}{alpha} exaggerate the high and low values of probability density, while smaller values of \eqn{\alpha}{alpha} flatten out the probability density. For example if the original \code{model} is a Strauss process, the modified model is close to a hard core process for large values of inverse temperature, and close to a Poisson process for small values of inverse temperature. } \value{ A point pattern (object of class \code{"ppp"}).
If \code{track=TRUE}, the result also has an attribute \code{"history"} which is a data frame with columns \code{proposaltype}, \code{accepted}, \code{numerator} and \code{denominator}, as described in \code{\link{rmh.default}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rmh.default}}, \code{\link{rmh}}. } \examples{ stra <- rmhmodel(cif="strauss", par=list(beta=2,gamma=0.2,r=0.7), w=square(10)) nr <- if(interactive()) 1e5 else 1e4 Y <- rtemper(stra, c(1, 2, 4, 8), nr * (1:4), verbose=TRUE, track=TRUE) } \keyword{spatial} \keyword{datagen} spatstat.core/man/rex.Rd0000644000176200001440000000523314141452520014705 0ustar liggesusers\name{rex} \alias{rex} \title{ Richardson Extrapolation } \description{ Performs Richardson Extrapolation on a sequence of approximate values. } \usage{ rex(x, r = 2, k = 1, recursive = FALSE) } \arguments{ \item{x}{ A numeric vector or matrix, whose columns are successive estimates or approximations to a vector of parameters. } \item{r}{ A number greater than 1. The ratio of successive step sizes. See Details. } \item{k}{ Integer. The order of convergence assumed. See Details. } \item{recursive}{ Logical value indicating whether to perform one step of Richardson extrapolation (\code{recursive=FALSE}, the default) or repeat the extrapolation procedure until a best estimate is obtained (\code{recursive=TRUE}). } } \details{ Richardson extrapolation is a general technique for improving numerical approximations, often used in numerical integration (Brezinski and Zaglia, 1991). It can also be used to improve parameter estimates in statistical models (Baddeley and Turner, 2014). The successive columns of \code{x} are assumed to have been obtained using approximations with step sizes \eqn{a, a/r, a/r^2, \ldots}{a, a/r, a/r^2, ...} where \eqn{a} is the initial step size (which does not need to be specified). Estimates based on a step size \eqn{s} are assumed to have an error of order \eqn{s^k}. Thus, the default values \code{r=2} and \code{k=1} imply that the errors in the second column of \code{x} should be roughly \eqn{(1/r)^k = 1/2} as large as the errors in the first column, and so on. } \value{ A matrix whose columns contain a sequence of improved estimates. } \references{ Baddeley, A. and Turner, R. (2014) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{84}, 1621--1643. DOI: 10.1080/00949655.2012.755976 Brezinski, C. and Zaglia, M.R. (1991) \emph{Extrapolation Methods. Theory and Practice}. North-Holland. } \author{ \adrian and \rolf. } \seealso{ \code{\link{bc}} } \examples{ # integrals of sin(x) and cos(x) from 0 to pi # correct answers: 2, 0 est <- function(nsteps) { xx <- seq(0, pi, length=nsteps) ans <- pi * c(mean(sin(xx)), mean(cos(xx))) names(ans) <- c("sin", "cos") ans } X <- cbind(est(10), est(20), est(40)) X rex(X) rex(X, recursive=TRUE) # fitted Gibbs point process model fit0 <- ppm(cells ~ 1, Strauss(0.07), nd=16) fit1 <- update(fit0, nd=32) fit2 <- update(fit0, nd=64) co <- cbind(coef(fit0), coef(fit1), coef(fit2)) co rex(co, k=2, recursive=TRUE) } \keyword{math} \keyword{optimize} spatstat.core/man/Smooth.msr.Rd0000644000176200001440000000401214141452520016152 0ustar liggesusers\name{Smooth.msr} \alias{Smooth.msr} \title{ Smooth a Signed or Vector-Valued Measure } \description{ Apply kernel smoothing to a signed measure or vector-valued measure.
} \usage{ \method{Smooth}{msr}(X, ..., drop=TRUE) } \arguments{ \item{X}{ Object of class \code{"msr"} representing a signed measure or vector-valued measure. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} controlling the smoothing bandwidth and the pixel resolution. } \item{drop}{ Logical. If \code{TRUE} (the default), the result of smoothing a scalar-valued measure is a pixel image. If \code{FALSE}, the result of smoothing a scalar-valued measure is a list containing one pixel image. } } \details{ This function applies kernel smoothing to a signed measure or vector-valued measure \code{X}. The Gaussian kernel is used. The object \code{X} would typically have been created by \code{\link{residuals.ppm}} or \code{\link{msr}}. } \value{ A pixel image or a list of pixel images. For scalar-valued measures, a pixel image (object of class \code{"im"}) provided \code{drop=TRUE}. For vector-valued measures (or if \code{drop=FALSE}), a list of pixel images; the list also belongs to the class \code{"solist"} so that it can be printed and plotted. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \author{ \adrian } \seealso{ \code{\link{Smooth}}, \code{\link{msr}}, \code{\link{plot.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(Smooth(rp)) plot(Smooth(rs)) } \keyword{spatial} \keyword{models} spatstat.core/man/as.data.frame.envelope.Rd0000644000176200001440000000260714141452520020331 0ustar liggesusers\name{as.data.frame.envelope} \alias{as.data.frame.envelope} \title{Coerce Envelope to Data Frame} \description{ Converts an envelope object to a data frame. } \usage{ \method{as.data.frame}{envelope}(x, \dots, simfuns=FALSE) } \arguments{ \item{x}{Envelope object (class \code{"envelope"}).} \item{\dots}{Ignored.} \item{simfuns}{Logical value indicating whether the result should include the values of the simulated functions that were used to build the envelope. } } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class of envelopes (see \code{\link{envelope}}). The result is a data frame with columns containing the values of the function argument (usually named \code{r}), the function estimate for the original point pattern data (\code{obs}), the upper and lower envelope limits (\code{hi} and \code{lo}), and possibly additional columns. If \code{simfuns=TRUE}, the result also includes columns of values of the simulated functions that were used to compute the envelope. This is possible only when the envelope was computed with the argument \code{savefuns=TRUE} in the call to \code{\link{envelope}}. } \value{ A data frame. } \examples{ E <- envelope(cells, nsim=5, savefuns=TRUE) tail(as.data.frame(E)) tail(as.data.frame(E, simfuns=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.core/man/density.ppp.Rd0000644000176200001440000004037014141452520016365 0ustar liggesusers\name{density.ppp} \alias{density.ppp} \title{Kernel Smoothed Intensity of Point Pattern} \description{ Compute a kernel smoothed intensity function from a point pattern.
} \usage{ \method{density}{ppp}(x, sigma=NULL, \dots, weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ The smoothing bandwidth (the amount of smoothing). The standard deviation of the isotropic smoothing kernel. Either a numerical value, or a function that computes an appropriate value of \code{sigma}. } \item{weights}{ Optional weights to be attached to the points. A numeric vector, numeric matrix, an \code{expression}, or a pixel image. } \item{\dots}{ Additional arguments passed to \code{\link{pixellate.ppp}} and \code{\link{as.mask}} to determine the pixel resolution, or passed to \code{sigma} if it is a function. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{varcov}{ Variance-covariance matrix of anisotropic smoothing kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{adjust}{ Optional. Adjustment factor for the smoothing parameter. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{positive}{ Logical value indicating whether to force all density values to be positive numbers. Default is \code{FALSE}. } \item{verbose}{ Logical value indicating whether to issue warnings about numerical problems and conditions. } } \value{ By default, the result is a pixel image (object of class \code{"im"}). Pixel values are estimated intensity values, expressed in \dQuote{points per unit area}. If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{x}. Values are estimated intensity values at the points of \code{x}. In either case, the return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. If \code{weights} is a matrix with more than one column, then the result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. 
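In its simplest form, only the point pattern and a bandwidth are supplied, as in the following sketch (the bandwidth value is an arbitrary illustration):
\preformatted{
  D <- density(cells, sigma=0.1)  # pixel image of estimated intensity
  plot(D)
}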
The function \code{density.ppp} computes a fixed-bandwidth kernel estimate (Diggle, 1985) of the intensity function of the point process that generated the point pattern \code{x}. The amount of smoothing is controlled by \code{sigma} if it is specified. By default, smoothing is performed using a Gaussian kernel. The resulting density estimate is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with point masses at each of the data points in \code{x}. Anisotropic kernels, and non-Gaussian kernels, are also supported. Each point has unit weight, unless the argument \code{weights} is given. If \code{edge=TRUE} (the default), the intensity estimate is corrected for edge effect bias. If \code{at="pixels"} (the default), the result is a pixel image giving the estimated intensity at each pixel in a grid. If \code{at="points"}, the result is a numeric vector giving the estimated intensity at each of the original data points in \code{x}. } \section{Amount of smoothing}{ The amount of smoothing is determined by the arguments \code{sigma}, \code{varcov} and \code{adjust}. \itemize{ \item if \code{sigma} is a single numerical value, this is taken as the standard deviation of the isotropic Gaussian kernel. \item alternatively \code{sigma} may be a function that computes an appropriate bandwidth from the data point pattern by calling \code{sigma(x)}. To perform automatic bandwidth selection using cross-validation, it is recommended to use the functions \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} or \code{\link{bw.ppl}}. \item The smoothing kernel may be made anisotropic by giving the variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. \item Alternatively \code{sigma} may be a vector of length 2 giving the standard deviations of the \eqn{x} and \eqn{y} coordinates, thus equivalent to \code{varcov = diag(rep(sigma^2, 2))}. \item if neither \code{sigma} nor \code{varcov} is specified, an isotropic Gaussian kernel will be used, with a default value of \code{sigma} calculated by a simple rule of thumb that depends only on the size of the window. \item The argument \code{adjust} makes it easy for the user to change the bandwidth specified by any of the rules above. The value of \code{sigma} will be multiplied by the factor \code{adjust}. The matrix \code{varcov} will be multiplied by \code{adjust^2}. To double the smoothing bandwidth, set \code{adjust=2}. \item An infinite bandwidth, \code{sigma=Inf} or \code{adjust=Inf}, is permitted, and yields an intensity estimate which is constant over the spatial domain. } } \section{Edge correction}{ If \code{edge=TRUE}, the intensity estimate is corrected for edge effect bias in one of two ways: \itemize{ \item If \code{diggle=FALSE} (the default) the intensity estimate is corrected by dividing it by the convolution of the Gaussian kernel with the window of observation. This is the approach originally described in Diggle (1985). Thus the intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = e(u) \sum_i k(x_i - u) w_i }{ \lambda(u) = e(u) \sum[i] k(x[i] - u) w[i] } where \eqn{k} is the Gaussian smoothing kernel, \eqn{e(u)} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. \item If \code{diggle=TRUE} then the code uses the improved edge correction described by Jones (1993) and Diggle (2010, equation 18.9). This has been shown to have better performance (Jones, 1993) but is slightly slower to compute.
The intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = \sum_i k(x_i - u) w_i e(x_i) }{ \lambda(u) = \sum[i] k(x[i] - u) w[i] e(x[i]) } where again \eqn{k} is the Gaussian smoothing kernel, \eqn{e(x_i)}{e(x[i])} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. } In both cases, the edge correction term \eqn{e(u)} is the reciprocal of the kernel mass inside the window: \deqn{ \frac{1}{e(u)} = \int_W k(v-u) \, {\rm d}v }{ 1/e(u) = integral[v in W] k(v-u) dv } where \eqn{W} is the observation window. } \section{Smoothing kernel}{ By default, smoothing is performed using a Gaussian kernel. The choice of smoothing kernel is determined by the argument \code{kernel}. This should be a character string giving the name of a recognised two-dimensional kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. The default is a Gaussian kernel. If \code{scalekernel=TRUE} then the kernel values will be rescaled according to the arguments \code{sigma}, \code{varcov} and \code{adjust} as explained above, effectively treating \code{kernel} as the template kernel with standard deviation equal to 1. This is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, the kernel values will not be altered, and the arguments \code{sigma}, \code{varcov} and \code{adjust} are ignored. This is the default behaviour when \code{kernel} is a pixel image or a function. } \section{Desired output}{ If \code{at="pixels"} (the default), intensity values are computed at every location \eqn{u} in a fine grid, and are returned as a pixel image. The point pattern is first discretised using \code{\link{pixellate.ppp}}, then the intensity is computed using the Fast Fourier Transform. Accuracy depends on the pixel resolution and the discretisation rule. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link{as.mask}} (specify the number of pixels by \code{dimyx} or the pixel size by \code{eps}). The discretisation rule is controlled by the arguments \code{\dots} passed to \code{\link{pixellate.ppp}} (the default rule is that each point is allocated to the nearest pixel centre; this can be modified using the arguments \code{fractional} and \code{preserve}). If \code{at="points"}, the intensity values are computed to high accuracy at the points of \code{x} only. Computation is performed by directly evaluating and summing the kernel contributions without discretising the data. The result is a numeric vector giving the density values. The intensity value at a point \eqn{x_i}{x[i]} is (if \code{diggle=FALSE}) \deqn{ \hat\lambda(x_i) = e(x_i) \sum_j k(x_j - x_i) w_j }{ \lambda(x[i]) = e(x[i]) \sum[j] k(x[j] - x[i]) w[j] } or (if \code{diggle=TRUE}) \deqn{ \hat\lambda(x_i) = \sum_j k(x_j - x_i) w_j e(x_j) }{ \lambda(x[i]) = \sum[j] k(x[j] - x[i]) w[j] e(x[j]) } If \code{leaveoneout=TRUE} (the default), then the sum in the equation is taken over all \eqn{j} not equal to \eqn{i}, so that the intensity value at a data point is the sum of kernel contributions from all \emph{other} data points. If \code{leaveoneout=FALSE} then the sum is taken over all \eqn{j}, so that the intensity value at a data point includes a contribution from the same point. 
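As a concrete sketch of the two forms of output (the bandwidth value is an arbitrary illustration):
\preformatted{
  Z <- density(cells, sigma=0.1)                # pixel image over a grid
  lam <- density(cells, sigma=0.1, at="points") # numeric vector, one value per data point
}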
} \section{Weights}{ If \code{weights} is a matrix with more than one column, then the calculation is effectively repeated for each column of weights. The result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). The argument \code{weights} can also be an \code{expression}. It will be evaluated in the data frame \code{as.data.frame(x)} to obtain a vector or matrix of weights. The expression may involve the symbols \code{x} and \code{y} representing the Cartesian coordinates, the symbol \code{marks} representing the mark values if there is only one column of marks, and the names of the columns of marks if there are several columns. The argument \code{weights} can also be a pixel image (object of class \code{"im"}). In this case, numerical weights for the data points will be extracted from this image (by looking up the pixel values at the locations of the data points in \code{x}). } \section{The meaning of \code{density.ppp}}{ This function is often misunderstood. The result of \code{density.ppp} is not a spatial smoothing of the marks or weights attached to the point pattern. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. The result of \code{density.ppp} is not a probability density. It is an estimate of the \emph{intensity function} of the point process that generated the point pattern data. Intensity is the expected number of random points per unit area. The units of intensity are \dQuote{points per unit area}. Intensity is usually a function of spatial location, and it is this function which is estimated by \code{density.ppp}. The integral of the intensity function over a spatial region gives the expected number of points falling in this region. Inspecting an estimate of the intensity function is usually the first step in exploring a spatial point pattern dataset. For more explanation, see Baddeley, Rubak and Turner (2015) or Diggle (2003, 2010). If you have two (or more) types of points, and you want a probability map or relative risk surface (the spatially-varying probability of a given type), use \code{\link{relrisk}}. } \section{Technical issue: Negative Values}{ Negative and zero values of the density estimate are possible when \code{at="pixels"} because of numerical errors in finite-precision arithmetic. By default, \code{density.ppp} does not try to repair such errors. This would take more computation time and is not always needed. (Also it would not be appropriate if \code{weights} include negative values.) To ensure that the resulting density values are always positive, set \code{positive=TRUE}. } \seealso{ To select the bandwidth \code{sigma} automatically by cross-validation, use \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} or \code{\link{bw.ppl}}. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. For adaptive nonparametric estimation, see \code{\link{adaptive.density}}. For data sharpening, see \code{\link{sharpen.ppp}}. To compute a relative risk surface or probability map for two (or more) types of points, use \code{\link{relrisk}}. For information about the data structures, see \code{\link{ppp.object}}, \code{\link{im.object}}.
} \examples{ if(interactive()) { opa <- par(mfrow=c(1,2)) plot(density(cells, 0.05)) plot(density(cells, 0.05, diggle=TRUE)) par(opa) v <- diag(c(0.05, 0.07)^2) plot(density(cells, varcov=v)) } # automatic bandwidth selection plot(density(cells, sigma=bw.diggle(cells))) # equivalent: plot(density(cells, bw.diggle)) # evaluate intensity at points density(cells, 0.05, at="points") # non-Gaussian kernel plot(density(cells, sigma=0.4, kernel="epanechnikov")) if(interactive()) { # see effect of changing pixel resolution opa <- par(mfrow=c(1,2)) plot(density(cells, sigma=0.4)) plot(density(cells, sigma=0.4, eps=0.05)) par(opa) } # relative risk calculation by hand (see relrisk.ppp) lung <- split(chorley)$lung larynx <- split(chorley)$larynx D <- density(lung, sigma=2) plot(density(larynx, sigma=2, weights=1/D)) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34}, 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J. (2010) Nonparametric methods. Chapter 18, pp. 299--316 in A.E. Gelfand, P.J. Diggle, M. Fuentes and P. Guttorp (eds.) \emph{Handbook of Spatial Statistics}, CRC Press, Boca Raton, FL. Jones, M.C. (1993) Simple boundary corrections for kernel density estimation. \emph{Statistics and Computing} \bold{3}, 135--146. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/StraussHard.Rd0000644000176200001440000000776614141452520016363 0ustar liggesusers\name{StraussHard} \alias{StraussHard} \title{The Strauss / Hard Core Point Process Model} \description{ Creates an instance of the ``Strauss/hard core'' point process model which can then be fitted to point pattern data. } \usage{ StraussHard(r, hc=NA) } \arguments{ \item{r}{The interaction radius of the Strauss interaction} \item{hc}{The hard core distance. Optional.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the ``Strauss/hard core'' process with Strauss interaction radius \eqn{r} and hard core distance \code{hc}. } \details{ A Strauss/hard core process with interaction radius \eqn{r}, hard core distance \eqn{h < r}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is a pairwise interaction point process in which \itemize{ \item distinct points are not allowed to come closer than a distance \eqn{h} apart \item each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the probability density. } This is a hybrid of the Strauss process and the hard core process. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case for the Strauss process).
If \eqn{\gamma < 1}{gamma < 1}, the model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma > 1}{gamma > 1}, the model is ``ordered'' or ``inhibitive'' up to the distance \eqn{h}, but has an ``attraction'' between points lying at distances in the range between \eqn{h} and \eqn{r}. If \eqn{\gamma = 1}{gamma = 1}, the process reduces to a classical hard core process with hard core distance \eqn{h}. If \eqn{\gamma = 0}{gamma = 0}, the process reduces to a classical hard core process with hard core distance \eqn{r}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss/hard core process pairwise interaction is yielded by the function \code{StraussHard()}. See the examples below. The canonical parameter \eqn{\log(\gamma)}{log(gamma)} is estimated by \code{\link{ppm}()}, not fixed in \code{StraussHard()}. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ StraussHard(r=1,hc=0.02) # prints a sensible description of itself data(cells) # ppm(cells, ~1, StraussHard(r=0.1, hc=0.05)) # fit the stationary Strauss/hard core process to `cells' ppm(cells, ~ polynom(x,y,3), StraussHard(r=0.1, hc=0.05)) # fit a nonstationary Strauss/hard core process # with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/eval.fv.Rd0000644000176200001440000001206314141452520015447 0ustar liggesusers\name{eval.fv} \alias{eval.fv} \title{Evaluate Expression Involving Functions} \description{ Evaluates any expression involving one or more function value (fv) objects, and returns another object of the same kind. } \usage{ eval.fv(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fv"} objects to be used in the expression. } \item{dotonly}{Logical. See Details.} \item{equiv}{Mapping between column names of different objects that are deemed to be equivalent. See Details.} \item{relabel}{ Logical value indicating whether to compute appropriate labels for the resulting function. This should normally be \code{TRUE} (the default). See Details. } } \details{ This is a wrapper to make it easier to perform pointwise calculations with the summary functions used in spatial statistics. An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link[spatstat.core]{Kest}} and its relatives. 
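This data-frame structure can be inspected directly, as in the following quick sketch (using the standard \code{cells} dataset):
\preformatted{
  K <- Kest(cells)
  head(as.data.frame(K))  # one column for r, plus one column per estimate
}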
For example, suppose \code{X} is an object of class \code{"fv"} containing several different estimates of the Ripley's K function \eqn{K(r)}, evaluated at a sequence of values of \eqn{r}. Then \code{eval.fv(X+3)} effectively adds 3 to each function estimate in \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fv"} which are compatible (in particular they have the same vector of \eqn{r} values). Then \code{eval.fv(X + Y)} will add the corresponding function values in \code{X} and \code{Y}, and return the resulting function. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fv} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fv"}. Each such name is replaced by a vector containing the function values. The expression is then evaluated. The result should be a vector; it is taken as the new vector of function values. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fv"} in the expression. If the objects are not compatible, they will be made compatible by \code{\link{harmonise.fv}}. If \code{dotonly=TRUE} (the default), the expression will be evaluated only for those columns of an \code{"fv"} object that contain values of the function itself (rather than values of the derivative of the function, the hazard rate, etc). If \code{dotonly=FALSE}, the expression will be evaluated for all columns. For example, the result of \code{\link[spatstat.core]{Fest}} includes several columns containing estimates of the empty space function \eqn{F(r)}, but also includes an estimate of the \emph{hazard} \eqn{h(r)} of \eqn{F(r)}. Transformations that are valid for \eqn{F} may not be valid for \eqn{h}. Accordingly, \eqn{h} would normally be omitted from the calculation. The columns of an object \code{x} that represent the function itself are identified by its \dQuote{dot} names, \code{fvnames(x, ".")}. They are the columns normally plotted by \code{\link{plot.fv}} and identified by the symbol \code{"."} in plot formulas in \code{\link{plot.fv}}. The argument \code{equiv} can be used to specify that two different column names in different function objects are mathematically equivalent or cognate. It should be a list of \code{name=value} pairs, or a named vector of character strings, indicating the pairing of equivalent names. (Without this argument, these columns would be discarded.) See the Examples. The argument \code{relabel} should normally be \code{TRUE} (the default). It determines whether to compute appropriate mathematical labels and descriptions for the resulting function object (used when the object is printed or plotted). If \code{relabel=FALSE} then this does not occur, and the mathematical labels and descriptions in the result are taken from the function object that appears first in the expression. This reduces computation time slightly (for advanced use only). } \value{ Another object of class \code{"fv"}.
} \seealso{ \code{\link{fv.object}}, \code{\link[spatstat.core]{Kest}} } \examples{ # manipulating the K function X <- runifrect(42) Ks <- Kest(X) eval.fv(Ks + 3) Ls <- eval.fv(sqrt(Ks/pi)) # manipulating two K functions Y <- runifrect(20) Kr <- Kest(Y) Kdif <- eval.fv(Ks - Kr) Z <- eval.fv(sqrt(Ks/pi) - sqrt(Kr/pi)) ## Use of 'envir' U <- eval.fv(sqrt(K), list(K=Ks)) ## Use of 'equiv' Fc <- Fest(cells) Gc <- Gest(cells) # Hanisch and Chiu-Stoyan estimators are cognate Dc <- eval.fv(Fc - Gc, equiv=list(cs="han")) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.core/man/update.interact.Rd0000644000176200001440000000227714141452520017206 0ustar liggesusers\name{update.interact} \alias{update.interact} \title{ Update an Interpoint Interaction } \description{ This command updates the \code{object} using the arguments given. } \usage{ \method{update}{interact}(object, \dots) } \arguments{ \item{object}{ Interpoint interaction (object of class \code{"interact"}). } \item{\dots}{ Additional or replacement values of parameters of \code{object}. } } \details{ This is a method for the generic function \code{\link[stats]{update}} for the class \code{"interact"} of interpoint interactions. It updates the \code{object} using the parameters given in the extra arguments \code{\dots}. The extra arguments must be given in the form \code{name=value} and must be recognisable to the interaction object. They override any parameters of the same name in \code{object}. } \value{ Another object of class \code{"interact"}, equivalent to \code{object} except for changes in parameter values. } \author{ \spatstatAuthors. } \seealso{ \code{\link{update.ppm}} } \examples{ Str <- Strauss(r=1) Str update(Str, r=2) M <- MultiStrauss(radii=matrix(1,2,2)) update(M, types=c("on", "off")) } \keyword{spatial} \keyword{models} spatstat.core/man/triplet.family.Rd0000644000176200001440000000241714141452520017053 0ustar liggesusers\name{triplet.family} \alias{triplet.family} \title{Triplet Interaction Family} \description{ An object describing the family of all Gibbs point processes with interaction order equal to 3. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have interaction order equal to 3, such as the triplet interaction process \code{\link{Triplets}}. Anyway, \code{triplet.family} is an object of class \code{"isf"} containing a function \code{triplet.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ \code{\link{Triplets}} to create the triplet interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{inforder.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322.
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/WindowOnly.Rd0000644000176200001440000000347614141452520016227 0ustar liggesusers\name{WindowOnly} \alias{Window.ppm} \alias{Window.kppm} \alias{Window.dppm} \alias{Window.slrm} \alias{Window.msr} \alias{Window.quadrattest} \alias{Window.rmhmodel} \title{Extract Window of Spatial Object} \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract the window in which the object is defined. } \usage{ \method{Window}{ppm}(X, \dots, from=c("points", "covariates")) \method{Window}{kppm}(X, \dots, from=c("points", "covariates")) \method{Window}{dppm}(X, \dots, from=c("points", "covariates")) \method{Window}{slrm}(X, \dots, from=c("points", "covariates")) \method{Window}{msr}(X, \dots) \method{Window}{quadrattest}(X, \dots) \method{Window}{rmhmodel}(X, \dots) } \arguments{ \item{X}{A spatial object.} \item{\dots}{Ignored.} \item{from}{Character string. See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ These are methods for the generic function \code{\link{Window}} which extract the spatial window in which the object \code{X} is defined. The argument \code{from} applies when \code{X} is a fitted two-dimensional point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"dppm"}). If \code{from="points"} (the default), \code{Window} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{Window} returns the window in which the spatial covariates of the model were provided. } \seealso{ \code{\link{Window}}, \code{\link{Window.ppp}}, \code{\link{Window.psp}}. \code{\link{owin.object}} } \examples{ X <- quadratcount(cells, 4) Window(X) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.core/man/is.hybrid.Rd0000644000176200001440000000347514144333466016012 0ustar liggesusers\name{is.hybrid} \alias{is.hybrid} \alias{is.hybrid.ppm} \alias{is.hybrid.interact} \title{ Test Whether Object is a Hybrid } \description{ Tests whether a point process model or point process interaction is a hybrid of several interactions. } \usage{ is.hybrid(x) \method{is.hybrid}{ppm}(x) \method{is.hybrid}{interact}(x) } \arguments{ \item{x}{ A point process model (object of class \code{"ppm"}) or a point process interaction structure (object of class \code{"interact"}). } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The function \code{is.hybrid} is generic, with methods for point process models (objects of class \code{"ppm"}) and point process interactions (objects of class \code{"interact"}). These functions return \code{TRUE} if the object \code{x} is a hybrid, and \code{FALSE} if it is not a hybrid. Hybrids of two or more interpoint interactions are created by the function \code{\link{Hybrid}}. Such a hybrid interaction can then be fitted to point pattern data using \code{\link{ppm}}. } \value{ \code{TRUE} if the object is a hybrid, and \code{FALSE} otherwise. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
\code{DOI: 10.18637/jss.v055.i11} } \seealso{ \code{\link{Hybrid}} } \examples{ S <- Strauss(0.1) is.hybrid(S) H <- Hybrid(Strauss(0.1), Geyer(0.2, 3)) is.hybrid(H) data(redwood) fit <- ppm(redwood, ~1, H) is.hybrid(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/model.images.Rd0000644000176200001440000001037414141452520016455 0ustar liggesusers\name{model.images} \alias{model.images} \alias{model.images.ppm} \alias{model.images.dppm} \alias{model.images.kppm} \alias{model.images.slrm} \title{Compute Images of Constructed Covariates} \description{ For a point process model fitted to spatial point pattern data, this function computes pixel images of the covariates in the design matrix. } \usage{ model.images(object, ...) \method{model.images}{ppm}(object, W = as.owin(object), ...) \method{model.images}{kppm}(object, W = as.owin(object), ...) \method{model.images}{dppm}(object, W = as.owin(object), ...) \method{model.images}{slrm}(object, ...) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"dppm"}. } \item{W}{ A window (object of class \code{"owin"}) in which the images should be computed. Defaults to the window in which the model was fitted. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link[stats:model.matrix]{model.matrix.lm}}. } } \details{ This command is similar to \code{\link{model.matrix.ppm}} except that it computes pixel images of the covariates, instead of computing the covariate values at certain points only. The \code{object} must be a fitted spatial point process model object of class \code{"ppm"} (produced by the model-fitting function \code{\link{ppm}}) or class \code{"kppm"} (produced by the fitting function \code{\link{kppm}}) or class \code{"dppm"} (produced by the fitting function \code{\link{dppm}}) or class \code{"slrm"} (produced by \code{\link{slrm}}). The spatial covariates required by the model-fitting procedure are computed at every pixel location in the window \code{W}. For \code{slrm} objects, the covariates are computed on the pixels that were used to fit the model. Note that the spatial covariates computed here are not necessarily the original covariates that were supplied when fitting the model. Rather, they are the canonical covariates, the covariates that appear in the loglinear representation of the (conditional) intensity and in the columns of the design matrix. For example, they might include dummy or indicator variables for different levels of a factor, depending on the contrasts that are in force. The pixel resolution is determined by \code{W} if \code{W} is a mask (that is \code{W$type = "mask"}). Otherwise, the pixel resolution is determined by \code{\link{spatstat.options}}. The format of the result depends on whether the original point pattern data were marked or unmarked. \itemize{ \item If the original dataset was unmarked, the result is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The names of the list elements are the names of the covariates determined by \code{\link[stats:model.matrix]{model.matrix.lm}}. The result is also of class \code{"solist"} so that it can be plotted immediately. \item If the original dataset was a multitype point pattern, the result is a \code{\link{hyperframe}} with one column for each possible type of points. 
Each column is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The row names of the hyperframe are the names of the covariates determined by \code{\link[stats:model.matrix]{model.matrix.lm}}. } } \value{ A list (of class \code{"solist"}) or array (of class \code{"hyperframe"}) containing pixel images (objects of class \code{"im"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{model.matrix.ppm}}, \code{\link[stats]{model.matrix}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{slrm}}, \code{\link{im}}, \code{\link{im.object}}, \code{\link{plot.solist}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(cells ~ x) model.images(fit) B <- owin(c(0.2, 0.4), c(0.3, 0.8)) model.images(fit, B) fit2 <- ppm(cells ~ cut(x,3)) model.images(fit2) fit3 <- slrm(japanesepines ~ x) model.images(fit3) fit4 <- ppm(amacrine ~ marks + x) model.images(fit4) } \keyword{spatial} \keyword{models} spatstat.core/man/dclf.sigtrace.Rd0000644000176200001440000001364314141452520016623 0ustar liggesusers\name{dclf.sigtrace} \alias{dclf.sigtrace} \alias{mad.sigtrace} \alias{mctest.sigtrace} \title{ Significance Trace of Cressie-Loosmore-Ford or Maximum Absolute Deviation Test } \description{ Generates a significance trace of the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.sigtrace(X, \dots) mad.sigtrace(X, \dots) mctest.sigtrace(X, fun=Lest, \dots, exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} or \code{\link{mctest.progress}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify a one-sided test, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}.
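For example, a minimal sketch (using an illustratively small number of simulations; far more would be needed in practice):
\preformatted{
  ## significance trace for a test of Complete Spatial Randomness
  ps <- dclf.sigtrace(cells, Lest, nsim=19)
  plot(ps)    # p-value as a function of the interval endpoint R
}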
These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dclf.sigtrace} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. Similarly \code{mad.sigtrace} performs \code{\link{mad.test}} using all possible intervals and returns the \eqn{p}-values. More generally, \code{mctest.sigtrace} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R >= rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Monte Carlo \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion (when \code{interpolate=FALSE}) or the delta method and normal approximation (when \code{interpolate=TRUE}). If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3), 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Bowman, A.W. and Azzalini, A. (1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian, \rolf and \ege } \seealso{ \code{\link{dclf.test}} for the tests; \code{\link{dclf.progress}} for progress plots. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. See also \code{\link{dg.sigtrace}}.
} \examples{ plot(dclf.sigtrace(cells, Lest, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat.core/man/objsurf.Rd0000644000176200001440000000611014141452520015554 0ustar liggesusers\name{objsurf} \alias{objsurf} \alias{objsurf.dppm} \alias{objsurf.kppm} \alias{objsurf.minconfit} \title{ Objective Function Surface } \description{ For a model that was fitted by optimisation, compute the values of the objective function in a neighbourhood of the optimal value. } \usage{ objsurf(x, \dots) \method{objsurf}{dppm}(x, ..., ngrid = 32, xlim=NULL, ylim=NULL, ratio = 1.5, verbose = TRUE) \method{objsurf}{kppm}(x, ..., ngrid = 32, xlim=NULL, ylim=NULL, ratio = 1.5, verbose = TRUE) \method{objsurf}{minconfit}(x, ..., ngrid = 32, xlim=NULL, ylim=NULL, ratio = 1.5, verbose = TRUE) } \arguments{ \item{x}{ Some kind of model that was fitted by finding the optimal value of an objective function. An object of class \code{"dppm"}, \code{"kppm"} or \code{"minconfit"}. } \item{\dots}{ Extra arguments are usually ignored. } \item{ngrid}{ Number of grid points to evaluate along each axis. Either a single integer, or a pair of integers. For example \code{ngrid=32} would mean a \code{32 * 32} grid. } \item{xlim,ylim}{ Optional. Numeric vectors of length 2, specifying the range of parameter values to be considered. } \item{ratio}{ Number greater than 1 determining the range of parameter values to be considered, if \code{xlim} and \code{ylim} are not specified. If the optimal parameter value is \code{opt} then the objective function will be evaluated for values between \code{opt/ratio} and \code{opt * ratio}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The object \code{x} should be some kind of model that was fitted by maximising or minimising the value of an objective function. The objective function will be evaluated on a grid of values of the model parameters. Currently the following types of objects are accepted: \itemize{ \item an object of class \code{"dppm"} representing a determinantal point process. See \code{\link{dppm}}. \item an object of class \code{"kppm"} representing a cluster point process or Cox point process. See \code{\link{kppm}}. \item an object of class \code{"minconfit"} representing a minimum-contrast fit between a summary function and its theoretical counterpart. See \code{\link{mincontrast}}. } The result is an object of class \code{"objsurf"} which can be printed and plotted: see \code{\link{methods.objsurf}}. } \value{ An object of class \code{"objsurf"} which can be printed and plotted. Essentially a list containing entries \code{x}, \code{y}, \code{z} giving the parameter values and objective function values. There are methods for \code{plot}, \code{print}, \code{summary}, \code{image}, \code{contour} and \code{persp}. } \author{ \adrian and \ege. } \seealso{ \code{\link{methods.objsurf}}, \code{\link{kppm}}, \code{\link{mincontrast}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) if(interactive()) { plot(os) contour(os, add=TRUE) persp(os) } } \keyword{spatial} \keyword{models} spatstat.core/man/as.function.leverage.ppm.Rd0000644000176200001440000000243214141452520020720 0ustar liggesusers\name{as.function.leverage.ppm} \alias{as.function.leverage.ppm} \title{ Convert Leverage Object to Function of Coordinates } \description{ Converts an object of class \code{"leverage.ppm"} to a function of the \eqn{x} and \eqn{y} coordinates. } \usage{ \method{as.function}{leverage.ppm}(x, ...) 
} \arguments{ \item{x}{ Object of class \code{"leverage.ppm"} produced by \code{\link{leverage.ppm}}. } \item{\dots}{ Ignored. } } \details{ An object of class \code{"leverage.ppm"} represents the leverage function of a fitted point process model. This command converts the object to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This function returns the leverage values at the specified locations (calculated by referring to the nearest location where the leverage has been computed). } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.im.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lev <- leverage(fit) f <- as.function(lev) f(0.2, 0.3) # evaluate at (x,y) coordinates y <- f(X) # evaluate at a point pattern } \keyword{spatial} \keyword{manip} spatstat.core/man/plot.quadrattest.Rd0000644000176200001440000000311414141452520017421 0ustar liggesusers\name{plot.quadrattest} \alias{plot.quadrattest} \title{ Display the result of a quadrat counting test. } \description{ Given the result of a quadrat counting test, graphically display the quadrats that were used, the observed and expected counts, and the residual in each quadrat. } \usage{ \method{plot}{quadrattest}(x, ..., textargs=list()) } \arguments{ \item{x}{ Object of class \code{"quadrattest"} containing the result of \code{\link{quadrat.test}}. } \item{\dots}{ Additional arguments passed to \code{\link{plot.tess}} to control the display of the quadrats. } \item{textargs}{ List of additional arguments passed to \code{\link[graphics]{text.default}} to control the appearance of the text. } } \details{ This is the plot method for objects of class \code{"quadrattest"}. Such an object is produced by \code{\link{quadrat.test}} and represents the result of a \eqn{\chi^2}{chi^2} test for a spatial point pattern. The quadrats are first plotted using \code{\link{plot.tess}}. Then in each quadrat, the observed and expected counts and the Pearson residual are displayed as text using \code{\link[graphics]{text.default}}. Observed count is displayed at top left; expected count at top right; and Pearson residual at bottom. } \value{ Null. } \examples{ plot(quadrat.test(swedishpines, 3)) } \seealso{ \code{\link{quadrat.test}}, \code{\link{plot.tess}}, \code{\link[graphics]{text.default}}, \code{\link{plot.quadratcount}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} spatstat.core/man/relrisk.Rd0000644000176200001440000000272514141452520015565 0ustar liggesusers\name{relrisk} \alias{relrisk} \title{ Estimate of Spatially-Varying Relative Risk } \description{ Generic command to estimate the spatially-varying probability of each type of point, or the ratios of such probabilities. } \usage{ relrisk(X, \dots) } \arguments{ \item{X}{ Either a point pattern (class \code{"ppp"}) or a fitted point process model (class \code{"ppm"}) from which the probabilities will be estimated. } \item{\dots}{ Additional arguments appropriate to the method. } } \details{ In a point pattern containing several different types of points, we may be interested in the spatially-varying probability of each possible type, or the relative risks which are the ratios of such probabilities. The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. The function \code{\link{relrisk.ppp}} is the method for point pattern datasets. 
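For a point pattern, a minimal sketch of a typical call is shown below (the bandwidth \code{sigma=0.1} is purely an illustrative value, not a recommendation):
\preformatted{
  ## dispatches to relrisk.ppp for a multitype point pattern
  p <- relrisk(amacrine, sigma=0.1)
  plot(p)
}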
It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. The function \code{\link{relrisk.ppm}} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. } \seealso{ \code{\link{relrisk.ppp}}, \code{\link{relrisk.ppm}}. } \value{ A pixel image, or a list of pixel images, or a numeric vector or matrix, containing the requested estimates of relative risk. } \author{ \spatstatAuthors. } \keyword{spatial} spatstat.core/man/rpoisppOnLines.Rd0000644000176200001440000000742614141452520017101 0ustar liggesusers\name{rpoisppOnLines} \alias{rpoisppOnLines} \title{Generate Poisson Point Pattern on Line Segments} \description{ Given a line segment pattern, generate a Poisson random point pattern on the line segments. } \usage{ rpoisppOnLines(lambda, L, lmax = NULL, \dots, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should be generated. } \item{lmax}{ Optional upper bound (for increased computational efficiency). A known upper bound for the values of \code{lambda}, if \code{lambda} is a function or a pixel image. That is, \code{lmax} should be a number which is known to be greater than or equal to all values of \code{lambda}. } \item{\dots}{Additional arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This command generates a Poisson point process on the one-dimensional system of line segments in \code{L}. The result is a point pattern consisting of points lying on the line segments in \code{L}. The number of random points falling on any given line segment follows a Poisson distribution. The patterns of points on different segments are independent. The intensity \code{lambda} is the expected number of points per unit \bold{length} of line segment. It may be constant, or it may depend on spatial location. In order to generate an unmarked Poisson process, the argument \code{lambda} may be a single number, or a \code{function(x,y)}, or a pixel image (object of class \code{"im"}). In order to generate a \emph{marked} Poisson process, \code{lambda} may be a numeric vector, a list of functions, or a list of images, each entry giving the intensity for a different mark value. If \code{lambda} is not numeric, then the (Lewis-Shedler) rejection method is used. The rejection method requires knowledge of \code{lmax}, the maximum possible value of \code{lambda}. This should be either a single number, or a numeric vector of the same length as \code{lambda}. If \code{lmax} is not given, it will be computed approximately, by sampling many values of \code{lambda}. If \code{lmax} is given, then it \bold{must} be larger than any possible value of \code{lambda}, otherwise the results of the algorithm will be incorrect. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}) in the same window as \code{L}. If \code{nsim > 1}, a list of such point patterns. 
} \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}}, \code{\link{rpoispp}} } \examples{ live <- interactive() L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) if(live) plot(L, main="") # uniform intensity Y <- rpoisppOnLines(4, L) if(live) plot(Y, add=TRUE, pch="+") # uniform MARKED process with types 'a' and 'b' Y <- rpoisppOnLines(c(a=4, b=5), L) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is a function Y <- rpoisppOnLines(function(x,y){ 10 * x^2}, L, 10) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is an image Z <- as.im(function(x,y){10 * sqrt(x+y)}, unit.square()) Y <- rpoisppOnLines(Z, L, 15) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/Ldot.inhom.Rd0000644000176200001440000000655314141452520016130 0ustar liggesusers\name{Ldot.inhom} \alias{Ldot.inhom} \title{ Inhomogeneous Multitype L Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{L} function. } \usage{ Ldot.inhom(X, i, \dots, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous dot-type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kdot.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}(r)}{Li.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{i\bullet}(r)}{Li.(r)} for a marked Poisson process, identical to \eqn{r}. } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}(r)}{Li.(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Ldot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kdot.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{i\bullet}(r)}{Ki.(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \Moller, J. and Waagepetersen, R. (2003) \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Ldot}}, \code{\link{Linhom}}, \code{\link{Kdot.inhom}}, \code{\link{Lcross.inhom}}.
} \examples{ # Lansing Woods data lan <- lansing lan <- lan[seq(1,npoints(lan), by=10)] ma <- split(lan)$maple lg <- unmark(lan) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") L <- Ldot.inhom(lan, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Ldot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rcell.Rd0000644000176200001440000000645014141452520015212 0ustar liggesusers\name{rcell} \alias{rcell} \title{Simulate Baddeley-Silverman Cell Process} \description{ Generates a random point pattern, a simulated realisation of the Baddeley-Silverman cell process model. } \usage{ rcell(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx, N=10, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{ Number of columns of cells in the window. Incompatible with \code{dx}. } \item{ny}{ Number of rows of cells in the window. Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{ Width of the cells. Incompatible with \code{nx}. } \item{dy}{ Height of the cells. Incompatible with \code{ny}. } \item{N}{ Integer. Distributional parameter: the maximum number of random points in each cell. Passed to \code{\link{rcellnumber}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a simulated realisation of the \dQuote{cell process} (Baddeley and Silverman, 1984), a random point process with the same second-order properties as the uniform Poisson process. In particular, the \eqn{K} function of this process is identical to the \eqn{K} function of the uniform Poisson process (aka Complete Spatial Randomness). The same holds for the pair correlation function and all other second-order properties. The cell process is a counterexample to the claim that the \eqn{K} function completely characterises a point pattern. A cell process is generated by dividing space into equal rectangular tiles. In each tile, a random number of random points is placed. By default, there are either \eqn{0}, \eqn{1} or \eqn{10} points, with probabilities \eqn{1/10}, \eqn{8/9} and \eqn{1/90} respectively. The points within a tile are independent and uniformly distributed in that tile, and the numbers of points in different tiles are independent random integers. The tile width is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The tile height is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. The cell process is then generated in these tiles. The random numbers of points are generated by \code{\link{rcellnumber}}. Some of the resulting random points may lie outside the window \code{win}: if they do, they are deleted. 
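For instance, a minimal sketch (the tile size is chosen purely for illustration):
\preformatted{
  ## cell process on the unit square with 0.1 x 0.1 tiles
  X <- rcell(square(1), dx=0.1)
  plot(Kest(X))    # close to the Poisson benchmark pi * r^2
}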
The result is a point pattern inside the window \code{win}. } \seealso{ \code{\link{rcellnumber}}, \code{\link{rstrat}}, \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{Kest}} } \examples{ X <- rcell(nx=15) plot(X) plot(Kest(X)) } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/Kmulti.inhom.Rd0000644000176200001440000002615714141452520016475 0ustar liggesusers\name{Kmulti.inhom} \alias{Kmulti.inhom} \title{ Inhomogeneous Marked K-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{K} function which counts the expected number of points of subset \code{J} within a given distance from a typical point in subset \code{I}, adjusted for spatially varying intensity. } \usage{ Kmulti.inhom(X, I, J, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process \code{X[I]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[I]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process \code{X[J]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[J]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{Ignored.} \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points, the first point belonging to subset \code{I} and the second point to subset \code{J}.
} \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by kernel smoothing. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. } \details{ The function \code{Kmulti.inhom} is the counterpart, for spatially-inhomogeneous marked point patterns, of the multitype \eqn{K} function \code{\link{Kmulti}}. Suppose \eqn{X} is a marked point process, with marks of any kind. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are two sub-processes, possibly overlapping. Typically \eqn{X_I}{X[I]} would consist of those points of \eqn{X} whose marks lie in a specified range of mark values, and similarly for \eqn{X_J}{X[J]}. Suppose that \eqn{\lambda_I(u)}{lambdaI(u)}, \eqn{\lambda_J(u)}{lambdaJ(u)} are the spatially-varying intensity functions of \eqn{X_I}{X[I]} and \eqn{X_J}{X[J]} respectively. Consider all the pairs of points \eqn{(u,v)} in the point process \eqn{X} such that the first point \eqn{u} belongs to \eqn{X_I}{X[I]}, the second point \eqn{v} belongs to \eqn{X_J}{X[J]}, and the distance between \eqn{u} and \eqn{v} is less than a specified distance \eqn{r}. Give this pair \eqn{(u,v)} the numerical weight \eqn{1/(\lambda_I(u)\lambda_J(v))}{1/(lambdaI(u) lambdaJ(v))}. Calculate the sum of these weights over all pairs of points as described. This sum (after appropriate edge-correction and normalisation) is the estimated inhomogeneous multitype \eqn{K} function. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern.
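For example, a minimal sketch, in which the subsets are the two types of a bivariate pattern and the intensities are estimated internally by leave-one-out kernel smoothing (the bandwidth \code{sigma=0.1} is purely illustrative):
\preformatted{
  ## logical subset indices defined by the mark values
  I <- (marks(amacrine) == "on")
  J <- (marks(amacrine) == "off")
  K <- Kmulti.inhom(amacrine, I, J, sigma=0.1)
}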
They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process identified by index \code{I}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the intensity of \code{X[I]} at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the intensity of \code{X[I]} evaluated only at the data points of \code{X[I]}. The length of this vector must equal the number of points in \code{X[I]}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} supplies the values of the intensity of the sub-process identified by index \code{J}. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Biases due to edge effects are treated in the same manner as in \code{\link{Kinhom}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. 
} } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti.inhom}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kmulti}}, \code{\link{Kdot.inhom}}, \code{\link{Kcross.inhom}}, \code{\link{pcf}} } \examples{ # Finnish Pines data: marked by diameter and height plot(finpines, which.marks="height") II <- (marks(finpines)$height <= 2) JJ <- (marks(finpines)$height > 3) K <- Kmulti.inhom(finpines, II, JJ) plot(K) # functions determining subsets f1 <- function(X) { marks(X)$height <= 2 } f2 <- function(X) { marks(X)$height > 3 } K <- Kmulti.inhom(finpines, f1, f2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/localKdot.Rd0000644000176200001440000001144214141452520016022 0ustar liggesusers\name{localKdot} \alias{localKdot} \alias{localLdot} \title{Local Multitype K Function (Dot-Type)} \description{ For a multitype point pattern, computes the dot-type version of the local K function. } \usage{ localKdot(X, from, \dots, rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localLdot(X, from, \dots, rmax = NULL, correction = "Ripley") } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} with marks which are a factor). } \item{\dots}{ Further arguments passed from \code{localLdot} to \code{localKdot}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{ Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ Given a multitype spatial point pattern \code{X}, the local dot-type \eqn{K} function \code{localKdot} is the local version of the multitype \eqn{K} function \code{\link{Kdot}}. Recall that \code{Kdot(X, from)} is a sum of contributions from all pairs of points in \code{X} where the first point belongs to \code{from}. The \emph{local} dot-type \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and it consists of all the contributions to the dot-type \eqn{K} function that originate from point \code{X[i]}: \deqn{ K_{i,from}(r) = \frac a {n-1} \sum_j e_{ij} }{ K[i,from](r) = (a/(n-1)) * sum[j] e[i,j] } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{K_{i,from}(r)}{K[i,from](r)} can also be interpreted as one of the summands that contributes to the global estimate of the \code{\link{Kdot}} function. By default, the function \eqn{K_{i,from}(r)}{K[i,from](r)} is computed for a range of \eqn{r} values for each point \eqn{i} belonging to type \code{from}.
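A minimal sketch:
\preformatted{
  ## local dot-type K function for each point of type "on"
  K <- localKdot(amacrine, from="on")
  plot(K, legend=FALSE)
}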
The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} belonging to type \code{from}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X} belonging to type \code{from}. The local dot-type \eqn{L} function \code{localLdot} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern that belong to type \code{from}. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kdot}}, \code{\link{Ldot}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{ X <- amacrine # compute all the local Ldot functions L <- localLdot(X) # plot all the local Ldot functions against r plot(L, main="local Ldot functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 0.1 L12 <- localLdot(X, rvalue=0.1) } \author{ \ege and \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rhohat.Rd0000644000176200001440000004242614141452520015401 0ustar liggesusers\name{rhohat} \alias{rhohat} \alias{rhohat.ppp} \alias{rhohat.quad} \alias{rhohat.ppm} \alias{rhohat.slrm} \concept{Resource Selection Function} \concept{Prospectivity} \title{ Nonparametric Estimate of Intensity as Function of a Covariate } \description{ Computes a nonparametric estimate of the intensity of a point process, as a function of a (continuous) spatial covariate. } \usage{ rhohat(object, covariate, ...)
\method{rhohat}{ppp}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) \method{rhohat}{quad}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) \method{rhohat}{ppm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) \method{rhohat}{slrm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{weights}{ Optional weights attached to the data points. Either a numeric vector of weights for each data point, or a pixel image (object of class \code{"im"}) or a \code{function(x,y)} providing the weights. } \item{baseline}{ Optional baseline for intensity function. A \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the baseline at any location. } \item{method}{ Character string determining the smoothing method. See Details. } \item{horvitz}{ Logical value indicating whether to use Horvitz-Thompson weights. See Details. } \item{smoother}{ Character string determining the smoothing algorithm. See Details. } \item{subset}{ Optional. A spatial window (object of class \code{"owin"}) specifying a subset of the data, from which the estimate should be calculated. } \item{dimyx,eps}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. 
} \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } \item{positiveCI}{ Logical value. If \code{TRUE}, confidence limits are always positive numbers; if \code{FALSE}, the lower limit of the confidence interval may sometimes be negative. Default is \code{FALSE} if \code{smoother="kernel"} and \code{TRUE} if \code{smoother="local"}. See Details. } \item{breaks}{ Breakpoints for the piecewise-constant function computed when \code{smoother='piecewise'}. Either a vector of numeric values specifying the breakpoints, or a single integer specifying the number of equally-spaced breakpoints. There is a sensible default. } } \details{ This command estimates the relationship between point process intensity and a given spatial covariate. Such a relationship is sometimes called a \emph{resource selection function} (if the points are organisms and the covariate is a descriptor of habitat) or a \emph{prospectivity index} (if the points are mineral deposits and the covariate is a geological variable). This command uses nonparametric methods which do not assume a particular form for the relationship. If \code{object} is a point pattern, and \code{baseline} is missing or null, this command assumes that \code{object} is a realisation of a point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is the resource selection function or prospectivity index. A nonparametric estimator of the function \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a point pattern, and \code{baseline} is given, then the intensity function is assumed to be \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)} where \eqn{B(u)} is the baseline intensity at location \eqn{u}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. The nonparametric estimation procedure is controlled by the arguments \code{smoother}, \code{method} and \code{horvitz}. The argument \code{smoother} selects the type of estimation technique. \itemize{ \item If \code{smoother="kernel"} (the default) or \code{smoother="local"}, the nonparametric estimator is a \emph{smoothing estimator} of \eqn{\rho(z)}{rho(z)}, effectively a kind of density estimator (Baddeley et al, 2012). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. Confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="increasing"} or \code{smoother="decreasing"}, we use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). This assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. See the section on \emph{Monotone estimates}. 
\item If \code{smoother="piecewise"}, the estimate of \eqn{\rho(z)}{rho(z)} is piecewise constant. The range of covariate values is divided into several intervals (ranges or bands). The endpoints of these intervals are the breakpoints, which may be specified by the argument \code{breaks}; there is a sensible default. The estimate of \eqn{\rho(z)}{rho(z)} takes a constant value on each interval. The estimate of \eqn{\rho(z)}{rho(z)} in each interval of covariate values is simply the average intensity (number of points per unit area) in the relevant sub-region. } See Baddeley (2018) for a comparison of these estimation techniques. If the argument \code{weights} is present, then the contribution from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is multiplied by \code{weights[i]}. If the argument \code{subset} is present, then the calculations are performed using only the data inside this spatial region. This technique assumes that \code{covariate} has continuous values. It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. For a categorical covariate, use \code{\link{intensity.quadratcount}} applied to the result of \code{\link{quadratcount}(X, tess=covariate)}. The argument \code{covariate} should be a pixel image, or a function, or one of the strings \code{"x"} or \code{"y"} signifying the cartesian coordinates. It will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{dimyx,eps} which are passed to \code{\link{as.mask}}. } \section{Smooth estimates}{ Smooth estimators of \eqn{\rho(z)}{rho(z)} were proposed by Baddeley and Turner (2005) and Baddeley et al (2012). Similar estimators were proposed by Guan (2008) and in the literature on relative distributions (Handcock and Morris, 1999). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. The smooth estimation procedure involves computing several density estimates and combining them. The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{\link[locfit]{locfit}}. } The argument \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates, The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. See Baddeley et al (2012), equation (8). This is similar but not identical to an estimator proposed by Guan (2008). \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. See Baddeley et al (2012), equation (9). 
\item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. See Baddeley et al (2012), equation (10). } If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Pointwise confidence intervals for the true value of \eqn{\rho(z)} are also calculated for each \eqn{z}, and will be plotted as grey shading. The confidence intervals are derived using the central limit theorem, based on variance calculations which assume a Poisson point process. If \code{positiveCI=FALSE}, the lower limit of the confidence interval may sometimes be negative, because the confidence intervals are based on a normal approximation to the estimate of \eqn{\rho(z)}. If \code{positiveCI=TRUE}, the confidence limits are always positive, because the confidence interval is based on a normal approximation to the estimate of \eqn{\log(\rho(z))}{log(\rho(z))}. For consistency with earlier versions, the default is \code{positiveCI=FALSE} for \code{smoother="kernel"} and \code{positiveCI=TRUE} for \code{smoother="local"}. } \section{Monotone estimates}{ The nonparametric maximum likelihood estimator of a monotone function \eqn{\rho(z)}{rho(z)} was described by Sager (1982). This method assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. This estimator is chosen by specifying \code{smoother="increasing"} or \code{smoother="decreasing"}. The argument \code{method} is ignored this case. To compute the estimate of \eqn{\rho(z)}{rho(z)}, the algorithm first computes several primitive step-function estimates, and then takes the maximum of these primitive functions. If \code{smoother="decreasing"}, each primitive step function takes the form \eqn{\rho(z) = \lambda}{rho(z) = lambda} when \eqn{z \le t}, and \eqn{\rho(z) = 0}{rho(z) = 0} when \eqn{z > t}, where and \eqn{\lambda}{lambda} is a primitive estimate of intensity based on the data for \eqn{Z \le t}{Z <= t}. The jump location \eqn{t} will be the value of the covariate \eqn{Z} at one of the data points. The primitive estimate \eqn{\lambda}{lambda} is the average intensity (number of points divided by area) for the region of space where the covariate value is less than or equal to \eqn{t}. If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Confidence intervals are not available for the monotone estimators. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} (and confidence limits) for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. 
} \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York. ISBN: 0-387-28311-0. Baddeley, A. (2018) A statistical commentary on mineral prospectivity analysis. Chapter 2, pages 25--65 in \emph{Handbook of Mathematical Geosciences: Fifty Years of IAMG}, edited by B.S. Daya Sagar, Q. Cheng and F.P. Agterberg. Springer, Berlin. Guan, Y. (2008) On consistent nonparametric intensity estimation for inhomogeneous spatial point processes. \emph{Journal of the American Statistical Association} \bold{103}, 1238--1247. Handcock, M.S. and Morris, M. (1999) \emph{Relative Distribution Methods in the Social Sciences}. Springer, New York. Sager, T.W. (1982) Nonparametric maximum likelihood estimation of spatial patterns. \emph{Annals of Statistics} \bold{10}, 1125--1136. } \author{ Smoothing algorithm by \adrian, Ya-Mei Chang, Yong Song, and \rolf. Nonparametric maximum likelihood algorithm by \adrian. } \seealso{ \code{\link{rho2hat}}, \code{\link{methods.rhohat}}, \code{\link{parres}}. See \code{\link{ppm}} for a parametric method for the same problem. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, "x") rho <- rhohat(X, function(x,y){x}) plot(rho) curve(exp(3+3*x), lty=3, col=4, lwd=2, add=TRUE) rhoB <- rhohat(X, "x", method="reweight") rhoC <- rhohat(X, "x", method="transform") rhoM <- rhohat(X, "x", smoother="increasing") plot(rhoM, add=TRUE, .y ~ .x, col=6) legend("top", lty=c(3, 1), col=c(4, 6), lwd=c(2, 1), legend=c("true", "increasing")) \testonly{rh <- rhohat(X, "x", dimyx=32)} fit <- ppm(X ~ x) rr <- rhohat(fit, "y") } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.core/man/emend.Rd0000644000176200001440000000157614141452520015205 0ustar liggesusers\name{emend} \alias{emend} \title{ Force Model to be Valid } \description{ Check whether a model is valid, and if not, find the nearest model which is valid. } \usage{ emend(object, \dots) } \arguments{ \item{object}{ A statistical model, belonging to some class. } \item{\dots}{Arguments passed to methods.} } \details{ The function \code{emend} is generic, and has methods for several classes of statistical models in the \pkg{spatstat} package (mostly point process models). Its purpose is to check whether a given model is valid (for example, that none of the model parameters are \code{NA}) and, if not, to find the nearest model which is valid. See the methods for more information. } \value{ Another model of the same kind. } \author{ \spatstatAuthors. } \seealso{ \code{\link{emend.ppm}}, \code{\link{valid}}. } \keyword{spatial} \keyword{models} spatstat.core/man/simulate.slrm.Rd0000644000176200001440000000503714141452520016710 0ustar liggesusers\name{simulate.slrm} \alias{simulate.slrm} \title{Simulate a Fitted Spatial Logistic Regression Model} \description{ Generates simulated realisations from a fitted spatial logistic regression model. } \usage{ \method{simulate}{slrm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) } \arguments{ \item{object}{ Fitted spatial logistic regression model. An object of class \code{"slrm"}.
} \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"slrm"} of fitted spatial logistic regression models. Simulations are performed by \code{\link{rpoispp}} after the intensity has been computed by \code{\link{predict.slrm}}. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \examples{ X <- copper$SouthPoints fit <- slrm(X ~ 1) simulate(fit, 2) fitxy <- slrm(X ~ x+y) simulate(fitxy, 2, window=square(2)) } \seealso{ \code{\link{slrm}}, \code{\link{rpoispp}}, \code{\link{simulate.ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/quadrat.test.mppm.Rd0000644000176200001440000000752414141452520017503 0ustar liggesusers\name{quadrat.test.mppm} \alias{quadrat.test.mppm} \title{Chi-Squared Test for Multiple Point Process Model Based on Quadrat Counts} \description{ Performs a chi-squared goodness-of-fit test of a Poisson point process model fitted to multiple point patterns. } \usage{ \method{quadrat.test}{mppm}(X, ...) } \arguments{ \item{X}{ An object of class \code{"mppm"} representing a point process model fitted to multiple point patterns. It should be a Poisson model. } \item{\dots}{ Arguments passed to \code{\link{quadrat.test.ppm}} which determine the size of the quadrats. } } \details{ This function performs a \eqn{\chi^2}{chi^2} test of goodness-of-fit for a Poisson point process model, based on quadrat counts. It can also be used to perform a test of Complete Spatial Randomness for a list of point patterns. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), point process models (class \code{"ppm"}) and multiple point process models (class \code{"mppm"}). For this function, the argument \code{X} should be a multiple point process model (object of class \code{"mppm"}) obtained by fitting a point process model to a list of point patterns using the function \code{\link{mppm}}. To perform the test, the data point patterns are extracted from \code{X}. 
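A typical call is sketched below (a minimal sketch using the \code{waterstriders} data, as in the examples below; the quadrat layout \code{nx=3} is illustrative only):
\preformatted{
  H <- hyperframe(X=waterstriders)
  fit <- mppm(X ~ 1, H)     # Poisson model, same intensity for all patterns
  quadrat.test(fit, nx=3)   # chi-squared test based on quadrat counts
}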
For each point pattern \itemize{ \item The window of observation is divided into rectangular tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. \item The expected number of points in each quadrat is calculated, as determined by the fitted model. } Then we perform a single \eqn{\chi^2}{chi^2} test of goodness-of-fit based on these observed and expected counts. } \section{Testing Complete Spatial Randomness}{ If the intention is to test Complete Spatial Randomness (CSR) there are two options: \itemize{ \item CSR with the same intensity of points in each point pattern; \item CSR with a different, unrelated intensity of points in each point pattern. } In the first case, suppose the point patterns to be tested are stored in a hyperframe \code{H}, in a column named \code{X}. Then fit the multiple model \code{fit1 <- mppm(X ~ 1, H)}, which signifies a Poisson point process model with the same constant intensity for all patterns, and apply \code{quadrat.test(fit1)}. In the second case, fit the model \code{fit2 <- mppm(X ~ id, H)}, which signifies a Poisson point process with a different constant intensity for each point pattern, and apply \code{quadrat.test(fit2)}. } \value{ An object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The \eqn{p}-value of the test is stored in the component \code{p.value}. The return value also belongs to the special class \code{"quadrattest"}. Plotting the object will display, for each window, the position of the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. The return value also has an attribute \code{"components"} which is a list containing the results of \eqn{\chi^2}{chi^2} tests of goodness-of-fit for each individual point pattern. } \seealso{ \code{\link{mppm}}, \code{\link{quadrat.test}} } \examples{ H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian, \rolf and \ege } \keyword{spatial} \keyword{htest} spatstat.core/man/psstA.Rd0000644000176200001440000001541214141452520015201 0ustar liggesusers\name{psstA} \Rdversion{1.1} \alias{psstA} \title{ Pseudoscore Diagnostic For Fitted Model against Area-Interaction Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of area-interaction type. } \usage{ psstA(object, r = NULL, breaks = NULL, \dots, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection = "border", correction = "all", truecoef = NULL, hi.res = NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default.
} \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Extra arguments passed to \code{\link{quadscheme}} to determine the quadrature scheme, if \code{object} is a point pattern. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{ppmcorrection}{ Optional. Character string specifying the edge correction for the pseudolikelihood to be used in fitting the point process model. Passed to \code{\link{ppm}}. } \item{correction}{ Optional. Character string specifying which diagnostic quantities will be computed. Options are \code{"all"} and \code{"best"}. The default is to compute all diagnostic quantities. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{nr}{ Optional. Number of \code{r} values to be used if \code{r} is not specified. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the approximate area. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the area-interaction process (see \code{\link{AreaInter}}). The family of alternatives includes models that are slightly more regular than the fitted model, and others that are slightly more clustered than the fitted model. The pseudoscore, evaluated at the null model, is \deqn{ V(r) = \sum_i A(x_i, x, r) - \int_W A(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( A(x[i], x, r)) - integral( A(u,x,r) lambda(u,x) du) } where \deqn{ A(u,x,r) = B(x\cup\{u\},r) - B(x\setminus u, r) }{ A(u,x,r) = B(x union u, r) - B(x setminus u, r) } where \eqn{B(x,r)} is the area of the union of the discs of radius \eqn{r} centred at the points of \eqn{x} (i.e. \eqn{B(x,r)} is the area of the dilation of \eqn{x} by a distance \eqn{r}). Thus \eqn{A(u,x,r)} is the \emph{unclaimed area} associated with \eqn{u}, that is, the area of that part of the disc of radius \eqn{r} centred at the point \eqn{u} that is not covered by any of the discs of radius \eqn{r} centred at points of \eqn{x}. 
According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \section{Warning}{ This computation can take a \bold{very long time}. To shorten the computation time, choose smaller values of the arguments \code{nr} and \code{ngrid}, or reduce the values of their defaults \code{spatstat.options("psstA.nr")} and \code{spatstat.options("psstA.ngrid")}. Computation time is roughly proportional to \code{nr * npoints * ngrid^2} where \code{npoints} is the number of points in the point pattern. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Alternative functions: \code{\link{psstG}}, \code{\link{psst}}, \code{\link{Gres}}, \code{\link{Kres}}. Point process models: \code{\link{ppm}}. Options: \code{\link{spatstat.options}} } \examples{ if(live <- interactive()) { X <- rStrauss(200,0.1,0.05) } else { pso <- spatstat.options(psstA.ngrid=16,psstA.nr=10, ndummy.min=16,npixel=32) X <- cells } plot(psstA(X)) plot(psstA(X, interaction=Strauss(0.05))) if(!live) spatstat.options(pso) } \keyword{spatial} \keyword{models} spatstat.core/man/Smooth.fv.Rd0000644000176200001440000000550014141452520015767 0ustar liggesusers\name{Smooth.fv} \alias{Smooth.fv} \title{ Apply Smoothing to Function Values } \description{ Applies smoothing to the values in selected columns of a function value table. } \usage{ \method{Smooth}{fv}(X, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) } \arguments{ \item{X}{ Values to be smoothed. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be smoothed. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} or \code{\link[stats]{loess}} to control the smoothing. } \item{method}{ Smoothing algorithm. A character string, partially matched to either \code{"smooth.spline"} or \code{"loess"}. } \item{xinterval}{ Optional. Numeric vector of length 2 specifying a range of \eqn{x} values. Smoothing will be performed only on the part of the function corresponding to this range. } } \details{ The command \code{Smooth.fv} applies smoothing to the function values in a function value table (object of class \code{"fv"}). \code{Smooth.fv} is a method for the generic function \code{\link{Smooth}}. The smoothing is performed either by \code{\link[stats]{smooth.spline}} or by \code{\link[stats]{loess}}. 
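For example (a minimal sketch; the smoothing parameters \code{df} and \code{span} are illustrative values, passed through \code{\dots} to the underlying smoothers):
\preformatted{
  G <- Gest(cells)                               # a function value table
  Gs <- Smooth(G, method="smooth.spline", df=9)  # spline smoothing
  Gl <- Smooth(G, method="loess", span=0.75)     # loess smoothing
}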
Smoothing is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding smooth interpolated function values. The optional argument \code{which} specifies which of the columns of function values in \code{X} will be smoothed. The default (indicated by the wildcard \code{which="*"}) is to smooth all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{X}. If the argument \code{xinterval} is given, then smoothing will be performed only in the specified range of \eqn{x} values. } \value{ Another function value table (object of class \code{"fv"}) of the same format. } \author{\adrian and \rolf } \seealso{ \code{\link{Smooth}}, \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}}, \code{\link[stats]{loess}} } \examples{ data(cells) G <- Gest(cells) plot(G) plot(Smooth(G, df=9), add=TRUE) } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/rpoispp3.Rd0000644000176200001440000000300614141452520015662 0ustar liggesusers\name{rpoispp3} \alias{rpoispp3} \title{ Generate Poisson Point Pattern in Three Dimensions } \description{ Generate a random three-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoispp3(lambda, domain = box3(), nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in three dimensions (an object of class \code{"pp3"}). If \code{nsim > 1}, a list of such point patterns. } \details{ This function generates a realisation of the homogeneous Poisson process in three dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the three-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"box3"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. } \seealso{ \code{\link{runifpoint3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- rpoispp3(50) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/valid.Rd0000644000176200001440000000211514141452520015202 0ustar liggesusers\name{valid} \alias{valid} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a point process model object corresponds to a valid point process. } \usage{ valid(object, \dots) } \arguments{ \item{object}{ Object of some class, describing a point process model. } \item{\dots}{ Additional arguments passed to methods. } } \details{ The function \code{valid} is generic, with methods for the classes \code{"ppm"} and \code{"dppmodel"}.
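A typical call is sketched below (a minimal sketch; the Strauss interaction radius \code{0.05} is illustrative):
\preformatted{
  fit <- ppm(cells, ~1, Strauss(0.05))  # fit a Gibbs point process model
  valid(fit)                            # TRUE if the fitted model is valid
}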
An object representing a point process is called valid if all its parameter values are known (for example, no parameter takes the value \code{NA} or \code{NaN}) and the parameter values correspond to a well-defined point process (for example, the parameter values satisfy all the constraints imposed by mathematical theory). See the methods for further details. } \value{ A logical value, or \code{NA}. } \author{ \adrian, \rolf and \ege } \seealso{ \code{\link{valid.ppm}}, \code{\link{valid.detpointprocfamily}} } \keyword{spatial} \keyword{models} spatstat.core/man/model.depends.Rd0000644000176200001440000000672014141452520016632 0ustar liggesusers\name{model.depends} \alias{model.depends} \alias{model.is.additive} \alias{model.covariates} \alias{has.offset.term} \alias{has.offset} \title{ Identify Covariates Involved in each Model Term } \description{ Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. } \usage{ model.depends(object) model.is.additive(object) model.covariates(object, fitted=TRUE, offset=TRUE) has.offset.term(object) has.offset(object) } \arguments{ \item{object}{ A fitted model of any kind. } \item{fitted,offset}{ Logical values determining which type of covariates to include. } } \details{ The \code{object} can be a fitted model of any kind, including models of the classes \code{\link{lm}}, \code{\link{glm}} and \code{\link{ppm}}. To be precise, \code{object} must belong to a class for which there are methods for \code{\link{formula}}, \code{\link{terms}} and \code{\link{model.matrix}}. The command \code{model.depends} determines the relationship between the original covariates (the data supplied when \code{object} was fitted) and the canonical covariates (the columns of the design matrix). It returns a logical matrix, with one row for each canonical covariate, and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th canonical covariate depends on the \code{j}th original covariate. If the model formula of \code{object} includes offset terms (see \code{\link{offset}}), then the return value of \code{model.depends} also has an attribute \code{"offset"}. This is a logical value or matrix with one row for each offset term and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th offset term depends on the \code{j}th original covariate. The command \code{model.covariates} returns a character vector containing the names of all (original) covariates that were actually used to fit the model. By default, this includes all covariates that appear in the model formula, including offset terms as well as canonical covariate terms. To omit the offset terms, set \code{offset=FALSE}. To omit the canonical covariate terms, set \code{fitted=FALSE}. The command \code{model.is.additive} determines whether the model is additive, in the sense that there is no canonical covariate that depends on two or more original covariates. It returns a logical value. The command \code{has.offset.term} is a faster way to determine whether the model \emph{formula} includes an \code{offset} term. The functions \code{model.depends} and \code{has.offset.term} only detect \code{offset} terms which are present in the model formula. They do not detect numerical offsets in the model object that were inserted using the \code{offset} argument in \code{lm}, \code{glm} etc.
To detect the presence of offsets of both kinds, use \code{has.offset}. } \value{ A logical value or matrix. } \seealso{ \code{\link{ppm}}, \code{\link{model.matrix}} } \examples{ x <- 1:10 y <- 3*x + 2 z <- rep(c(-1,1), 5) fit <- lm(y ~ poly(x,2) + sin(z)) model.depends(fit) model.covariates(fit) model.is.additive(fit) fitoff1 <- lm(y ~ x + offset(z)) fitoff2 <- lm(y ~ x, offset=z) has.offset.term(fitoff1) has.offset(fitoff1) has.offset.term(fitoff2) has.offset(fitoff2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.core/man/Smooth.ppp.Rd0000644000176200001440000002053414141452520016157 0ustar liggesusers\name{Smooth.ppp} \alias{Smooth.ppp} \alias{markmean} \alias{markvar} \title{Spatial smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations. Uses kernel smoothing and least-squares cross-validated bandwidth selection. } \usage{ \method{Smooth}{ppp}(X, sigma=NULL, ..., weights = rep(1, npoints(X)), at="pixels", adjust=1, varcov=NULL, edge=TRUE, diggle=FALSE, kernel="gaussian", scalekernel=is.character(kernel), geometric=FALSE) markmean(X, ...) markvar(X, sigma=NULL, ..., weights=NULL, varcov=NULL) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Smoothing bandwidth. A single positive number, a numeric vector of length 2, or a function that selects the bandwidth automatically. See \code{\link{density.ppp}}. } \item{\dots}{ Further arguments passed to \code{\link{bw.smoothppp}} and \code{\link{density.ppp}} to control the kernel smoothing and the pixel resolution of the result. } \item{weights}{ Optional weights attached to the observations. A numeric vector, a \code{function(x,y)}, a pixel image, or an \code{expression}. See \code{\link{density.ppp}}. } \item{at}{ String specifying whether to compute the smoothed values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{edge,diggle}{ Arguments passed to \code{\link{density.ppp}} to determine the edge correction. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{varcov}{ Variance-covariance matrix. An alternative to \code{sigma}. See \code{\link{density.ppp}}. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{geometric}{ Logical value indicating whether to perform geometric mean smoothing instead of arithmetic mean smoothing. See Details. } } \details{ The function \code{Smooth.ppp} performs spatial smoothing of numeric values observed at a set of irregular locations. The functions \code{markmean} and \code{markvar} are wrappers for \code{Smooth.ppp} which compute the spatially-varying mean and variance of the marks of a point pattern. 
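For example (a minimal sketch using the \code{longleaf} data; the bandwidth \code{sigma=10} is illustrative):
\preformatted{
  mu <- markmean(longleaf, sigma=10)  # spatially-varying mean tree diameter
  v  <- markvar(longleaf, sigma=10)   # spatially-varying mark variance
}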
\code{Smooth.ppp} is a method for the generic function \code{\link{Smooth}} for the class \code{"ppp"} of point patterns. Thus you can type simply \code{Smooth(X)}. Smoothing is performed by kernel weighting, using the Gaussian kernel by default. If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i}{\sum_i k(u-x_i)} }{ g(u) = (sum of k(u-x[i]) v[i])/(sum of k(u-x[i])) } where \eqn{k} is the kernel (a Gaussian kernel by default). This is known as the Nadaraya-Watson smoother (Nadaraya, 1964, 1989; Watson, 1964). By default, the smoothing kernel bandwidth is chosen by least squares cross-validation (see below). The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame (in \code{Smooth.ppp} and \code{markmean}). Then the smoothing procedure is applied to each column of marks. The numerator and denominator are computed by \code{\link{density.ppp}}. The arguments \code{...} control the smoothing kernel parameters and determine whether edge correction is applied. The smoothing kernel bandwidth can be specified by either of the arguments \code{sigma} or \code{varcov} which are passed to \code{\link{density.ppp}}. If neither of these arguments is present, then by default the bandwidth is selected by least squares cross-validation, using \code{\link{bw.smoothppp}}. The optional argument \code{weights} allows numerical weights to be applied to the data. If a weight \eqn{w_i}{w[i]} is associated with location \eqn{x_i}{x[i]}, then the smoothed function is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i w_i}{\sum_i k(u-x_i) w_i} }{ g(u) = (sum of k(u-x[i]) v[i] w[i])/(sum of k(u-x[i]) w[i]) } If \code{geometric=TRUE} then geometric mean smoothing is performed instead of arithmetic mean smoothing. The mark values must be non-negative numbers. The logarithm of the mark values is computed; these logarithmic values are kernel-smoothed as described above; then the exponential function is applied to the smoothed values. An alternative to kernel smoothing is inverse-distance weighting, which is performed by \code{\link{idw}}. } \section{Very small bandwidth}{ If the chosen bandwidth \code{sigma} is very small, kernel smoothing is mathematically equivalent to nearest-neighbour interpolation; the result will be computed by \code{\link{nnmark}}. This is unless \code{at="points"} and \code{leaveoneout=FALSE}, when the original mark values are returned. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. 
This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. } \seealso{ \code{\link{Smooth}}, \code{\link{density.ppp}}, \code{\link{bw.smoothppp}}, \code{\link{nnmark}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{idw}} for inverse-distance weighted smoothing. To perform interpolation, see also the \code{akima} package. } \examples{ # Longleaf data - tree locations, marked by tree diameter # Local smoothing of tree diameter (automatic bandwidth selection) Z <- Smooth(longleaf) # Kernel bandwidth sigma=5 plot(Smooth(longleaf, 5)) # mark variance plot(markvar(longleaf, sigma=5)) # data frame of marks: trees marked by diameter and height plot(Smooth(finpines, sigma=2)) head(Smooth(finpines, sigma=2, at="points")) } \author{ \spatstatAuthors. } \references{ Nadaraya, E.A. (1964) On estimating regression. \emph{Theory of Probability and its Applications} \bold{9}, 141--142. Nadaraya, E.A. (1989) \emph{Nonparametric estimation of probability densities and regression curves}. Kluwer, Dordrecht. Watson, G.S. (1964) Smooth regression analysis. \emph{Sankhya A} \bold{26}, 359--372. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.core/man/HierStraussHard.Rd0000644000176200001440000001172514141452520017165 0ustar liggesusers\name{HierStraussHard} \alias{HierStraussHard} \title{The Hierarchical Strauss Hard Core Point Process Model} \description{ Creates an instance of the hierarchical Strauss-hard core point process model which can then be fitted to point pattern data. } \usage{ HierStraussHard(iradii, hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss-hard core process with interaction radii \eqn{iradii[i,j]} and hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) Strauss hard core process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]}, hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. 
If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical Strauss hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant \eqn{\beta}{beta}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss hard core process pairwise interaction is yielded by the function \code{HierStraussHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{iradii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrices \code{iradii} and \code{hradii} must be square, with entries which are either positive numbers or zero or \code{NA}. A value of zero or \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hard core distances are specified in \code{HierStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStraussHard()}. } \seealso{ \code{\link{MultiStraussHard}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStrauss}}. } \examples{ r <- matrix(c(30, NA, 40, 30), nrow=2,ncol=2) h <- matrix(c(4, NA, 10, 15), 2, 2) HierStraussHard(r, h) # prints a sensible description of itself ppm(ants ~1, HierStraussHard(r, h)) # fit the stationary hierarchical Strauss-hard core process to ants data } \author{\adrian, \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. } \keyword{spatial} \keyword{models} spatstat.core/man/Kcom.Rd0000644000176200001440000002207114141452520014777 0ustar liggesusers\name{Kcom} \Rdversion{1.1} \alias{Kcom} \title{ Model Compensator of K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the \eqn{K} function based on the fitted model (as well as the usual nonparametric estimates of \eqn{K} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{K} functions serves as a diagnostic for the model.
} \usage{ Kcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "isotropic", "translate"), conditional = !is.poisson(object), restrict = FALSE, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), compute.var = TRUE, truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{K(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for advanced use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional vector of character strings specifying the edge correction(s) to be used. See \code{\link{Kest}} for options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{compute.var}{ Logical value indicating whether to compute the Poincare variance bound for the residual \eqn{K} function (calculation is only implemented for the isotropic correction). } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes an estimate of the \eqn{K} function of the dataset, together with a \emph{model compensator} of the \eqn{K} function, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. 
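For example, the following two calls are intended to be equivalent (a minimal sketch; the Strauss interaction radius \code{0.1} is illustrative):
\preformatted{
  K1 <- Kcom(cells, interaction=Strauss(0.1))  # model fitted internally
  fit <- ppm(cells, ~1, Strauss(0.1))
  K2 <- Kcom(fit)                              # model fitted beforehand
}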
The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{K} function. It then also computes the \emph{model compensator} of the \eqn{K} function. The different function estimates are returned as columns in a data frame (of class \code{"fv"}). The argument \code{correction} determines the edge correction(s) to be applied. See \code{\link{Kest}} for explanation of the principle of edge corrections. The following table gives the options for the \code{correction} argument, and the corresponding column names in the result: \tabular{llll}{ \code{correction} \tab \bold{description of correction} \tab \bold{nonparametric} \tab \bold{compensator} \cr \code{"isotropic"} \tab Ripley isotropic correction \tab \code{iso} \tab \code{icom} \cr \code{"translate"} \tab Ohser-Stoyan translation correction \tab \code{trans} \tab \code{tcom} \cr \code{"border"} \tab border correction \tab \code{border} \tab \code{bcom} } The nonparametric estimates can all be expressed in the form \deqn{ \hat K(r) = \sum_i \sum_{j < i} e(x_i,x_j,r,x) I\{ d(x_i,x_j) \le r \} }{ K(r) = sum[i] sum[j < i] e(x[i], x[j], r, x) I( d(x[i],x[j]) <= r ) } where \eqn{x_i}{x[i]} is the \eqn{i}-th data point, \eqn{d(x_i,x_j)}{d(x[i],x[j])} is the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r,x)}{e(x[i],x[j],r,x)} is a term that serves to correct edge effects and to re-normalise the sum. The corresponding model compensator is \deqn{ {\bf C} \, \tilde K(r) = \int_W \lambda(u,x) \sum_j e(u,x_j,r,x \cup u) I\{ d(u,x_j) \le r\} }{ C K(r) = integral[u] lambda(u,x) sum[j] e(u, x[j], r, x+u) I( d(u,x[j]) <= r ) } where the integral is over all locations \eqn{u} in the observation window, \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{x \cup u}{x+u} denotes the data point pattern \eqn{x} augmented by adding the extra point \eqn{u}. If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix D of Baddeley, Rubak and \Moller (2011). Thus, by default, the reweighting estimator is computed for non-Poisson models. The nonparametric estimates of \eqn{K(r)} are approximately unbiased estimates of the \eqn{K}-function, assuming the point process is stationary. The model compensators are unbiased estimates \emph{of the mean values of the corresponding nonparametric estimates}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric estimates and model compensators is approximately zero. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. 
(2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Kres}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gcom}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=16)} if(interactive()) { plot(Kcom(fit0)) # compare the isotropic-correction estimates plot(Kcom(fit0), cbind(iso, icom) ~ r) # uniform Poisson is clearly not correct } fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kcom(fit1) K1 if(interactive()) { plot(K1) plot(K1, cbind(iso, icom) ~ r) plot(K1, cbind(trans, tcom) ~ r) # how to plot the difference between nonparametric estimates and compensators plot(K1, iso - icom ~ r) # fit looks approximately OK; try adjusting interaction distance } fit2 <- ppm(cells, ~1, Strauss(0.12)) \testonly{fit2 <- ppm(cells, ~1, Strauss(0.12), nd=16)} K2 <- Kcom(fit2) if(interactive()) { plot(K2) plot(K2, cbind(iso, icom) ~ r) plot(K2, iso - icom ~ r) } } \keyword{spatial} \keyword{models} spatstat.core/man/quadrat.test.Rd0000644000176200001440000002632714141452520016535 0ustar liggesusers\name{quadrat.test} \alias{quadrat.test} \alias{quadrat.test.ppp} \alias{quadrat.test.ppm} \alias{quadrat.test.slrm} \alias{quadrat.test.quadratcount} \title{Dispersion Test for Spatial Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for a given point pattern, based on quadrat counts. Alternatively performs a goodness-of-fit test of a fitted inhomogeneous Poisson model. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ quadrat.test(X, ...) \method{quadrat.test}{ppp}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{ppm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{slrm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{quadratcount}(X, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., nsim=1999) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) to be subjected to the goodness-of-fit test. Alternatively a fitted point process model (object of class \code{"ppm"} or \code{"slrm"}) to be tested. Alternatively \code{X} can be the result of applying \code{\link{quadratcount}} to a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{alternative}{ Character string (partially matched) specifying the alternative hypothesis. } \item{method}{ Character string (partially matched) specifying the test to use: either \code{method="Chisq"} for the chi-squared test (the default), or \code{method="MonteCarlo"} for a Monte Carlo test. } \item{conditional}{ Logical. 
Should the Monte Carlo test be conducted conditionally upon the observed number of points of the pattern? Ignored if \code{method="Chisq"}. } \item{CR}{ Optional. Numerical value. The exponent for the Cressie-Read test statistic. See Details. } \item{lambda}{ Optional. Pixel image (object of class \code{"im"}) or function (class \code{"funxy"}) giving the predicted intensity of the point process. } \item{df.est}{ Optional. Advanced use only. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. } \item{\dots}{Ignored.} \item{xbreaks}{ Optional. Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Optional. Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) determining the quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}. } \item{nsim}{ The number of simulated samples to generate when \code{method="MonteCarlo"}. } } \details{ These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests of goodness-of-fit for a point process model, based on quadrat counts. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}), point process models (class \code{"ppm"} or \code{"slrm"}) and quadrat count tables (class \code{"quadratcount"}). \itemize{ \item If \code{X} is a point pattern, we test the null hypothesis that the data pattern is a realisation of Complete Spatial Randomness (the uniform Poisson point process). Marks in the point pattern are ignored. (If \code{lambda} is given then the null hypothesis is the Poisson process with intensity \code{lambda}.) \item If \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness. See \code{\link{quadrat.test.splitppp}} for documentation. \item If \code{X} is a fitted point process model, then it should be a Poisson point process model. The data to which this model was fitted are extracted from the model object, and are treated as the data point pattern for the test. We test the null hypothesis that the data pattern is a realisation of the (inhomogeneous) Poisson point process specified by \code{X}. } In all cases, the window of observation is divided into tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. The quadrats are rectangular by default, or may be regions of arbitrary shape specified by the argument \code{tess}. The expected number of points in each quadrat is also calculated, as determined by CSR (in the first case) or by the fitted model (in the second case). Then the Pearson \eqn{X^2} statistic \deqn{ X^2 = \sum \frac{(\mbox{observed} - \mbox{expected})^2}{\mbox{expected}} }{ X^2 = sum((observed - expected)^2/expected) } is computed. If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of goodness-of-fit is performed by comparing the test statistic to the \eqn{\chi^2}{chi^2} distribution with \eqn{m-k} degrees of freedom, where \code{m} is the number of quadrats and \eqn{k} is the number of fitted parameters (equal to 1 for \code{quadrat.test.ppp}). The default is to compute the \emph{two-sided} \eqn{p}-value, so that the test will be declared significant if \eqn{X^2} is either very large or very small.
One-sided \eqn{p}-values can be obtained by specifying the \code{alternative}. An important requirement of the \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat be greater than 5. If \code{method="MonteCarlo"} then a Monte Carlo test is performed, obviating the need for all expected counts to be at least 5. In the Monte Carlo test, \code{nsim} random point patterns are generated from the null hypothesis (either CSR or the fitted point process model). The Pearson \eqn{X^2} statistic is computed as above. The \eqn{p}-value is determined by comparing the \eqn{X^2} statistic for the observed point pattern, with the values obtained from the simulations. Again the default is to compute the \emph{two-sided} \eqn{p}-value. If \code{conditional} is \code{TRUE} then the simulated samples are generated from the multinomial distribution with the number of \dQuote{trials} equal to the number of observed points and the vector of probabilities equal to the expected counts divided by the sum of the expected counts. Otherwise the simulated samples are independent Poisson counts, with means equal to the expected counts. If the argument \code{CR} is given, then instead of the Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence test statistic \deqn{ 2nI = \frac{2}{CR(CR+1)} \sum_i X_i \left[ \left( \frac{X_i}{E_i} \right)^{CR} - 1 \right] }{ 2nI = (2/(CR * (CR+1))) * sum(X[i] * ((X[i]/E[i])^CR - 1)) } is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count and \eqn{E_i}{E[i]} is the corresponding expected count. The value \code{CR=1} gives the Pearson \eqn{X^2} statistic; \code{CR=0} gives the likelihood ratio test statistic \eqn{G^2}; \code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2}; \code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2}; and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}. In all cases the asymptotic distribution of this test statistic is the same \eqn{\chi^2}{chi^2} distribution as above. The return value is an object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The return value also belongs to the special class \code{"quadrattest"}. Plotting the object will display the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. } \seealso{ \code{\link{quadrat.test.splitppp}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"htest"}. See \code{\link{chisq.test}} for explanation. The return value is also an object of the special class \code{"quadrattest"}, and there is a plot method for this class. See the examples. } \references{ Cressie, N. and Read, T.R.C. (1984) Multinomial goodness-of-fit tests. \emph{Journal of the Royal Statistical Society, Series B} \bold{46}, 440--464. } \examples{ quadrat.test(simdat) quadrat.test(simdat, 4, 3) quadrat.test(simdat, alternative="regular") quadrat.test(simdat, alternative="clustered") ## Likelihood ratio test quadrat.test(simdat, CR=0) ## Power divergence tests quadrat.test(simdat, CR=-1)$p.value quadrat.test(simdat, CR=-2)$p.value # Using Monte Carlo p-values quadrat.test(swedishpines) # Get warning, small expected values.
# quadrat.test(swedishpines, method="M", nsim=4999) # quadrat.test(swedishpines, method="M", nsim=4999, conditional=FALSE) \testonly{ quadrat.test(swedishpines, method="M", nsim=19) quadrat.test(swedishpines, method="M", nsim=19, conditional=FALSE) } # quadrat counts qS <- quadratcount(simdat, 4, 3) quadrat.test(qS) # fitted model: inhomogeneous Poisson fitx <- ppm(simdat ~ x) quadrat.test(fitx) # an equivalent test (results differ due to discretisation effects): quadrat.test(simdat, lambda=predict(fitx), df.est=length(coef(fitx))) te <- quadrat.test(simdat, 4) residuals(te) # Pearson residuals plot(te) plot(simdat, pch="+", cols="green", lwd=2) plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3) sublab <- eval(substitute(expression(p[chi^2]==z), list(z=signif(te$p.value,3)))) title(sub=sublab, cex.sub=3) # quadrats of irregular shape B <- dirichlet(runifpoint(6, Window(simdat))) qB <- quadrat.test(simdat, tess=B) plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+") plot(qB, add=TRUE, col="red", lwd=2, cex=1.2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat.core/man/rMosaicSet.Rd0000644000176200001440000000261314141452520016157 0ustar liggesusers\name{rMosaicSet} \alias{rMosaicSet} \title{Mosaic Random Set} \description{ Generate a random set by taking a random selection of tiles of a given tessellation. } \usage{ rMosaicSet(X, p=0.5) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{p}{ Probability of including a given tile. A number strictly between 0 and 1. } } \details{ Given a tessellation \code{X}, this function randomly selects some of the tiles of \code{X}, including each tile with probability \eqn{p} independently of the other tiles. The selected tiles are then combined to form a set in the plane. One application of this is Switzer's (1965) example of a random set which has a Markov property. It is constructed by generating \code{X} according to a Poisson line tessellation (see \code{\link{rpoislinetess}}). } \value{ A window (object of class \code{"owin"}). } \references{ Switzer, P. A random set process in the plane with a Markovian property. \emph{Annals of Mathematical Statistics} \bold{36} (1965) 1859--1863. } \author{\adrian and \rolf } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicField}} } \examples{ # Switzer's random set X <- rpoislinetess(3) plot(rMosaicSet(X, 0.5), col="green", border=NA) # another example plot(rMosaicSet(dirichlet(runifpoint(30)), 0.4)) } \keyword{spatial} \keyword{datagen} spatstat.core/man/rmhmodel.default.Rd0000644000176200001440000005141414144333466017355 0ustar liggesusers\name{rmhmodel.default} \alias{rmhmodel.default} \title{Build Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{default}(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) } \arguments{ \item{\dots}{Ignored.} \item{cif}{Character string specifying the choice of model} \item{par}{Parameters of the model} \item{w}{Spatial window in which to simulate} \item{trend}{Specification of the trend in the model} \item{types}{A vector of factor levels defining the possible marks, for a multitype process. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. 
} \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.default} is the default method. It builds a description of the point process model from the simple arguments listed. The argument \code{cif} is a character string specifying the choice of interpoint interaction for the point process. The current options are \describe{ \item{\code{'areaint'}}{Area-interaction process.} \item{\code{'badgey'}}{Baddeley-Geyer (hybrid Geyer) process.} \item{\code{'dgs'}}{Diggle, Gates and Stibbard (1987) process} \item{\code{'diggra'}}{Diggle and Gratton (1984) process} \item{\code{'fiksel'}}{Fiksel double exponential process (Fiksel, 1984).} \item{\code{'geyer'}}{Saturation process (Geyer, 1999).} \item{\code{'hardcore'}}{Hard core process} \item{\code{'lennard'}}{Lennard-Jones process} \item{\code{'lookup'}}{General isotropic pairwise interaction process, with the interaction function specified via a ``lookup table''.} \item{\code{'multihard'}}{Multitype hardcore process} \item{\code{'penttinen'}}{The Penttinen process} \item{\code{'strauss'}}{The Strauss process} \item{\code{'straush'}}{The Strauss process with hard core} \item{\code{'sftcr'}}{The Softcore process} \item{\code{'straussm'}}{ The multitype Strauss process} \item{\code{'straushm'}}{Multitype Strauss process with hard core} \item{\code{'triplets'}}{Triplets process (Geyer, 1999).} } It is also possible to specify a \emph{hybrid} of these interactions in the sense of Baddeley et al (2013). In this case, \code{cif} is a character vector containing names from the list above. For example, \code{cif=c('strauss', 'geyer')} would specify a hybrid of the Strauss and Geyer models. The argument \code{par} supplies parameter values appropriate to the conditional intensity function being invoked. For the interactions listed above, these parameters are: \describe{ \item{areaint:}{ (Area-interaction process.) A \bold{named} list with components \code{beta,eta,r} which are respectively the ``base'' intensity, the scaled interaction parameter and the interaction radius. } \item{badgey:}{ (Baddeley-Geyer process.) A \bold{named} list with components \code{beta} (the ``base'' intensity), \code{gamma} (a vector of non-negative interaction parameters), \code{r} (a vector of interaction radii, of the same length as \code{gamma}, in \emph{increasing} order), and \code{sat} (the saturation parameter(s); this may be a scalar, or a vector of the same length as \code{gamma} and \code{r}; all values should be at least 1). Note that because of the presence of ``saturation'' the \code{gamma} values are permitted to be larger than 1. } \item{dgs:}{ (Diggle, Gates, and Stibbard process. See Diggle, Gates, and Stibbard (1987)) A \bold{named} list with components \code{beta} and \code{rho}. This process has pairwise interaction function equal to \deqn{ e(t) = \sin^2\left(\frac{\pi t}{2\rho}\right) }{ e(t) = sin^2((pi * t)/(2 * rho)) } for \eqn{t < \rho}{t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. } \item{diggra:}{ (Diggle-Gratton process. See Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987).) A \bold{named} list with components \code{beta}, \code{kappa}, \code{delta} and \code{rho}. 
This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < \delta}{t < delta}, equal to \deqn{ \left(\frac{t-\delta}{\rho-\delta}\right)^\kappa }{ ((t-delta)/(rho-delta))^kappa } for \eqn{\delta \le t < \rho}{delta <= t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. Note that here we use the symbol \eqn{\kappa}{kappa} where Diggle, Gates, and Stibbard use \eqn{\beta}{beta} since we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. } \item{fiksel:}{ (Fiksel double exponential process, see Fiksel (1984)) A \bold{named} list with components \code{beta}, \code{r}, \code{hc}, \code{kappa} and \code{a}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < hc}, equal to \deqn{ \exp(a \exp(- \kappa t)) }{ exp(a * exp( - kappa * t)) } for \eqn{hc \le t < r}{hc <= t < r}, and equal to 1 for \eqn{t \ge r}{t >= r}. } \item{geyer:}{ (Geyer's saturation process. See Geyer (1999).) A \bold{named} list with components \code{beta}, \code{gamma}, \code{r}, and \code{sat}. The components \code{beta}, \code{gamma}, \code{r} are as for the Strauss model, and \code{sat} is the ``saturation'' parameter. The model is Geyer's ``saturation'' point process model, a modification of the Strauss process in which we effectively impose an upper limit (\code{sat}) on the number of neighbours which will be counted as close to a given point. Explicitly, a saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{\beta \gamma^{\min(s, t(x_i,X))}}{beta gamma^min(s,t(x[i],X))} to the probability density of the point pattern, where \eqn{t(x_i,X)}{t(x[i],X)} denotes the number of ``\eqn{r}-close neighbours'' of \eqn{x_i}{x[i]} in the pattern \eqn{X}. If the saturation threshold \eqn{s} is infinite, the Geyer process reduces to a Strauss process with interaction parameter \eqn{\gamma^2}{gamma^2} rather than \eqn{\gamma}{gamma}. } \item{hardcore:}{ (Hard core process.) A \bold{named} list with components \code{beta} and \code{hc} where \code{beta} is the base intensity and \code{hc} is the hard core distance. This process has pairwise interaction function \eqn{e(t)} equal to 1 if \eqn{t > hc} and 0 if \eqn{t <= hc}. } \item{lennard:}{ (Lennard-Jones process.) A \bold{named} list with components \code{sigma} and \code{epsilon}, where \code{sigma} is the characteristic diameter and \code{epsilon} is the well depth. See \code{\link{LennardJones}} for explanation. } \item{multihard:}{ (Multitype hard core process.) A \bold{named} list with components \code{beta} and \code{hradii}, where \code{beta} is a vector of base intensities for each type of point, and \code{hradii} is a matrix of hard core radii between each pair of types. } \item{penttinen:}{ (Penttinen process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter, and the disc radius. Note that \code{gamma} must be less than or equal to 1. See \code{\link{Penttinen}} for explanation. (Note that there is also an algorithm for perfect simulation of the Penttinen process, \code{\link{rPenttinen}}) } \item{strauss:}{ (Strauss process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter and the interaction radius. 
Note that \code{gamma} must be less than or equal to 1. (Note that
there is also an algorithm for perfect simulation of the Strauss
process, \code{\link{rStrauss}})
}
\item{straush:}{
(Strauss process with hardcore.) A \bold{named} list with entries
\code{beta,gamma,r,hc} where \code{beta}, \code{gamma}, and \code{r}
are as for the Strauss process, and \code{hc} is the hardcore radius.
Of course \code{hc} must be less than \code{r}.
}
\item{sftcr:}{
(Softcore process.) A \bold{named} list with components
\code{beta,sigma,kappa}. Again \code{beta} is a ``base'' intensity.
The pairwise interaction between two points \eqn{u \neq v}{u != v} is
\deqn{
\exp \left \{ - \left ( \frac{\sigma}{||u-v||} \right )^{2/\kappa} \right \}
}{
exp(-(sigma/||u-v||)^(2/kappa))
}
Note that it is necessary that \eqn{0 < \kappa < 1}{0 < kappa < 1}.
}
\item{straussm:}{
(Multitype Strauss process.) A \bold{named} list with components
\itemize{
\item
\code{beta}: A vector of ``base'' intensities, one for each possible
type.
\item
\code{gamma}: A \bold{symmetric} matrix of interaction parameters,
with \eqn{\gamma_{ij}}{gamma_ij} pertaining to the interaction between
type \eqn{i} and type \eqn{j}.
\item
\code{radii}: A \bold{symmetric} matrix of interaction radii, with
entries \eqn{r_{ij}}{r_ij} pertaining to the interaction between type
\eqn{i} and type \eqn{j}.
}
}
\item{straushm:}{
(Multitype Strauss process with hardcore.) A \bold{named} list with
components \code{beta} and \code{gamma} as for \code{straussm} and
\bold{two} ``radii'' components:
\itemize{
\item \code{iradii}: the interaction radii
\item \code{hradii}: the hardcore radii
}
which are both symmetric matrices of nonnegative numbers. The entries
of \code{hradii} must be less than the corresponding entries of
\code{iradii}.
}
\item{triplets:}{
(Triplets process.) A \bold{named} list with components
\code{beta,gamma,r} which are respectively the ``base'' intensity, the
triplet interaction parameter and the interaction radius. Note that
\code{gamma} must be less than or equal to 1.
}
\item{lookup:}{
(Arbitrary pairwise interaction process with isotropic interaction.)
A \bold{named} list with components \code{beta}, \code{r}, and
\code{h}, or just with components \code{beta} and \code{h}.

This model is the pairwise interaction process with an isotropic
interaction given by any chosen function \eqn{H}. Each pair of points
\eqn{x_i, x_j}{x[i], x[j]} in the point pattern contributes a factor
\eqn{H(d(x_i, x_j))}{H(d(x[i],x[j]))} to the probability density,
where \eqn{d} denotes distance and \eqn{H} is the pair interaction
function. The component \code{beta} is a (positive) scalar which
determines the ``base'' intensity of the process.

In this implementation, \eqn{H} must be a step function. It is
specified by the user in one of two ways.
\itemize{
\item
\bold{as a vector of values:} If \code{r} is present, then \code{r} is
assumed to give the locations of jumps in the function \eqn{H}, while
the vector \code{h} gives the corresponding values of the function.

Specifically, the interaction function \eqn{H(t)} takes the value
\code{h[1]} for distances \eqn{t} in the interval \code{[0, r[1])};
takes the value \code{h[i]} for distances \eqn{t} in the interval
\code{[r[i-1], r[i])} where \eqn{i = 2,\ldots, n}{i = 2, ..., n}; and
takes the value 1 for \eqn{t \ge r[n]}{t >= r[n]}. Here \eqn{n}
denotes the length of \code{r}.

The components \code{r} and \code{h} must be numeric vectors of equal
length.
The \code{r} values must be strictly positive, and sorted in increasing order. The entries of \code{h} must be non-negative. If any entry of \code{h} is greater than 1, then the entry \code{h[1]} must be 0 (otherwise the specified process is non-existent). Greatest efficiency is achieved if the values of \code{r} are equally spaced. [\bold{Note:} The usage of \code{r} and \code{h} has \emph{changed} from the previous usage in \pkg{spatstat} versions 1.4-7 to 1.5-1, in which ascending order was not required, and in which the first entry of \code{r} had to be 0.] \item \bold{as a stepfun object:} If \code{r} is absent, then \code{h} must be an object of class \code{"stepfun"} specifying a step function. Such objects are created by \code{\link{stepfun}}. The stepfun object \code{h} must be right-continuous (which is the default using \code{\link{stepfun}}.) The values of the step function must all be nonnegative. The values must all be less than 1 unless the function is identically zero on some initial interval \eqn{[0,r)}. The rightmost value (the value of \code{h(t)} for large \code{t}) must be equal to 1. Greatest efficiency is achieved if the jumps (the ``knots'' of the step function) are equally spaced. } } } For a hybrid model, the argument \code{par} should be a list, of the same length as \code{cif}, such that \code{par[[i]]} is a list of the parameters required for the interaction \code{cif[i]}. See the Examples. The optional argument \code{trend} determines the spatial trend in the model, if it has one. It should be a function or image (or a list of such, if the model is multitype) to provide the value of the trend at an arbitrary point. \describe{ \item{trend given as a function:}{A trend function may be a function of any number of arguments, but the first two must be the \eqn{x,y} coordinates of a point. Auxiliary arguments may be passed to the \code{trend} function at the time of simulation, via the \code{\dots} argument to \code{\link{rmh}}. The function \bold{must} be \bold{vectorized}. That is, it must be capable of accepting vector valued \code{x} and \code{y} arguments. Put another way, it must be capable of calculating the trend value at a number of points, simultaneously, and should return the \bold{vector} of corresponding trend values. } \item{trend given as an image:}{ An image (see \code{\link{im.object}}) provides the trend values at a grid of points in the observation window and determines the trend value at other points as the value at the nearest grid point. } } Note that the trend or trends must be \bold{non-negative}; no checking is done for this. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. The optional argument \code{types} specifies the possible types in a multitype point process. If the model being simulated is multitype, and \code{types} is not specified, then this vector defaults to \code{1:ntypes} where \code{ntypes} is the number of types. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \code{DOI: 10.18637/jss.v055.i11} Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. 
\emph{Journal of the Royal Statistical Society, series B} \bold{46},
193--212.

Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric
estimator for pairwise-interaction point processes.
\emph{Biometrika} \bold{74}, 763--770.

Fiksel, T. (1984) Estimation of parameterized pair potentials of
marked and non-marked Gibbsian point processes.
\emph{Elektronische Informationsverarbeitung und Kybernetik}
\bold{20}, 270--278.

Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes.
Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van
Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation},
Chapman and Hall / CRC, Monographs on Statistics and Applied
Probability, number 80. Pages 79--140.
}
\section{Warnings in Respect of ``lookup''}{
For the \code{lookup} cif, the entries of the \code{r} component of
\code{par} must be \emph{strictly positive} and sorted into ascending
order.

Note that if you specify the \code{lookup} pairwise interaction
function via \code{\link{stepfun}()} the arguments \code{x} and
\code{y} which are passed to \code{stepfun()} are slightly different
from \code{r} and \code{h}: \code{length(y)} is equal to
\code{1+length(x)}; the final entry of \code{y} must be equal to 1
--- i.e. this value is explicitly supplied by the user rather than
getting tacked on internally.

The step function returned by \code{stepfun()} must be right
continuous (this is the default behaviour of \code{stepfun()})
otherwise an error is given.
}
\seealso{
\code{\link{rmh}},
\code{\link{rmhcontrol}},
\code{\link{rmhstart}},
\code{\link{ppm}},
\rmhInteractionsList.
}
\examples{
# Strauss process:
mod01 <- rmhmodel(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
                  w=c(0,10,0,10))
mod01
# The above could also be simulated using 'rStrauss'

# Strauss with hardcore:
mod04 <- rmhmodel(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
                  w=owin(c(0,10),c(0,5)))

# Hard core:
mod05 <- rmhmodel(cif="hardcore",par=list(beta=2,hc=0.3),
                  w=square(5))

# Soft core:
w <- square(10)
mod07 <- rmhmodel(cif="sftcr",
                  par=list(beta=0.8,sigma=0.1,kappa=0.5),
                  w=w)

# Penttinen process:
modpen <- rmhmodel(cif="penttinen",par=list(beta=2,gamma=0.6,r=1),
                   w=c(0,10,0,10))

# Area-interaction process:
mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7),
                  w=c(0,10,0,10))

# Baddeley-Geyer process:
mod99 <- rmhmodel(cif="badgey",par=list(beta=0.3,
                  gamma=c(0.2,1.8,2.4),r=c(0.035,0.07,0.14),sat=5),
                  w=unit.square())

# Multitype Strauss:
beta <- c(0.027,0.008)
gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
r    <- matrix(c(45,45,45,45),2,2)
mod08 <- rmhmodel(cif="straussm",
                  par=list(beta=beta,gamma=gmma,radii=r),
                  w=square(250))
# specify types
mod09 <- rmhmodel(cif="straussm",
                  par=list(beta=beta,gamma=gmma,radii=r),
                  w=square(250),
                  types=c("A", "B"))

# Multitype Hardcore:
rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2)
mod08hard <- rmhmodel(cif="multihard",
                      par=list(beta=beta,hradii=rhc),
                      w=square(250),
                      types=c("A", "B"))

# Multitype Strauss hardcore with trends for each type:
beta <- c(0.27,0.08)
ri   <- matrix(c(45,45,45,45),2,2)
rhc  <- matrix(c(9.1,5.0,5.0,2.5),2,2)
tr3  <- function(x,y){x <- x/250; y <- y/250;
                      exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
                     }
                     # log quadratic trend
tr4  <- function(x,y){x <- x/250; y <- y/250;
                      exp(-0.6*x+0.5*y)}
                     # log linear trend
mod10 <- rmhmodel(cif="straushm",par=list(beta=beta,gamma=gmma,
                  iradii=ri,hradii=rhc),w=c(0,250,0,250),
                  trend=list(tr3,tr4))

# Triplets process:
mod11 <-
rmhmodel(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- rmhmodel(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) # hybrid model modhy <- rmhmodel(cif=c('strauss', 'geyer'), par=list(list(beta=100,gamma=0.5,r=0.05), list(beta=1, gamma=0.7,r=0.1, sat=2)), w=square(1)) modhy } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/Fest.Rd0000644000176200001440000002704414141452520015014 0ustar liggesusers\name{Fest} \alias{Fest} \alias{Fhazard} \title{Estimate the Empty Space Function or its Hazard Rate} \description{ Estimates the empty space function \eqn{F(r)} or its hazard rate \eqn{h(r)} from a point pattern in a window of arbitrary shape. } \usage{ Fest(X, \dots, eps, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) Fhazard(X, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{F(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{ Extra arguments, passed from \code{Fhazard} to \code{Fest}. Extra arguments to \code{Fest} are ignored. } \item{eps}{Optional. A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{F(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{F(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. The result of \code{Fest} is essentially a data frame containing up to seven columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{F(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{F(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{F(r)} by the spatial Kaplan-Meier method } \item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)} } \item{raw}{the uncorrected estimate of \eqn{F(r)}, i.e. the empirical distribution of the distance from a random point in the window to the nearest point of the data pattern \code{X} } \item{theo}{the theoretical value of \eqn{F(r)} for a stationary Poisson process of the same estimated intensity. } The result of \code{Fhazard} contains only three columns \item{r}{the values of the argument \eqn{r} at which the hazard rate \eqn{h(r)} has been estimated } \item{hazard}{the spatial Kaplan-Meier estimate of the hazard rate \eqn{h(r)}} \item{theo}{ the theoretical value of \eqn{h(r)} for a stationary Poisson process of the same estimated intensity. 
}
}
\details{
\code{Fest} computes an estimate of the empty space function
\eqn{F(r)}, and \code{Fhazard} computes an estimate of its hazard rate
\eqn{h(r)}.

The empty space function (also called the ``\emph{spherical contact
distribution}'' or the ``\emph{point-to-nearest-event}'' distribution)
of a stationary point process \eqn{X} is the cumulative distribution
function \eqn{F} of the distance from a fixed point in space to the
nearest point of \eqn{X}.

An estimate of \eqn{F} derived from a spatial point pattern dataset
can be used in exploratory data analysis and formal inference about
the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In
exploratory analyses, the estimate of \eqn{F} is a useful statistic
summarising the sizes of gaps in the pattern. For inferential
purposes, the estimate of \eqn{F} is usually compared to the true
value of \eqn{F} for a completely random (Poisson) point process,
which is
\deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{%
F(r) = 1 - exp( - lambda * pi * r^2)}
where \eqn{\lambda}{lambda} is the intensity (expected number of
points per unit area). Deviations between the empirical and
theoretical \eqn{F} curves may suggest spatial clustering or spatial
regularity.

This algorithm estimates the empty space function \eqn{F} from the
point pattern \code{X}. It assumes that \code{X} can be treated as a
realisation of a stationary (spatially homogeneous) random spatial
point process in the plane, observed through a bounded window. The
window (which is specified in \code{X}) may have arbitrary shape.

The argument \code{X} is interpreted as a point pattern object (of
class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied
in any of the formats recognised by \code{\link{as.ppp}}.

The algorithm uses two discrete approximations which are controlled by
the parameter \code{eps} and by the spacing of values of \code{r}
respectively. (See below for details.) First-time users are strongly
advised not to specify these arguments.

The estimation of \eqn{F} is hampered by edge effects arising from the
unobservability of points of the random pattern outside the window.
An edge correction is needed to reduce bias (Baddeley, 1998; Ripley,
1988). The edge corrections implemented here are the border method or
"\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator
(Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and
Stoyan, 1998).

Our implementation makes essential use of the distance transform
algorithm of image processing (Borgefors, 1986). A fine grid of pixels
is created in the observation window. The Euclidean distance between
two pixels is approximated by the length of the shortest path joining
them in the grid, where a path is a sequence of steps between adjacent
pixels, and horizontal, vertical and diagonal steps have length
\eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)} respectively in pixel
units. If the pixel grid is sufficiently fine then this is an accurate
approximation.

The parameter \code{eps} is the pixel width of the rectangular raster
used to compute the distance transform (see below). It must not be too
large: the absolute error in distance values due to discretisation is
bounded by \code{eps}.

If \code{eps} is not specified, the function checks whether the window
\code{Window(X)} contains pixel raster information. If so, then
\code{eps} is set equal to the pixel width of the raster; otherwise,
\code{eps} defaults to 1/100 of the width of the observation window.
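As a quick illustration of the discretisation (a sketch only, using
the \code{cells} dataset), one can compare estimates computed at two
different resolutions:
\preformatted{
  F1 <- Fest(cells, eps=0.01)    # close to the default for a unit window
  F2 <- Fest(cells, eps=0.005)   # finer pixel grid
  # the two estimates should agree closely if eps is small enough
}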
The argument \code{r} is the vector of values for the distance
\eqn{r} at which \eqn{F(r)} should be evaluated. It is also used to
determine the breakpoints (in the sense of \code{\link{hist}}) for the
computation of histograms of distances. The estimators are computed
from histogram counts. This introduces a discretisation error which is
controlled by the fineness of the breakpoints.

First-time users are strongly advised not to specify \code{r}.
However, if it is specified, \code{r} must satisfy \code{r[1] = 0},
and \code{max(r)} must be larger than the radius of the largest disc
contained in the window. Furthermore, the spacing of successive
\code{r} values must be very fine (ideally not greater than
\code{eps/4}).

The algorithm also returns an estimate of the hazard rate function,
\eqn{h(r)} of \eqn{F(r)}. The hazard rate is defined by
\deqn{h(r) = - \frac{d}{dr} \log(1 - F(r))}{%
h(r) = - (d/dr) log(1 - F(r))}
The hazard rate of \eqn{F} has been proposed as a useful exploratory
statistic (Baddeley and Gill, 1994). The estimate of \eqn{h(r)} given
here is a discrete approximation to the hazard rate of the
Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is absolutely
continuous (for any stationary point process \eqn{X}), so the hazard
function always exists (Baddeley and Gill, 1997).

If the argument \code{domain} is given, the estimate of \eqn{F(r)}
will be based only on the empty space distances measured from
locations inside \code{domain} (although their nearest data points may
lie outside \code{domain}). This is useful in bootstrap techniques.
The argument \code{domain} should be a window (object of class
\code{"owin"}) or something acceptable to \code{\link{as.owin}}. It
must be a subset of the window of the point pattern \code{X}.

The naive empirical distribution of distances from each location in
the window to the nearest point of the data pattern, is a biased
estimate of \eqn{F}. However this is also returned by the algorithm
(if \code{correction="none"}), as it is sometimes useful in other
contexts. Care should be taken not to use the uncorrected empirical
\eqn{F} as if it were an unbiased estimator of \eqn{F}.
}
\note{
Sizeable amounts of memory may be needed during the calculation.
}
\references{
Baddeley, A.J. (1998) Spatial sampling and censoring. In
O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds)
\emph{Stochastic Geometry: Likelihood and Computation}. Chapman and
Hall. Chapter 2, pages 37--78.

Baddeley, A.J. and Gill, R.D. (1994) The empty space hazard of a
spatial pattern. Research Report 1994/3, Department of Mathematics,
University of Western Australia.

Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of
interpoint distance distributions for spatial point processes.
\emph{Annals of Statistics} \bold{25}, 263--292.

Borgefors, G. (1986) Distance transformations in digital images.
\emph{Computer Vision, Graphics and Image Processing} \bold{34},
344--371.

Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions
for spatial patterns. \emph{Statistica Neerlandica} \bold{52},
239--246.

Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley
and Sons.

Diggle, P.J. (1983) \emph{Statistical analysis of spatial point
patterns}. Academic Press.

Ripley, B.D. (1988) \emph{Statistical inference for spatial
processes}. Cambridge University Press.

Stoyan, D., Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic
geometry and its applications}. Second edition. Springer Verlag.
}
\section{Warnings}{
The reduced sample (border method) estimator of \eqn{F} is pointwise
approximately unbiased, but need not be a valid distribution function;
it may not be a nondecreasing function of \eqn{r}. Its range is always
within \eqn{[0,1]}.

The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing
but its maximum value may be less than \eqn{1}.

The estimate of hazard rate \eqn{h(r)} returned by the algorithm is an
approximately unbiased estimate for the integral of \eqn{h()} over the
corresponding histogram cell. It may exhibit oscillations due to
discretisation effects. We recommend modest smoothing, such as kernel
smoothing with kernel width equal to the width of a histogram cell,
using \code{\link{Smooth.fv}}.
}
\seealso{
\code{\link{Gest}},
\code{\link{Jest}},
\code{\link{Kest}},
\code{\link{km.rs}},
\code{\link{reduced.sample}},
\code{\link{kaplan.meier}}
}
\examples{
Fc <- Fest(cells, eps=0.01)

# Tip: don't use F for the left hand side!
# That's an abbreviation for FALSE

plot(Fc)

# P-P style plot
plot(Fc, cbind(km, theo) ~ theo)

# The empirical F is above the Poisson F
# indicating an inhibited pattern

if(interactive()) {
plot(Fc, . ~ theo)
plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo)))
}
\testonly{
Fh <- Fhazard(cells)
}
}
\author{\adrian and \rolf }
\keyword{spatial}
\keyword{nonparametric}
spatstat.core/man/rlabel.Rd0000644000176200001440000000677214141452520015361 0ustar liggesusers\name{rlabel}
\alias{rlabel}
\title{Random Re-Labelling of Point Pattern}
\description{
Randomly allocates marks to a point pattern, or permutes the existing
marks, or resamples from the existing marks.
}
\usage{
rlabel(X, labels=marks(X), permute=TRUE, group=NULL, \dots,
       nsim=1, drop=TRUE)
}
\arguments{
\item{X}{
Point pattern (object of class \code{"ppp"}, \code{"lpp"},
\code{"pp3"} or \code{"ppx"}) or line segment pattern (object of class
\code{"psp"}).
}
\item{labels}{
Vector of values from which the new marks will be drawn at random.
Defaults to the vector of existing marks.
}
\item{permute}{
Logical value indicating whether to generate new marks by randomly
permuting \code{labels} or by drawing a random sample with
replacement.
}
\item{group}{
Optional. A factor, or other data dividing the points into groups.
Random relabelling will be performed separately within each group.
See Details.
}
\item{\dots}{Additional arguments passed to \code{\link{cut.ppp}} to
determine the grouping factor, when \code{group} is given.
}
\item{nsim}{Number of simulated realisations to be generated.}
\item{drop}{
Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
result will be a point pattern, rather than a list containing a point
pattern.
}
}
\value{
If \code{nsim = 1} and \code{drop=TRUE}, a marked point pattern (of
the same class as \code{X}). If \code{nsim > 1}, a list of point
patterns.
}
\details{
This very simple function allocates random marks to an existing point
pattern \code{X}. It is useful for hypothesis testing purposes. (The
function can also be applied to line segment patterns.)

In the simplest case, the command \code{rlabel(X)} yields a point
pattern obtained from \code{X} by randomly permuting the marks of the
points.

If \code{permute=TRUE}, then \code{labels} should be a vector of
length equal to the number of points in \code{X}. The result of
\code{rlabel} will be a point pattern with locations given by \code{X}
and marks given by a random permutation of \code{labels} (i.e. a
random sample without replacement).
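For instance, a random permutation of the marks always preserves the
frequency of each mark value (a minimal check using the
\code{amacrine} dataset):
\preformatted{
  Y <- rlabel(amacrine)    # random permutation of existing marks
  table(marks(Y))          # same mark frequencies as in amacrine
  table(marks(amacrine))
}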
If \code{permute=FALSE}, then \code{labels} may be a vector of any length. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random sample from \code{labels} (with replacement). The argument \code{group} specifies that the points are divided into several different groups, and that the random labelling shall be performed separately on each group. The arguments \code{group} and \code{\dots} are passed to \code{\link{cut.ppp}} to determine the grouping. Thus \code{group} could be a \code{factor}, or the name of a column of marks in \code{X}, or a tessellation, or a factor-valued pixel image, etc. } \seealso{ \code{\link{marks<-}} to assign arbitrary marks. } \examples{ amacrine # Randomly permute the marks "on" and "off" # Result always has 142 "off" and 152 "on" Y <- rlabel(amacrine) # randomly allocate marks "on" and "off" # with probabilities p(off) = 0.48, p(on) = 0.52 Y <- rlabel(amacrine, permute=FALSE) # randomly allocate marks "A" and "B" with equal probability Y <- rlabel(cells, labels=factor(c("A", "B")), permute=FALSE) # divide the window into tiles and # randomly permute the marks within each tile Z <- rlabel(amacrine, group=quadrats(Window(amacrine), 4, 3)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat.core/man/isf.object.Rd0000644000176200001440000000364314141452520016140 0ustar liggesusers\name{isf.object} \alias{isf.object} %DoNotExport \title{Interaction Structure Family Objects} \description{ Objects of class \code{"isf"} are used internally by the \pkg{spatstat} package to represent the structure of the interpoint interactions in a family of point process models. } \details{ \emph{Advanced Use Only!} An object of class \code{"isf"} (Interaction Structure Family) is used internally by the \pkg{spatstat} package to represent the common mathematical and algorithmic structure of the interpoint interactions in a family of point process models. The existing objects of class \code{"isf"} are: \tabular{ll}{ \code{\link{pairwise.family}} \tab pairwise interaction \cr \code{\link{triplet.family}} \tab triplet interaction \cr \code{\link{pairsat.family}} \tab saturated pairwise interaction \cr \code{\link{hierpair.family}} \tab hierarchical pairwise interaction \cr \code{\link{inforder.family}} \tab infinite order interaction \cr \code{\link{hybrid.family}} \tab hybrids of several interactions \cr \code{\link{ord.family}} \tab Ord interactions \cr } The information contained in these objects enables the \pkg{spatstat} package to select the appropriate algorithm for fitting, predicting and simulating each point process model. For example, in order to fit a model that involves pairwise interactions, the model-fitting function \code{\link{ppm}} would use information contained in \code{\link{pairwise.family}} to select the appropriate algorithms. An object of class \code{"isf"} is essentially a list of functions for various tasks. The internal format is undocumented and may be changed without notice. } \value{ An object of class \code{"isf"}, essentially a list of functions for various tasks. The internal format is undocumented and may be changed without notice. 
}
\author{\adrian.}
\keyword{spatial}
spatstat.core/man/dummy.ppm.Rd0000644000176200001440000000431314141452520016033 0ustar liggesusers\name{dummy.ppm}
\alias{dummy.ppm}
\title{Extract Dummy Points Used to Fit a Point Process Model}
\description{
Given a fitted point process model, this function extracts the
`dummy points' of the quadrature scheme used to fit the model.
}
\usage{
dummy.ppm(object, drop=FALSE)
}
\arguments{
\item{object}{
fitted point process model (an object of class \code{"ppm"}).
}
\item{drop}{
Logical value determining whether to delete dummy points that were not
used to fit the model.
}
}
\value{
A point pattern (object of class \code{"ppp"}).
}
\details{
An object of class \code{"ppm"} represents a point process model that
has been fitted to data. It is typically produced by the model-fitting
algorithm \code{\link{ppm}}.

The maximum pseudolikelihood algorithm in \code{\link{ppm}}
approximates the pseudolikelihood integral by a sum over a finite set
of quadrature points, which is constructed by augmenting the original
data point pattern by a set of ``dummy'' points. The fitted model
object returned by \code{\link{ppm}} contains complete information
about this quadrature scheme. See \code{\link{ppm}} or
\code{\link{ppm.object}} for further information.

This function \code{dummy.ppm} extracts the dummy points of the
quadrature scheme. A typical use of this function would be to count
the number of dummy points, to gauge the accuracy of the approximation
to the exact pseudolikelihood.

It may happen that some dummy points are not actually used in fitting
the model (typically because the value of a covariate is \code{NA} at
these points). The argument \code{drop} specifies whether these unused
dummy points shall be deleted (\code{drop=TRUE}) or retained
(\code{drop=FALSE}) in the return value.

See \code{\link{ppm.object}} for a list of all operations that can be
performed on objects of class \code{"ppm"}.
}
\seealso{
\code{\link{ppm.object}},
\code{\link{ppp.object}},
\code{\link{ppm}}
}
\examples{
data(cells)
fit <- ppm(cells ~ 1, Strauss(r=0.1))
X <- dummy.ppm(fit)
npoints(X)
# this is the number of dummy points in the quadrature scheme
}
\author{\adrian and \rolf }
\keyword{spatial}
\keyword{utilities}
\keyword{models}
spatstat.core/man/Ldot.Rd0000644000176200001440000000513414141452520015011 0ustar liggesusers\name{Ldot}
\alias{Ldot}
\title{Multitype L-function (i-to-any)}
\description{
Calculates an estimate of the multitype L-function (from type \code{i}
to any type) for a multitype point pattern.
}
\usage{
Ldot(X, i, ..., from, correction)
}
\arguments{
\item{X}{The observed point pattern, from which an estimate of the
dot-type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be
computed. It must be a multitype point pattern (a marked point pattern
whose marks are a factor). See under Details.
}
\item{i}{The type (mark value) of the points in \code{X} from which
distances are measured. A character string (or something that will be
converted to a character string). Defaults to the first level of
\code{marks(X)}.
}
\item{correction,\dots}{
Arguments passed to \code{\link{Kdot}}.
}
\item{from}{An alternative way to specify \code{i}.}
}
\details{
This command computes
\deqn{L_{i\bullet}(r) = \sqrt{\frac{K_{i\bullet}(r)}{\pi}}}{Li.(r) = sqrt(Ki.(r)/pi)}
where \eqn{K_{i\bullet}(r)}{Ki.(r)} is the multitype \eqn{K}-function
from points of type \code{i} to points of any type. See
\code{\link{Kdot}} for information about
\eqn{K_{i\bullet}(r)}{Ki.(r)}.
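This relationship can be verified directly (a sketch using the
\code{amacrine} dataset and the function \code{\link{eval.fv}}):
\preformatted{
  K <- Kdot(amacrine, "off")
  L <- eval.fv(sqrt(K/pi))   # the same transformation applied by Ldot
}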
The command \code{Ldot} first calls \code{\link{Kdot}} to compute the estimate of the \code{i}-to-any \eqn{K}-function, and then applies the square root transformation. For a marked Poisson point process, the theoretical value of the L-function is \eqn{L_{i\bullet}(r) = r}{Li.(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{i\bullet}}{Li.} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}}{Li.} has been estimated } \item{theo}{the theoretical value \eqn{L_{i\bullet}(r) = r}{Li.(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}}{Li.} obtained by the edge corrections named. } \seealso{ \code{\link{Kdot}}, \code{\link{Lcross}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Ldot(amacrine, "off") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.core/man/plot.scan.test.Rd0000644000176200001440000000471014141452520016765 0ustar liggesusers\name{plot.scan.test} \alias{plot.scan.test} \alias{as.im.scan.test} \title{ Plot Result of Scan Test } \description{ Computes or plots an image showing the likelihood ratio test statistic for the scan test, or the optimal circle radius. } \usage{ \method{plot}{scan.test}(x, \dots, what=c("statistic", "radius"), do.window = TRUE) \method{as.im}{scan.test}(X, \dots, what=c("statistic", "radius")) } \arguments{ \item{x,X}{ Result of a scan test. An object of class \code{"scan.test"} produced by \code{\link{scan.test}}. } \item{\dots}{ Arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{what}{ Character string indicating whether to produce an image of the (profile) likelihood ratio test statistic (\code{what="statistic"}, the default) or an image of the optimal value of circle radius (\code{what="radius"}). } \item{do.window}{ Logical value indicating whether to plot the original window of the data as well. } } \details{ These functions extract, and plot, the spatially-varying value of the likelihood ratio test statistic which forms the basis of the scan test. If the test result \code{X} was based on circles of the same radius \code{r}, then \code{as.im(X)} is a pixel image of the likelihood ratio test statistic as a function of the position of the centre of the circle. If the test result \code{X} was based on circles of several different radii \code{r}, then \code{as.im(X)} is a pixel image of the profile (maximum value over all radii \code{r}) likelihood ratio test statistic as a function of the position of the centre of the circle, and \code{as.im(X, what="radius")} is a pixel image giving for each location \eqn{u} the value of \code{r} which maximised the likelihood ratio test statistic at that location. The \code{plot} method plots the corresponding image. } \value{ The value of \code{as.im.scan.test} is a pixel image (object of class \code{"im"}). The value of \code{plot.scan.test} is \code{NULL}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{scanLRTS}} } \examples{ if(interactive()) { a <- scan.test(redwood, seq(0.04, 0.1, by=0.01), method="poisson", nsim=19) } else { a <- scan.test(redwood, c(0.05, 0.1), method="poisson", nsim=2) } plot(a) as.im(a) plot(a, what="radius") } \keyword{htest} \keyword{spatial} spatstat.core/man/ppm.Rd0000644000176200001440000003732114141452520014706 0ustar liggesusers\name{ppm} \alias{ppm} \alias{ppm.formula} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Data } \description{ Fits a point process model to an observed point pattern. } \usage{ ppm(Q, \dots) \method{ppm}{formula}(Q, interaction=NULL, \dots, data=NULL, subset) } \arguments{ \item{Q}{ A \code{formula} in the \R language describing the model to be fitted. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{ Arguments passed to \code{\link{ppm.ppp}} or \code{\link{ppm.quad}} to control the model-fitting process. } \item{data}{ Optional. The values of spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the model-fitting should be restricted. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. The model fitted by \code{ppm} is either a Poisson point process (in which different points do not interact with each other) or a Gibbs point process (in which different points typically inhibit each other). For clustered point process models, use \code{\link{kppm}}. The function \code{ppm} is generic, with methods for the classes \code{formula}, \code{ppp} and \code{quad}. This page describes the method for a \code{formula}. The first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted. It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a spatial point pattern (object of class \code{"ppp"}) to which the model should be fitted, or an expression which evaluates to a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the \code{trend} expression on the right hand side of the formula. The trend may involve the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, the names of entries in the argument \code{data} (if supplied), or the names of objects that exist in the \R session. 
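For example (a minimal sketch using the \code{nztrees} dataset, which
also appears in the Examples below), a trend that is log-linear in the
Cartesian coordinates is specified by
\preformatted{
  fit <- ppm(nztrees ~ x + y)
}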
The trend formula specifies the \bold{logarithm} of the intensity of a Poisson process, or in general, the logarithm of the first order potential of the Gibbs process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If the formula is \code{pattern~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{pattern ~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The interaction models currently available are: \GibbsInteractionsList. See the examples below. Note that it is possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should either be supplied in the argument \code{data}, or should be stored in objects that exist in the \R session. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. If it is given, the argument \code{data} is typically a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. For marked point pattern data, the covariate can be a \code{function(x, y, marks)} or \code{function(x, y, marks, ...)}. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. 
(To make a covariate in which each tile of the tessellation has a numerical value, convert the tessellation to a \code{function(x,y)} using \code{\link{as.function.tess}}.) } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{data} is a list, the list entries should have names corresponding to (some of) the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{data}. Alternatively, \code{data} may be a data frame giving the values of the covariates at specified locations. Then \code{pattern} should be a quadrature scheme (object of class \code{"quad"}) giving the corresponding locations. See \code{\link{ppm.quad}} for details. } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\psi)}{theta = theta(psi)} is some transformation of \eqn{\psi}{psi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters}{ Other parameters are called \emph{irregular}. } } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells ~ 1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. 
The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \section{Technical Warnings and Error Messages}{ See \code{\link{ppm.ppp}} for some technical warnings about the weaknesses of the algorithm, and explanation of some common error messages. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} 283--322. Berman, M. and Turner, T.R. (1992) Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41}, 31--38. Besag, J. (1975) Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24}, 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. (1994) On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62}, 99-117. Huang, F. and Ogata, Y. (1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Jensen, J.L. and Moeller, M. (1991) Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1}, 445--461. Jensen, J.L. and Kuensch, H.R. (1994) On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46}, 475--486. } \seealso{ \code{\link{ppm.ppp}} and \code{\link{ppm.quad}} for more details on the fitting technique and edge correction. \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{project.ppm}} for ensuring the fitted model is a valid point process. See \code{\link{kppm}} for fitting Cox point process models and cluster point process models, and \code{\link{dppm}} for fitting determinantal point process models. } \examples{ online <- interactive() if(!online) { # reduce grid sizes for efficiency in tests spatstat.options(npixel=32, ndummy.min=16) } # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees ~ 1) if(online) { Q <- quadscheme(nztrees) ppm(Q ~ 1) # equivalent. 
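  # quick sanity check (a sketch): for the stationary Poisson model,
  # the fitted intensity exp(coef) agrees with npoints/area
  # (up to quadrature approximation error)
  exp(coef(ppm(nztrees ~ 1)))
  npoints(nztrees)/area(Window(nztrees))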
} fit1 <- ppm(nztrees ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) ppm(nztrees ~ polynom(x,2)) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) if(online) { library(splines) ppm(nztrees ~ bs(x,df=3)) } # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 ppm(nztrees ~ 1, Strauss(r=10), rbord=10) # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 ppm(nztrees ~ x, Strauss(13), correction="periodic") # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. # Compare Maximum Pseudolikelihood, Huang-Ogata and Variational Bayes fits: if(online) ppm(swedishpines ~ 1, Strauss(9)) ppm(swedishpines ~ 1, Strauss(9), method="ho", nsim=if(!online) 8 else 99) ppm(swedishpines ~ 1, Strauss(9), method="VBlogi") # COVARIATES # X <- rpoispp(20) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X ~ y + weirdfunction) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X ~ x + f, covfunargs=list(a=1/2)) # COVARIATE: logical value TRUE inside window, FALSE outside b <- owin(c(0.1, 0.6), c(0.1, 0.9)) ppm(X ~ b) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species if(online) { ppm(lansing ~ marks, Poisson()) } else { ama <- amacrine[square(0.7)] a <- ppm(ama ~ marks, Poisson(), nd=16) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species if(online) { ppm(lansing ~ marks * polynom(x,y,3), Poisson()) } else { b <- ppm(ama ~ marks * polynom(x,y,2), Poisson(), nd=16) } } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.core/man/rdpp.Rd0000644000176200001440000000274014141452520015054 0ustar liggesusers\name{rdpp} \alias{rdpp} \title{Simulation of a Determinantal Point Process} \description{ Generates simulated realisations from a determinantal point process. } \usage{ rdpp(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 10000, progress = 0, debug = FALSE, \dots) } \arguments{ \item{eig}{ vector of values between 0 and 1 specifying the non-zero eigenvalues for the process. } \item{index}{ \code{data.frame} or \code{matrix} (or something acceptable to \code{\link{as.matrix}}) specifying indices of the basis functions. } \item{basis}{character string giving the name of the basis.} \item{window}{ window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"}) giving the domain of the point process. } \item{reject_max}{ integer giving the maximal number of trials for rejection sampling. } \item{progress}{ integer giving the interval for making a progress report. The value zero turns reporting off. } \item{debug}{ logical value indicating whether debug information should be printed.
} \item{\dots}{Ignored.} } \author{ \spatstatAuthors. } \value{ A point pattern (object of class \code{"ppp"}). } \examples{ index <- expand.grid(-2:2,-2:2) eig <- exp(-rowSums(index^2)) X <- rdpp(eig, index) X ## To simulate a determinantal projection point process with the given indices, set eig=1: XX <- rdpp(1, index) XX } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat.core/man/rmhmodel.list.Rd0000644000176200001440000001127614141452520016674 0ustar liggesusers\name{rmhmodel.list} \alias{rmhmodel.list} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Given a list of parameters, builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{list}(model, ...) } \arguments{ \item{model}{A list of parameters. See Details.} \item{\dots}{ Optional list of additional named parameters. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a validated list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.list} is the method for lists. The argument \code{model} should be a named list of parameters of the form \code{list(cif, par, w, trend, types)} where \code{cif} and \code{par} are required and the others are optional. For details about these components, see \code{\link{rmhmodel.default}}. The subsequent arguments \code{\dots} (if any) may also have these names, and they will take precedence over elements of the list \code{model}. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193--212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763--770. Geyer, C.J. and Moeller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140.
} \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} } \examples{ # Strauss process: mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) mod01 <- rmhmodel(mod01) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) mod04 <- rmhmodel(mod04) # Soft core: w <- square(10) mod07 <- list(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) mod07 <- rmhmodel(mod07) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) mod08 <- rmhmodel(mod08) # specify types mod09 <- rmhmodel(list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B"))) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) mod10 <- rmhmodel(mod10) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) mod17 <- rmhmodel(mod17) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.core/man/dkernel.Rd0000644000176200001440000000611714141452520015535 0ustar liggesusers\name{dkernel} \alias{dkernel} \alias{pkernel} \alias{qkernel} \alias{rkernel} \title{Kernel distributions and random generation} \description{Density, distribution function, quantile function and random generation for several distributions used in kernel estimation for numerical data. } \usage{ dkernel(x, kernel = "gaussian", mean = 0, sd = 1) pkernel(q, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE) qkernel(p, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE) rkernel(n, kernel = "gaussian", mean = 0, sd = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{p}{Vector of probabilities.} \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } \item{n}{Number of observations.} \item{mean}{Mean of distribution.} \item{sd}{Standard deviation of distribution.} \item{lower.tail}{logical; if \code{TRUE} (the default), then probabilities are \eqn{P(X \le x)}{P[X <= x]}, otherwise, \eqn{P(X > x)}{P[X > x]}. } } \details{ These functions give the probability density, cumulative distribution function, quantile function and random generation for several distributions used in kernel estimation for one-dimensional (numerical) data.
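For example, the argument \code{sd} always refers to the standard deviation of the distribution itself, not to the half-width of the kernel; this can be verified empirically (a minimal sketch, with an arbitrary choice of kernel and \code{sd}):
\preformatted{
  y <- rkernel(10000, "epanechnikov", sd=2)
  sd(y)   # should be approximately 2
}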
The available kernels are those used in \code{\link[stats]{density.default}}, namely \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. For more information about these kernels, see \code{\link[stats]{density.default}}. \code{dkernel} gives the probability density, \code{pkernel} gives the cumulative distribution function, \code{qkernel} gives the quantile function, and \code{rkernel} generates random deviates. } \value{ A numeric vector. For \code{dkernel}, a vector of the same length as \code{x} containing the corresponding values of the probability density. For \code{pkernel}, a vector of the same length as \code{x} containing the corresponding values of the cumulative distribution function. For \code{qkernel}, a vector of the same length as \code{p} containing the corresponding quantiles. For \code{rkernel}, a vector of length \code{n} containing randomly generated values. } \examples{ x <- seq(-3,3,length=100) plot(x, dkernel(x, "epa"), type="l", main=c("Epanechnikov kernel", "probability density")) plot(x, pkernel(x, "opt"), type="l", main=c("OptCosine kernel", "cumulative distribution function")) p <- seq(0,1, length=256) plot(p, qkernel(p, "biw"), type="l", main=c("Biweight kernel", "quantile function")) y <- rkernel(100, "tri") hist(y, main="Random variates from triangular density") rug(y) } \seealso{ \code{\link[stats]{density.default}}, \code{\link{kernel.factor}} } \author{\adrian \email{adrian@maths.uwa.edu.au} and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat.core/man/spatstat.core-deprecated.Rd0000644000176200001440000000127714141452520021003 0ustar liggesusers\name{spatstat.core-deprecated} \alias{which.max.im} \title{Deprecated spatstat.core functions} \description{ Deprecated spatstat.core functions. } \usage{ which.max.im(x) } \details{ These functions are deprecated, and will eventually be deleted from the \pkg{spatstat.core} package. \code{as.psp.owin} has been replaced by \code{\link{edges}}. \code{which.max.im(x)} is replaced by \code{\link{im.apply}(x, which.max)}. \code{circumradius} is replaced by the more appropriately named \code{boundingradius}. \code{rjitterlpp} is replaced by \code{rjitter.lpp}, a method for the generic \code{rjitter}. } \value{ \code{which.max.im} returns an integer. } \keyword{internal} spatstat.core/man/as.fv.Rd0000644000176200001440000000623214141452520015124 0ustar liggesusers\name{as.fv} \alias{as.fv} \alias{as.fv.fv} \alias{as.fv.fasp} \alias{as.fv.data.frame} \alias{as.fv.matrix} \alias{as.fv.minconfit} \alias{as.fv.dppm} \alias{as.fv.kppm} \alias{as.fv.bw.optim} \title{Convert Data To Class fv} \description{ Converts data into a function table (an object of class \code{"fv"}). } \usage{ as.fv(x) \method{as.fv}{fv}(x) \method{as.fv}{data.frame}(x) \method{as.fv}{matrix}(x) \method{as.fv}{fasp}(x) \method{as.fv}{minconfit}(x) \method{as.fv}{dppm}(x) \method{as.fv}{kppm}(x) \method{as.fv}{bw.optim}(x) } \arguments{ \item{x}{Data which will be converted into a function table} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This command converts data \code{x}, which could be interpreted as the values of a function, into a function value table (object of the class \code{"fv"} as described in \code{\link{fv.object}}). This object can then be plotted easily using \code{\link{plot.fv}}.
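For example, a matrix whose first column contains the values of the function argument can be converted and plotted directly (a minimal sketch with made-up values):
\preformatted{
  r <- seq(0, 1, length=101)
  m <- cbind(r=r, est=r^2)
  f <- as.fv(m)
  plot(f)
}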
The dataset \code{x} may be any of the following: \itemize{ \item an object of class \code{"fv"}; \item a matrix or data frame with at least two columns; \item an object of class \code{"fasp"}, representing an array of \code{"fv"} objects; \item an object of class \code{"minconfit"}, giving the results of a minimum contrast fit by the command \code{\link[spatstat.core]{mincontrast}}; \item an object of class \code{"kppm"}, representing a fitted Cox or cluster point process model, obtained from the model-fitting command \code{\link[spatstat.core]{kppm}}; \item an object of class \code{"dppm"}, representing a fitted determinantal point process model, obtained from the model-fitting command \code{\link[spatstat.core]{dppm}}; \item an object of class \code{"bw.optim"}, representing an optimal choice of smoothing bandwidth by a cross-validation method, obtained from commands like \code{\link[spatstat.core]{bw.diggle}}. } The function \code{as.fv} is generic, with methods for each of the classes listed above. The behaviour is as follows: \itemize{ \item If \code{x} is an object of class \code{"fv"}, it is returned unchanged. \item If \code{x} is a matrix or data frame, the first column is interpreted as the function argument, and subsequent columns are interpreted as values of the function computed by different methods. \item If \code{x} is an object of class \code{"fasp"} representing an array of \code{"fv"} objects, these are combined into a single \code{"fv"} object. \item If \code{x} is an object of class \code{"minconfit"}, or an object of class \code{"kppm"} or \code{"dppm"}, the result is a function table containing the observed summary function and the best fit summary function. \item If \code{x} is an object of class \code{"bw.optim"}, the result is a function table of the optimisation criterion as a function of the smoothing bandwidth. } } \examples{ r <- seq(0, 1, length=101) x <- data.frame(r=r, y=r^2) as.fv(x) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.core/man/MultiStraussHard.Rd0000644000176200001440000000673614141452520017372 0ustar liggesusers\name{MultiStraussHard} \alias{MultiStraussHard} \title{The Multitype/Hard Core Strauss Point Process Model} \description{ Creates an instance of the multitype/hard core Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStraussHard(iradii, hradii, types=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype/hard core Strauss process with interaction radii \eqn{iradii[i,j]} and hard core radii \eqn{hradii[i,j]}. } \details{ This is a hybrid of the multitype Strauss process (see \code{\link{MultiStrauss}}) and the hard core process (case \eqn{\gamma=0}{gamma = 0} of the Strauss process). A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart; if the pair lies more than \eqn{h_{ij}}{h[i,j]} and less than \eqn{r_{ij}}{r[i,j]} units apart, it contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the probability density. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStraussHard interaction is applied, when the user calls \code{\link{ppm}}.
However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrices \code{iradii} and \code{hradii}. The matrices \code{iradii} and \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hardcore radii are specified in \code{MultiStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStraussHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiHard}}, \code{\link{Strauss}} } \examples{ r <- matrix(3, nrow=2,ncol=2) h <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStraussHard(r,h) # prints a sensible description of itself r <- 0.04 * matrix(c(1,2,2,1), nrow=2,ncol=2) h <- 0.02 * matrix(c(1,NA,NA,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[owin(c(0,0.8), c(0,1))] } fit <- ppm(X ~1, MultiStraussHard(r,h)) # fit stationary multitype hardcore Strauss process to `amacrine' } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype/hard core Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStraussHard(types=NULL, iradii, hradii)}. The new code attempts to handle the old syntax as well. } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.core/man/as.owin.Rd0000644000176200001440000001547114141452520015472 0ustar liggesusers\name{as.owin} \alias{as.owin.ppm} \alias{as.owin.kppm} \alias{as.owin.dppm} \alias{as.owin.slrm} \alias{as.owin.msr} \alias{as.owin.quadrattest} \alias{as.owin.rmhmodel} \title{Convert Data To Class owin} \description{ Converts data specifying an observation window in any of several formats, into an object of class \code{"owin"}. } \usage{ \method{as.owin}{ppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{kppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{dppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{slrm}(W, \dots, from=c("points", "covariates")) \method{as.owin}{msr}(W, \dots, fatal=TRUE) \method{as.owin}{quadrattest}(W, \dots, fatal=TRUE) \method{as.owin}{rmhmodel}(W, \dots, fatal=FALSE) } \arguments{ \item{W}{ Data specifying an observation window, in any of several formats described under \emph{Details} below. } \item{fatal}{ Logical value determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} \item{from}{Character string. See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link{owin.object}} for an overview. 
The generic function \code{as.owin} converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{as.owin} is generic, with methods for different classes of objects, and a default method. The argument \code{W} may be \itemize{ \item an object of class \code{"owin"} \item a structure with entries \code{xrange}, \code{yrange} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xmin}, \code{xmax}, \code{ymin}, \code{ymax} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle. This will accept objects of class \code{bbox} in the \pkg{sf} package. \item a numeric vector of length 4 (interpreted as \code{(xmin, xmax, ymin, ymax)} in that order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xl}, \code{xu}, \code{yl}, \code{yu} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle as \code{(xmin, xmax) = (xl, xu)} and \code{(ymin, ymax) = (yl, yu)}. This will accept objects of class \code{spp} used in the Venables and Ripley \pkg{spatial} package. \item an object of class \code{"ppp"} representing a point pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"psp"} representing a line segment pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"tess"} representing a tessellation. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"quad"} representing a quadrature scheme. In this case, the window of the \code{data} component will be extracted. \item an object of class \code{"im"} representing a pixel image. In this case, a window of type \code{"mask"} will be returned, with the same pixel raster coordinates as the image. An image pixel value of \code{NA}, signifying that the pixel lies outside the window, is transformed into the logical value \code{FALSE}, which is the corresponding convention for window masks. \item an object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"dppm"} representing a fitted point process model. In this case, if \code{from="points"} (the default), \code{as.owin} extracts the original point pattern data to which the model was fitted, and returns the observation window of this point pattern. If \code{from="covariates"} then \code{as.owin} extracts the covariate images to which the model was fitted, and returns a binary mask window that specifies the pixel locations. \item a \code{data.frame} with exactly three columns. Each row of the data frame corresponds to one pixel. Each row contains the \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value indicating whether the pixel lies inside the window. \item a \code{data.frame} with exactly two columns. Each row of the data frame contains the \eqn{x} and \eqn{y} coordinates of a pixel that lies inside the window. \item an object of class \code{"distfun"}, \code{"nnfun"} or \code{"funxy"} representing a function of spatial location, defined on a spatial domain. The spatial domain of the function will be extracted. \item an object of class \code{"rmhmodel"} representing a point process model that can be simulated using \code{\link{rmh}}. The window (spatial domain) of the model will be extracted. The window may be \code{NULL} in some circumstances (indicating that the simulation window has not yet been determined).
This is not treated as an error, because the argument \code{fatal} defaults to \code{FALSE} for this method. \item an object of class \code{"layered"} representing a list of spatial objects. See \code{\link{layered}}. In this case, \code{as.owin} will be applied to each of the objects in the list, and the union of these windows will be returned. \item an object of class \code{"SpatialPolygon"}, \code{"SpatialPolygons"} or \code{"SpatialPolygonsDataFrame"}. To handle these data types, \bold{the package} \pkg{maptools} \bold{must be loaded}, because it provides the methods for \code{as.owin} for these classes. For full details, see \code{vignette('shapefiles')}. } If the argument \code{W} is not in one of these formats and cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). When \code{W} is a data frame, the argument \code{step} can be used to specify the pixel grid spacing; otherwise, the spacing will be guessed from the data. } \seealso{ \code{\link{owin.object}}, \code{\link{owin}}. Additional methods for \code{as.owin} are provided in the \pkg{maptools} package: \code{as.owin.SpatialPolygon}, \code{as.owin.SpatialPolygons}, \code{as.owin.SpatialPolygonsDataFrame}. } \examples{ w <- as.owin(c(0,1,0,1)) w <- as.owin(list(xrange=c(0,5),yrange=c(0,10))) # point pattern data(demopat) w <- as.owin(demopat) # image Z <- as.im(function(x,y) { x + 3}, unit.square()) w <- as.owin(Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.core/man/plot.msr.Rd0000644000176200001440000000702714141452520015670 0ustar liggesusers\name{plot.msr} \alias{plot.msr} \title{Plot a Signed or Vector-Valued Measure} \description{ Plot a signed measure or vector-valued measure. } \usage{ \method{plot}{msr}(x, \dots, add = FALSE, how = c("image", "contour", "imagecontour"), main = NULL, do.plot = TRUE, multiplot = TRUE, massthresh = 0, equal.markscale = FALSE, equal.ribbon = FALSE) } \arguments{ \item{x}{ The signed or vector measure to be plotted. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{\dots}{ Extra arguments passed to \code{\link{Smooth.ppp}} to control the interpolation of the continuous density component of \code{x}, or passed to \code{\link{plot.im}} or \code{\link{plot.ppp}} to control the appearance of the plot. } \item{add}{ Logical flag; if \code{TRUE}, the graphics are added to the existing plot. If \code{FALSE} (the default) a new plot is initialised. } \item{how}{ String indicating how to display the continuous density component. } \item{main}{ String. Main title for the plot. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to display a plot with multiple panels (representing different components of a vector-valued measure, or different types of points in a multitype measure.) } \item{massthresh}{ Threshold for plotting atoms. A single numeric value or \code{NULL}. If \code{massthresh=0} (the default) then only atoms with nonzero mass will be plotted. If \code{massthresh > 0} then only atoms whose absolute mass exceeds \code{massthresh} will be plotted. If \code{massthresh=NULL}, then all atoms of the measure will be plotted. } \item{equal.markscale}{ Logical value indicating whether different panels should use the same symbol map (to represent the masses of atoms of the measure). 
} \item{equal.ribbon}{ Logical value indicating whether different panels should use the same colour map (to represent the density values in the diffuse component of the measure). } } \value{ (Invisible) colour map (object of class \code{"colourmap"}) for the colour image. } \details{ This is the \code{plot} method for the class \code{"msr"}. The continuous density component of \code{x} is interpolated from the existing data by \code{\link{Smooth.ppp}}, and then displayed as a colour image by \code{\link{plot.im}}. The discrete atomic component of \code{x} is then superimposed on this image by plotting the atoms as circles (for positive mass) or squares (for negative mass) by \code{\link{plot.ppp}}. By default, atoms with zero mass are not plotted at all. To smooth both the discrete and continuous components, use \code{\link{Smooth.msr}}. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. To remove atoms with tiny masses, use the argument \code{massthresh}. } \seealso{ \code{\link{msr}}, \code{\link{Smooth.ppp}}, \code{\link{Smooth.msr}}, \code{\link{plot.im}}, \code{\link{plot.ppp}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(rp) plot(rs) plot(rs, how="contour") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat.core/man/logLik.mppm.Rd0000644000176200001440000001023514141452520016276 0ustar liggesusers\name{logLik.mppm} \alias{logLik.mppm} \alias{AIC.mppm} \alias{extractAIC.mppm} \alias{nobs.mppm} \alias{getCall.mppm} \alias{terms.mppm} \title{Log Likelihood and AIC for Multiple Point Process Model} \description{ For a point process model that has been fitted to multiple point patterns, these functions extract the log likelihood and AIC, or analogous quantities based on the pseudolikelihood. } \usage{ \method{logLik}{mppm}(object, \dots, warn=TRUE) \method{AIC}{mppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{mppm}(fit, scale = 0, k = 2, \dots, takeuchi = TRUE) \method{nobs}{mppm}(object, \dots) \method{getCall}{mppm}(x, \dots) \method{terms}{mppm}(x, \dots) } \arguments{ \item{object,fit,x}{ Fitted point process model (fitted to multiple point patterns). An object of class \code{"mppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood is returned instead of the likelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{AIC}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{terms}} and \code{\link[stats:update]{getCall}} for the class \code{"mppm"}. An object of class \code{"mppm"} represents a fitted Poisson or Gibbs point process model fitted to several point patterns. It is obtained from the model-fitting function \code{\link{mppm}}. The method \code{logLik.mppm} extracts the maximised value of the log likelihood for the fitted model (as approximated by quadrature using the Berman-Turner approximation). If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning. 
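For example (a minimal sketch; the Strauss interaction radius is a hypothetical value):
\preformatted{
  H <- hyperframe(Bugs=waterstriders)
  fitP <- mppm(Bugs ~ 1, H)              # Poisson model
  logLik(fitP)                           # log likelihood
  fitS <- mppm(Bugs ~ 1, H, Strauss(5))  # Gibbs model
  logLik(fitS)  # log pseudolikelihood, issued with a warning
}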
The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. The method \code{extractAIC.mppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. The method \code{nobs.mppm} returns the total number of points in the original data point patterns to which the model was fitted. The method \code{getCall.mppm} extracts the original call to \code{\link{mppm}} which caused the model to be fitted. The method \code{terms.mppm} extracts the covariate terms in the model formula as a \code{terms} object. Note that these terms do not include the interaction component of the model. The \R function \code{\link[stats]{step}} uses these methods. } \value{ See the help files for the corresponding generic functions. } \seealso{ \code{\link{mppm}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. 
} \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) logLik(fit) AIC(fit) nobs(fit) getCall(fit) } \keyword{spatial} \keyword{models} spatstat.core/DESCRIPTION0000644000176200001440000001176614150203345014562 0ustar liggesusersPackage: spatstat.core Version: 2.3-2 Date: 2021-11-25 Title: Core Functionality of the 'spatstat' Family Authors@R: c(person("Adrian", "Baddeley", role = c("aut", "cre"), email = "Adrian.Baddeley@curtin.edu.au"), person("Rolf", "Turner", role = "aut", email="r.turner@auckland.ac.nz"), person("Ege", "Rubak", role = "aut", email = "rubak@math.aau.dk"), person("Kasper", "Klitgaard Berthelsen", role = "ctb"), person("Achmad", "Choiruddin", role = "ctb"), person("Jean-Francois", "Coeurjolly", role = "ctb"), person("Ottmar", "Cronie", role = "ctb"), person("Tilman", "Davies", role = "ctb"), person("Chiara", "Fend", role = "ctb"), person("Julian", "Gilbey", role = "ctb"), person("Yongtao", "Guan", role = "ctb"), person("Ute", "Hahn", role = "ctb"), person("Kassel", "Hingee", role = "ctb"), person("Abdollah", "Jalilian", role = "ctb"), person("Marie-Colette", "van Lieshout", role = "ctb"), person("Greg", "McSwiggan", role = "ctb"), person("Tuomas", "Rajala", role = "ctb"), person("Suman", "Rakshit", role = "ctb"), person("Dominic", "Schuhmacher", role = "ctb"), person("Rasmus", "Plenge Waagepetersen", role = "ctb"), person("Hangsheng", "Wang", role = "ctb")) Maintainer: Adrian Baddeley Depends: R (>= 3.5.0), spatstat.data (>= 2.1-0), spatstat.geom (>= 2.3-0), stats, graphics, grDevices, utils, methods, nlme, rpart Imports: spatstat.utils (>= 2.2-0), spatstat.sparse (>= 2.0-0), mgcv, Matrix, abind, tensor, goftest (>= 1.2-2) Suggests: sm, maptools (>= 0.9-9), gsl, locfit, spatial, RandomFields (>= 3.1.24.1), RandomFieldsUtils(>= 0.3.3.1), fftwtools (>= 0.9-8), nleqslv, spatstat.linnet (>= 2.0-0), spatstat (>= 2.0-0) Description: Functionality for data analysis and modelling of spatial data, mainly spatial point patterns, in the 'spatstat' family of packages. (Excludes analysis of spatial data on a linear network, which is covered by the separate package 'spatstat.linnet'.) Exploratory methods include quadrat counts, K-functions and their simulation envelopes, nearest neighbour distance and empty space statistics, Fry plots, pair correlation function, kernel smoothed intensity, relative risk estimation with cross-validated bandwidth selection, mark correlation functions, segregation indices, mark dependence diagnostics, and kernel estimates of covariate effects. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Monte Carlo, Diggle-Cressie-Loosmore-Ford, Dao-Genton, two-stage Monte Carlo) and tests for covariate effects (Cox-Berman-Waller-Lawson, Kolmogorov-Smirnov, ANOVA) are also supported. Parametric models can be fitted to point pattern data using the functions ppm(), kppm(), slrm(), dppm() similar to glm(). Types of models include Poisson, Gibbs and Cox point processes, Neyman-Scott cluster processes, and determinantal point processes. Models may involve dependence on covariates, inter-point interaction, cluster formation and dependence on marks. Models are fitted by maximum likelihood, logistic regression, minimum contrast, and composite likelihood methods. A model can be fitted to a list of point patterns (replicated point pattern data) using the function mppm(). The model can include random effects and fixed effects depending on the experimental design, in addition to all the features listed above. 
Fitted point process models can be simulated, automatically. Formal hypothesis tests of a fitted model are supported (likelihood ratio test, analysis of deviance, Monte Carlo tests) along with basic tools for model selection (stepwise(), AIC()) and variable selection (sdr). Tools for validating the fitted model include simulation envelopes, residuals, residual plots and Q-Q plots, leverage and influence diagnostics, partial residuals, and added variable plots. License: GPL (>= 2) URL: http://spatstat.org/ NeedsCompilation: yes ByteCompile: true BugReports: https://github.com/spatstat/spatstat.core/issues Packaged: 2021-11-25 02:35:56 UTC; adrian Author: Adrian Baddeley [aut, cre], Rolf Turner [aut], Ege Rubak [aut], Kasper Klitgaard Berthelsen [ctb], Achmad Choiruddin [ctb], Jean-Francois Coeurjolly [ctb], Ottmar Cronie [ctb], Tilman Davies [ctb], Chiara Fend [ctb], Julian Gilbey [ctb], Yongtao Guan [ctb], Ute Hahn [ctb], Kassel Hingee [ctb], Abdollah Jalilian [ctb], Marie-Colette van Lieshout [ctb], Greg McSwiggan [ctb], Tuomas Rajala [ctb], Suman Rakshit [ctb], Dominic Schuhmacher [ctb], Rasmus Plenge Waagepetersen [ctb], Hangsheng Wang [ctb] Repository: CRAN Date/Publication: 2021-11-26 16:10:13 UTC spatstat.core/tests/0000755000176200001440000000000014141452520014204 5ustar liggesusersspatstat.core/tests/testsK.R0000644000176200001440000004510414141452520015610 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/Kfuns.R #' #' Various K and L functions and pcf #' #' $Revision: 1.40 $ $Date: 2021/03/23 05:54:20 $ #' #' Assumes 'EveryStart.R' was run myfun <- function(x,y){(x+1) * y } # must be outside if(FULLTEST) { Cells <- cells Amacrine <- amacrine Redwood <- redwood } else { ## reduce numbers of data + dummy points spatstat.options(npixel=32, ndummy.min=16) Cells <- cells[c(FALSE, TRUE)] Amacrine <- amacrine[c(FALSE, TRUE)] Redwood <- redwood[c(FALSE, TRUE)] } local({ if(FULLTEST) { #' supporting code rmax.rule("Kscaled", owin(), 42) implemented.for.K(c("border", "bord.modif", "translate", "good", "best"), "polygonal", TRUE) implemented.for.K(c("border", "bord.modif", "translate", "good", "best"), "mask", TRUE) implemented.for.K(c("border", "isotropic"), "mask", TRUE) implemented.for.K(c("border", "isotropic"), "mask", FALSE) #' shortcuts D <- density(Cells) K <- Kborder.engine(Cells, rmax=0.4, weights=D, ratio=TRUE) K <- Knone.engine(Cells, rmax=0.4, weights=D, ratio=TRUE) allcor <- c("none", "border", "bord.modif","isotropic", "translate") K <- Krect.engine(Cells, rmax=0.4, ratio=TRUE, correction=allcor) K <- Krect.engine(Cells, rmax=0.4, ratio=TRUE, correction=allcor, weights=D) K <- Krect.engine(Cells, rmax=0.4, ratio=TRUE, correction=allcor, use.integers=FALSE) #' Kest special code blocks K <- Kest(Cells, var.approx=TRUE, ratio=FALSE) Z <- distmap(Cells) + 1 Kb <- Kest(Cells, correction=c("border","bord.modif"), weights=Z, ratio=TRUE) Kn <- Kest(Cells, correction="none", weights=Z, ratio=TRUE) Knb <- Kest(Cells, correction=c("border","bord.modif","none"), weights=Z, ratio=TRUE) } if(ALWAYS) { bigint <- 50000 # This is only "big" on a 32-bit system where # sqrt(.Machine$integer.max) = 46340.9 X <- 
runifpoint(bigint) Z <- as.im(1/bigint, owin()) Kb <- Kest(X, correction=c("border","bord.modif"), rmax=0.02, weights=Z, ratio=TRUE) } if(FULLTEST) { Kn <- Kest(X, correction="none", rmax=0.02, weights=Z, ratio=TRUE) Knb <- Kest(X, correction=c("border","bord.modif","none"), rmax=0.02, weights=Z, ratio=TRUE) #' pcf.ppp special code blocks pr <- pcf(Cells, ratio=TRUE, var.approx=TRUE) pc <- pcf(Cells, domain=square(0.5)) pcr <- pcf(Cells, domain=square(0.5), ratio=TRUE) pw <- pcf(Redwood, correction="none") pwr <- pcf(Redwood, correction="none", ratio=TRUE) pv <- pcf(Redwood, kernel="rectangular") p1 <- pcf(Redwood[1]) #' pcf.fv K <- Kest(Redwood) g <- pcf(K, method="a") g <- pcf(K, method="c") g <- pcf(K, method="d") #' Kinhom code blocks X <- rpoispp(function(x,y) { 100 * x }, 100, square(1)) lambda <- 100 * X$x Kin <- Kinhom(X, lambda, correction=c("none", "border")) lambda2 <- outer(lambda, lambda, "*") Ki2 <- Kinhom(X, lambda2=lambda2, diagonal=FALSE, correction=c("translate", "isotropic")) fut <- ppm(X ~ x) Kio <- Kinhom(X, fut, update=FALSE) Kiu <- Kinhom(X, fut, update=TRUE, diagonal=FALSE) } if(ALWAYS) { #' edge corrections rr <- rep(0.1, npoints(Cells)) eC <- edge.Ripley(Cells, rr) eI <- edge.Ripley(Cells, rr, method="interpreted") if(max(abs(eC-eI)) > 0.1) stop("Ripley edge correction results do not match") } if(FULLTEST) { a <- rmax.Ripley(square(1)) a <- rmax.Rigid(square(1)) a <- rmax.Ripley(as.polygonal(square(1))) a <- rmax.Rigid(as.polygonal(square(1))) a <- rmax.Ripley(letterR) a <- rmax.Rigid(letterR) } if(ALWAYS) { #' run slow code for edge correction and compare results op <- spatstat.options(npixel=128) X <- Redwood[c(TRUE, FALSE, FALSE, FALSE)] Window(X) <- as.polygonal(Window(X)) Eapprox <- edge.Trans(X) Eexact <- edge.Trans(X, exact=TRUE) maxrelerr <- max(abs(1 - range(Eapprox/Eexact))) if(maxrelerr > 0.1) stop(paste("Exact and approximate algorithms for edge.Trans disagree by", paste0(round(100*maxrelerr), "%")), call.=FALSE) spatstat.options(op) } }) local({ if(FULLTEST) { #' ---- multitype ------ K <- Kcross(Amacrine, correction=c("none", "bord.modif")) #' inhomogeneous multitype fit <- ppm(Amacrine ~ marks) K1 <- Kcross.inhom(Amacrine, lambdaX=fit) K2 <- Kcross.inhom(Amacrine, lambdaX=densityfun(Amacrine)) K3 <- Kcross.inhom(Amacrine, lambdaX=density(Amacrine, at="points")) On <- split(Amacrine)$on Off <- split(Amacrine)$off K4 <- Kcross.inhom(Amacrine, lambdaI=ppm(On), lambdaJ=ppm(Off)) K5 <- Kcross.inhom(Amacrine, correction="bord.modif") #' markconnect, markcorr M <- markconnect(Amacrine, "on", "off", normalise=TRUE) M <- markcorr(longleaf, normalise=TRUE, correction=c("isotropic", "translate", "border", "none")) M <- markcorr(longleaf, normalise=TRUE, fargs=list()) #' Kmark (=markcorrint) X <- runifpoint(100) %mark% runif(100) km <- Kmark(X, f=atan2) km <- Kmark(X, f1=sin) km <- Kmark(X, f="myfun") aa <- Kmark(X, normalise=FALSE, returnL=FALSE) aa <- Kmark(X, normalise=FALSE, returnL=TRUE) aa <- Kmark(X, normalise=TRUE, returnL=FALSE) aa <- Kmark(X, normalise=TRUE, returnL=TRUE) } }) local({ if(FULLTEST) { #' various modified K functions #' #' directional K functions #' a <- Ksector(swedishpines, -pi/2, pi/2, units="radians", correction=c("none", "border", "bord.modif", "Ripley", "translate"), ratio=TRUE) plot(a) #' #' local K functions #' fut <- ppm(swedishpines ~ polynom(x,y,2)) Z <- predict(fut) Lam <- fitted(fut, dataonly=TRUE) a <- localLinhom(swedishpines, lambda=fut) a <- localLinhom(swedishpines, lambda=Z) a <- localLinhom(swedishpines, lambda=Lam) a 
<- localLinhom(swedishpines, lambda=Z, correction="none") a <- localLinhom(swedishpines, lambda=Z, correction="translate") a <- localLcross(Amacrine) a <- localLcross(Amacrine, from="off", to="off") a <- localKdot(Amacrine) a <- localLdot(Amacrine) a <- localKcross.inhom(Amacrine) a <- localLcross.inhom(Amacrine) fat <- ppm(Amacrine ~ x * marks) Zed <- predict(fat) Lum <- fitted(fat, dataonly=TRUE) moff <- (marks(Amacrine) == "off") a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Lum) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=fat) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed, correction="none") a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed, correction="translate") #' #' cases of resolve.lambda.cross #' h <- resolve.lambda.cross(Amacrine, moff, !moff) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaX=Zed) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaX=Lum) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaX=fat) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaX=fat, update=FALSE) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaI=Zed[["off"]], lambdaJ=Zed[["on"]]) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaI=Lum[moff], lambdaJ=Lum[!moff]) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaI=fat, lambdaJ=fat) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaI=fat, lambdaJ=fat, update=FALSE) d <- densityfun(unmark(Amacrine), sigma=0.1) dm <- lapply(split(Amacrine), densityfun, sigma=0.1) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaX=d) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaI=dm[["off"]], lambdaJ=dm[["on"]]) h <- resolve.lambda.cross(Amacrine, moff, !moff, lambdaX=function(x,y,m){ d(x,y) }) #' #' multitype inhomogeneous pcf #' g <- pcfcross.inhom(Amacrine, lambdaI=dm[["off"]], lambdaJ=dm[["on"]]) #' #' lohboot code blocks #' Ared <- lohboot(Redwood, fun="Kest", block=TRUE, Vcorrection=TRUE, global=FALSE, correction="none") Bred <- lohboot(Redwood, block=TRUE, basicboot=TRUE, global=FALSE) Cred <- lohboot(Redwood, fun=Kest, block=TRUE, global=TRUE, correction="translate") Dred <- lohboot(Redwood, Lest) Kred <- lohboot(Redwood, Kinhom) Lred <- lohboot(Redwood, Linhom) gred <- lohboot(Redwood, pcfinhom, sigma=0.1) Zred <- predict(ppm(Redwood ~ x+y)) Lred <- lohboot(Redwood, Linhom, lambda=Zred) #' X <- runifpoint(100, letterR) AX <- lohboot(X, block=TRUE, nx=7, ny=10) #' multitype b <- lohboot(Amacrine, Kcross) b <- lohboot(Amacrine, Lcross) b <- lohboot(Amacrine, Kdot) b <- lohboot(Amacrine, Ldot) b <- lohboot(Amacrine, Kcross.inhom) b <- lohboot(Amacrine, Lcross.inhom) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaX=Zed) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaX=Lum) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaX=fat) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) #' #' residual K functions etc #' rco <- compareFit(Cells, Kcom, interaction=anylist(P=Poisson(), S=Strauss(0.08)), same="trans", different="tcom") fit <- ppm(Cells ~ x, Strauss(0.07)) K <- Kcom(Cells, model=fit, restrict=TRUE) ## Kscaled A <- Lscaled(japanesepines, renormalise=TRUE, correction="all") } }) local({ if(ALWAYS) { #' From Ege, in response to a stackoverflow question. 
#' The following example has two points separated by r = 1 with 1/4 of the #' circumference outside the 10x10 window (i.e. area 100). #' Thus the value of K^(r) should jump from 0 to #' 100/(2\cdot 1)\cdot ((3/4)^{-1} + (3/4)^{-1}) = 100 \cdot 4/3 = 133.333. x <- c(4.5,5.5) y <- c(10,10)-sqrt(2)/2 W <- square(10) X <- ppp(x, y, W) compere <- function(a, b, where, tol=1e-6) { descrip <- paste("discrepancy in isotropic edge correction", where) err <- as.numeric(a) - as.numeric(b) maxerr <- max(abs(err)) blurb <- paste(descrip, "is", paste0(signif(maxerr, 4), ","), if(maxerr > tol) "exceeding" else "within", "tolerance of", tol) message(blurb) if(maxerr > tol) { message(paste("Discrepancies:", paste(err, collapse=", "))) stop(paste("excessive", descrip), call.=FALSE) } invisible(TRUE) } ## Testing: eX <- edge.Ripley(X, c(1,1)) compere(eX, c(4/3,4/3), "at interior point of rectangle") ## Corner case: Y <- X Y$x <- X$x-4.5+sqrt(2)/2 eY <- edge.Ripley(Y, c(1,1)) compere(eY, c(2,4/3), "near corner of rectangle") ## Invoke polygonal code Z <- rotate(Y, pi/4) eZdebug <- edge.Ripley(Z, c(1,1), internal=list(debug=TRUE)) compere(eZdebug, c(2,4/3), "at interior point of polygon (debug on)") ## test validity without debugger,in case of quirks of compiler optimisation eZ <- edge.Ripley(Z, c(1,1)) compere(eZ, c(2,4/3), "at interior point of polygon (debug off)") } }) reset.spatstat.options() # # tests/kppm.R # # $Revision: 1.37 $ $Date: 2021/09/27 04:11:22 $ # # Test functionality of kppm that depends on RandomFields # Test update.kppm for old style kppm objects if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ fit <- kppm(redwood ~1, "Thomas") # sic fitx <- kppm(redwood ~x, "Thomas", verbose=TRUE) if(FULLTEST) { fitx <- update(fit, ~ . + x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) #' Wsub <- owin(c(0, 0.5), c(-0.5, 0)) Zsub <- (bdist.pixels(Window(redwood)) > 0.1) fitWsub <- kppm(redwood ~1, "Thomas", subset=Wsub) fitZsub <- kppm(redwood ~1, "Thomas", subset=Zsub) fitWsub #' various methods ff <- as.fv(fitx) uu <- unitname(fitx) unitname(fitCx) <- "furlong" mo <- model.images(fitCx) p <- psib(fit) px <- psib(fitx) } if(ALWAYS) { Y <- simulate(fitx, seed=42, saveLambda=TRUE)[[1]] } if(FULLTEST) { #' vcov.kppm different algorithms vc <- vcov(fitx) vc2 <- vcov(fitx, fast=TRUE) vc3 <- vcov(fitx, fast=TRUE, splitup=TRUE) vc4 <- vcov(fitx, splitup=TRUE) ## other code blocks a <- varcount(fitx, function(x,y){x+1}) # always positive a <- varcount(fitx, function(x,y){y-1}) # always negative a <- varcount(fitx, function(x,y){x+y}) # positive or negative #' improve.kppm fitI <- update(fit, improve.type="quasi") fitxI <- update(fitx, improve.type="quasi") fitxIs <- update(fitx, improve.type="quasi", fast=FALSE) #' vcov.kppm vcI <- vcov(fitxI) } ## plot.kppm including predict.kppm if(ALWAYS) { fitMC <- kppm(redwood ~ x, "Thomas") plot(fitMC) } if(FULLTEST) { fitCL <- kppm(redwood ~ x, "Thomas", method="c") fitPA <- kppm(redwood ~ x, "Thomas", method="p") plot(fitCL) plot(fitPA) ## fit with composite likelihood method [thanks to Abdollah Jalilian] fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8) kfut <- as.fv(fut) } if(require(RandomFields)) { fit0 <- kppm(redwood ~1, "LGCP") is.poisson(fit0) Y0 <- simulate(fit0, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y0)) p0 <- psib(fit0) # issues a warning if(FULLTEST) { ## fit LGCP using K function: slow fit1 <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), 
control=list(maxit=3)) Y1 <- simulate(fit1, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1)) } ## fit LGCP using pcf fit1p <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1p <- simulate(fit1p, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1p)) ## .. and using different fitting methods if(FULLTEST) { fit1pClik <- update(fit1p, method="clik") fit1pPalm <- update(fit1p, method="palm") } ## image covariate (a different code block) xx <- as.im(function(x,y) x, Window(redwood)) fit1xx <- update(fit1p, . ~ xx, data=solist(xx=xx)) Y1xx <- simulate(fit1xx, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1xx)) if(FULLTEST) { fit1xxVG <- update(fit1xx, clusters="VarGamma", nu=-1/4) Y1xxVG <- simulate(fit1xxVG, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1xxVG)) } fit1xxLG <- update(fit1xx, clusters="LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1xxLG <- simulate(fit1xxLG, saveLambda=TRUE, drop=TRUE) stopifnot(is.ppp(Y1xxLG)) # ... and Abdollah's code if(FULLTEST) { fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K") Y2 <- simulate(fit2, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y2)) } # check package mechanism kraever("RandomFields") } }) if(FULLTEST) { local({ #' various code blocks fut <- kppm(redwood, ~x) fet <- update(fut, redwood3) fot <- update(fut, trend=~y) fit <- kppm(redwoodfull ~ x) Y <- simulate(fit, window=redwoodfull.extra$regionII, saveLambda=TRUE) gut <- improve.kppm(fit, type="wclik1") gut <- improve.kppm(fit, vcov=TRUE, fast.vcov=TRUE, save.internals=TRUE) hut <- kppm(redwood ~ x, method="clik", weightfun=NULL) hut <- kppm(redwood ~ x, method="palm", weightfun=NULL) mut <- kppm(redwood) nut <- update(mut, Y) if(require(RandomFields)) { #' Bug in rLGCP spotted by Tilman Davies X <- rLGCP("matern", function(x,y) { 1 - 0.4* y }, var=2, scale=0.7, nu=0.5, win = square(10), dimyx=c(32,64)) } }) } if(FULLTEST) { local({ #' minimum contrast code K <- Kest(redwood) a <- matclust.estK(K) a <- thomas.estK(K) a <- cauchy.estK(K) a <- vargamma.estK(K) a <- lgcp.estK(K) print(a) u <- unitname(a) g <- pcf(redwood) a <- matclust.estpcf(g) a <- thomas.estpcf(g) a <- cauchy.estpcf(g) a <- vargamma.estpcf(g) a <- lgcp.estpcf(g) #' auxiliary functions b <- resolve.vargamma.shape(nu.pcf=1.5) Z <- clusterfield("Thomas", kappa=1, scale=0.2) aa <- NULL aa <- accumulateStatus(simpleMessage("Woof"), aa) aa <- accumulateStatus(simpleMessage("Sit"), aa) aa <- accumulateStatus(simpleMessage("Woof"), aa) printStatusList(aa) RMIN <- 0.01 fit <- kppm(redwood ~ 1, ctrl=list(rmin=RMIN,q=1/2)) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("kppm did not handle parameter 'rmin' in argument 'ctrl' ") fit <- kppm(redwood ~ 1, ctrl=list(rmin=0,q=1/2), rmin=RMIN) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("kppm did not handle parameter 'rmin' in argument 'ctrl'") RMIN <- 2 fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=RMIN,q=1)) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("dppm did not handle parameter 'rmin' in argument 'ctrl'") fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=0,q=1), rmin=RMIN) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("dppm did not handle argument 'rmin'") }) } if(FULLTEST) { local({ #' cover a few code blocks fut <- kppm(redwood ~ x, method="clik") print(summary(fut)) a <- residuals(fut) fut2 <- kppm(redwood ~ x, "LGCP", method="palm") print(summary(fut2)) b <- residuals(fut2) #' po <- ppm(redwood ~ 1) A <- kppmComLik(redwood, Xname="redwood", po=po, clusters="Thomas", statistic="pcf", statargs=list(), control=list(), weightfun=NULL, rmax=0.1) A <- 
kppmPalmLik(redwood, Xname="redwood", po=po, clusters="Thomas", statistic="pcf", statargs=list(), control=list(), weightfun=NULL, rmax=0.1) }) } reset.spatstat.options() spatstat.core/tests/testsGtoJ.R0000644000176200001440000002703014141452520016257 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/gcc323.R ## ## $Revision: 1.3 $ $Date: 2020/04/28 12:58:26 $ ## if(ALWAYS) { # depends on hardware local({ # critical R values that provoke GCC bug #323 a <- marktable(lansing, R=0.25) a <- marktable(lansing, R=0.21) a <- marktable(lansing, R=0.20) a <- marktable(lansing, R=0.10) }) } # # tests/hobjects.R # # Validity of methods for ppm(... method="ho") # if(FULLTEST) { local({ set.seed(42) fit <- ppm(cells ~1, Strauss(0.1), method="ho", nsim=10) fitx <- ppm(cells ~offset(x), Strauss(0.1), method="ho", nsim=10) a <- AIC(fit) ax <- AIC(fitx) f <- fitted(fit) fx <- fitted(fitx) p <- predict(fit) px <- predict(fitx) }) } #' tests/hypotests.R #' Hypothesis tests #' #' $Revision: 1.9 $ $Date: 2020/11/02 06:39:23 $ if(FULLTEST) { local({ hopskel.test(redwood, method="MonteCarlo", nsim=5) #' quadrat test - spatial methods a <- quadrat.test(redwood, 3) domain(a) shift(a, c(1,1)) #' cases of studpermu.test #' X is a hyperframe b <- studpermu.test(pyramidal, nperm=9) b <- studpermu.test(pyramidal, nperm=9, use.Tbar=TRUE) #' X is a list of lists of ppp ZZ <- split(pyramidal$Neurons, pyramidal$group) bb <- studpermu.test(ZZ, nperm=9) #' Issue #115 X <- runifpoint(50, nsim = 3) Y <- runifpoint(3000, nsim = 3) h <- hyperframe(ppp = c(X, Y), group = rep(1:2, 3)) studpermu.test(h, ppp ~ group) #' scan test Z <- scanmeasure(cells, 0.1, method="fft") rr <- c(0.05, 1) scan.test(amacrine, rr, nsim=5, method="binomial", alternative="less") fit <- ppm(cells ~ x) lam <- predict(fit) scan.test(cells, rr, nsim=5, method="poisson", baseline=fit, alternative="less") scan.test(cells, rr, nsim=5, method="poisson", baseline=lam, alternative="less") }) } # # tests/imageops.R # # $Revision: 1.33 $ $Date: 2021/04/15 06:13:47 $ # if(FULLTEST) { local({ #' cases of 'im' data tab <- table(sample(factor(letters[1:10]), 30, replace=TRUE)) b <- im(tab, xrange=c(0,1), yrange=c(0,10)) b <- update(b) mat <- matrix(sample(0:4, 12, replace=TRUE), 3, 4) a <- im(mat) levels(a$v) <- 0:4 a <- update(a) levels(mat) <- 0:4 b <- im(mat) b <- update(b) D <- as.im(mat, letterR) df <- as.data.frame(D) DD <- as.im(df, step=c(D$xstep, D$ystep)) #' various manipulations AA <- A <- as.im(owin()) BB <- B <- as.im(owin(c(1.1, 1.9), c(0,1))) Z <- imcov(A, B) stopifnot(abs(max(Z) - 0.8) < 0.1) Frame(AA) <- Frame(B) Frame(BB) <- Frame(A) ## handling images with 1 row or column ycov <- function(x, y) y E <- as.im(ycov, owin(), dimyx = c(2,1)) G <- cut(E, 2) H <- as.tess(G) E12 <- as.im(ycov, owin(), dimyx = c(1,2)) G12 <- cut(E12, 2) H12 <- as.tess(G12) AAA <- as.array(AA) EEE <- as.array(E) AAD <- as.double(AA) EED <- as.double(E) aaa <- xtfrm(AAA) eee <- xtfrm(E) ## d <- distmap(cells, dimyx=32) Z <- connected(d <= 0.06, method="interpreted") a <- where.max(d, first=FALSE) a <- where.min(d, first=FALSE) dx <- raster.x(d) dy <- raster.y(d) dxy <- raster.xy(d) xyZ <- raster.xy(Z, 
drop=TRUE) horosho <- conform.imagelist(cells, list(d, Z)) #' split.im W <- square(1) X <- as.im(function(x,y){x}, W) Y <- dirichlet(runifpoint(7, W)) Z <- split(X, as.im(Y)) ## ........... cases of "[.im" ........................ ## index window has zero overlap area with image window Out <- owin(c(-0.5, 0), c(0,1)) oo <- X[Out] oo <- X[Out, drop=FALSE] if(!is.im(oo)) stop("Wrong format in [.im with disjoint index window") oon <- X[Out, drop=TRUE, rescue=FALSE] if(is.im(oon)) stop("Expected a vector of values, not an image, from [.im") if(!all(is.na(oon))) stop("Expected a vector of NA values in [.im") ## Empty <- cells[FALSE] EmptyFun <- ssf(Empty, numeric(0)) ff <- d[Empty] ff <- d[EmptyFun] gg <- d[2,] gg <- d[,2] gg <- d[j=2] gg <- d[2:4, 3:5] hh <- d[2:4, 3:5, rescue=TRUE] if(!is.im(hh)) stop("rectangle was not rescued in [.im") ## factor and NA values f <- cut(d, breaks=4) f <- f[f != levels(f)[1], drop=FALSE] fff <- f[, , drop=FALSE] fff <- f[cells] fff <- f[cells, drop=FALSE] fff <- f[Empty] ## ........... cases of "[<-.im" ....................... d[,] <- d[] + 1 d[Empty] <- 42 d[EmptyFun] <- 42 ## smudge() and rasterfilter() dd <- smudge(d) ## rgb/hsv options X <- setcov(owin()) M <- Window(X) Y <- as.im(function(x,y) x, W=M) Z <- as.im(function(x,y) y, W=M) # convert after rescaling RGBscal <- rgbim(X, Y, Z, autoscale=TRUE, maxColorValue=1) HSVscal <- hsvim(X, Y, Z, autoscale=TRUE) #' cases of [.im Ma <- as.mask(M, dimyx=37) ZM <- Z[raster=Ma, drop=FALSE] ZM[solutionset(Y+Z > 0.4)] <- NA ZF <- cut(ZM, breaks=5) ZL <- (ZM > 0) P <- list(x=c(0.511, 0.774, 0.633, 0.248, 0.798), y=c(0.791, 0.608, 0.337, 0.613, 0.819)) zmp <- ZM[P, drop=TRUE] zfp <- ZF[P, drop=TRUE] zlp <- ZL[P, drop=TRUE] P <- as.ppp(P, owin()) zmp <- ZM[P, drop=TRUE] zfp <- ZF[P, drop=TRUE] zlp <- ZL[P, drop=TRUE] #' miscellaneous ZZ <- zapsmall.im(Z, digits=6) ZZ <- zapsmall.im(Z) ZS <- shift(Z, origin="centroid") ZS <- shift(Z, origin="bottomleft") ZA <- affine(Z, mat=diag(c(-1,-2))) U <- scaletointerval(Z) C <- as.im(1, W=U) U <- scaletointerval(C) #' hist.im h <- hist(Z) h <- hist(Z, probability=TRUE) h <- hist(Z, plot=FALSE) Zcut <- cut(Z, breaks=5) h <- hist(Zcut) # barplot hp <- hist(Zcut, probability=TRUE) # barplot plot(h) # plot.barplotdata #' plot.im code blocks plot(Z, ribside="left") plot(Z, ribside="top") plot(Z, riblab="value") plot(Z, clipwin=square(0.5)) plot(Z - mean(Z), log=TRUE) plot(Z, valuesAreColours=TRUE) # rejected with a warning IX <- as.im(function(x,y) { as.integer(round(3*x)) }, square(1)) co <- colourmap(rainbow(4), inputs=0:3) plot(IX, col=co) CX <- eval.im(col2hex(IX+1L)) plot(CX, valuesAreColours=TRUE) plot(CX, valuesAreColours=FALSE) #' pairs.im pairs(solist(Z)) pairs(solist(A=Z)) #' handling and plotting of character and factor images Afactor <- as.im(col2hex("green"), letterR, na.replace=col2hex("blue")) Acharacter <- as.im(col2hex("green"), letterR, na.replace=col2hex("blue"), stringsAsFactors=FALSE) plot(Afactor) plot(Acharacter, valuesAreColours=TRUE) print(summary(Afactor)) print(summary(Acharacter)) #' safelookup (including extrapolation case) Z <- as.im(function(x,y) { x - y }, letterR) B <- grow.rectangle(Frame(letterR), 1) X <- superimpose(runifpoint(10,letterR), runifpoint(20, setminus.owin(B, letterR)), vertices(Frame(B)), W=B) a <- safelookup(Z, X) b <- safelookup(cut(Z, breaks=4), X) aa <- lookup.im(Z, X) #' Smooth.im -> blur.im with sigma=NULL ZS <- Smooth(Z) #' cases of distcdf distcdf(cells[1:5]) distcdf(W=cells[1:5], dW=1:5) distcdf(W=Window(cells), V=cells[1:5]) 
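#' ----- added aside between the distcdf cases (not part of the original test file) ----- #' A hedged sketch of what the distcdf cases compute: distcdf(W) is understood to #' estimate the CDF of the distance between two independent uniformly-distributed #' random points in the window W. The object names 'FW' and 'dfFW' are invented here #' for illustration; the checks below are sanity checks only. FW <- distcdf(square(1)) stopifnot(inherits(FW, "fv")) # distcdf is documented to return an 'fv' object dfFW <- as.data.frame(FW) stopifnot(all(is.finite(dfFW[[2]]))) # the estimated CDF values should all be finite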
distcdf(W=Window(cells), V=cells[1:5], dV=1:5) #' im.apply DA <- density(split(amacrine)) Z <- im.apply(DA, sd) Z <- which.max.im(DA) # deprecated -> im.apply(DA, which.max) #' Math.imlist, Ops.imlist, Complex.imlist U <- Z+2i B <- U * (2+1i) print(summary(B)) V <- solist(A=U, B=B) negV <- -V E <- Re(V) negE <- -E #' rotmean U <- rotmean(Z, origin="midpoint", result="im", padzero=FALSE) }) } if(ALWAYS) { local({ #' check nearest.valid.pixel W <- Window(demopat) set.seed(911911) X <- runifpoint(1000, W) Z <- quantess(W, function(x,y) { x }, 9)$image nearest.valid.pixel(numeric(0), numeric(0), Z) x <- X$x y <- X$y a <- nearest.valid.pixel(x, y, Z, method="interpreted") b <- nearest.valid.pixel(x, y, Z, method="C") if(!isTRUE(all.equal(a,b))) stop("Unequal results in nearest.valid.pixel") if(!identical(a,b)) stop("Equal, but not identical, results in nearest.valid.pixel") }) } #' #' tests/interact.R #' #' Support for interaction objects #' #' $Revision: 1.2 $ $Date: 2020/04/28 12:58:26 $ if(FULLTEST) { local({ #' print.intermaker Strauss Geyer Ord #' intermaker BS <- get("BlankStrauss", envir=environment(Strauss)) BD <- function(r) { instantiate.interact(BS, list(r=r)) } BlueDanube <- intermaker(BD, BS) }) } #' tests/ippm.R #' Tests of 'ippm' class #' $Revision: 1.6 $ $Date: 2020/04/28 12:58:26 $ if(FULLTEST) { local({ # .......... set up example from help file ................. nd <- 10 gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model fit <- ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) # fit model with logistic likelihood but without iScore fitlo <- ippm(X ~Z + offset(log(f)), method="logi", covariates=list(Z=Z, f=f), start=list(gamma=1, delta=1), nd=nd) ## ............. test ippm class support ...................... Ar <- model.matrix(fit) Ai <- model.matrix(fit, irregular=TRUE) An <- model.matrix(fit, irregular=TRUE, keepNA=FALSE) AS <- model.matrix(fit, irregular=TRUE, subset=(abs(Z) < 0.5)) Zr <- model.images(fit) Zi <- model.images(fit, irregular=TRUE) ## update.ippm fit2 <- update(fit, . ~ . + I(Z^2)) fit0 <- update(fit, . ~ . - Z, start=list(gamma=2, delta=4)) oldfit <- ippm(X, ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) oldfit2 <- update(oldfit, . ~ . + I(Z^2)) oldfit0 <- update(oldfit, . ~ . - Z, start=list(gamma=2, delta=4)) ## again with logistic fitlo2 <- update(fitlo, . ~ . + I(Z^2)) fitlo0 <- update(fitlo, . ~ . - Z, start=list(gamma=2, delta=4)) oldfitlo <- ippm(X, ~Z + offset(log(f)), method="logi", covariates=list(Z=Z, f=f), start=list(gamma=1, delta=1), nd=nd) oldfitlo2 <- update(oldfitlo, . ~ . + I(Z^2)) oldfitlo0 <- update(oldfitlo, . ~ . - Z, start=list(gamma=2, delta=4)) ## anova.ppm including ippm objects fit0 <- update(fit, . ~ Z) fit0lo <- update(fitlo, . 
~ Z) A <- anova(fit0, fit) Alo <- anova(fit0lo, fitlo) }) } spatstat.core/tests/testsR2.R0000644000176200001440000011111414141452520015674 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/rmhAux.R # # $Revision: 1.2 $ $Date: 2020/05/01 02:42:58 $ # # For interactions which maintain 'auxiliary data', # verify that the auxiliary data are correctly updated. # # To do this we run rmh with nsave=1 so that the point pattern state # is saved after every iteration, then the algorithm is restarted, # and the auxiliary data are re-initialised. The final state must agree with # the result of simulation without saving. # ---------------------------------------------------- if(ALWAYS) { # involves C code local({ # Geyer: mod <- list(cif="geyer", par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=square(10)) set.seed(42) X.nosave <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1)) set.seed(42) X.save <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1, nburn=0, nsave=1, pstage="start")) #' Need to set pstage='start' so that proposals are generated #' at the start of the procedure in both cases. stopifnot(npoints(X.save) == npoints(X.nosave)) stopifnot(max(nncross(X.save, X.nosave)$dist) == 0) stopifnot(max(nncross(X.nosave, X.save)$dist) == 0) }) } ## ## tests/rmhBasic.R ## ## $Revision: 1.23 $ $Date: 2020/05/01 02:42:58 $ # # Test examples for rmh.default # run to reasonable length # and with tests for validity added # ---------------------------------------------------- local({ if(!exists("nr") || is.null(nr)) nr <- 1000 nrlong <- 2e3 spatstat.options(expand=1.05) if(ALWAYS) { ## fundamental C code ## Strauss process. mod01 <- list(cif="strauss", par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01, start=list(n.start=80), control=list(nrep=nr)) X1.strauss2 <- rmh(model=mod01, start=list(n.start=80), control=list(nrep=nr, periodic=FALSE)) ## Strauss process, conditioning on n = 80: X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr)) stopifnot(npoints(X2.strauss) == 80) ## test tracking mechanism X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr), track=TRUE) X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr), track=TRUE) ## Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) X3.hardcore2 <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr, periodic=FALSE)) ## Strauss process equal to pure hardcore: mod02 <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) ## Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=90), control=list(nrep=nr)) ## Strauss process in a polygonal window, conditioning on n = 42. 
X5.strauss <- rmh(model=mod03,start=list(n.start=42), control=list(p=1,nrep=nr)) stopifnot(npoints(X5.strauss) == 42) ## Strauss process, starting off from X4.strauss, but with the ## polygonal window replaced by a rectangular one. At the end, ## the generated pattern is clipped to the original polygonal window. xxx <- X4.strauss xxx$window <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr)) ## Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=70), control=list(nrep=nr)) X1.straush2 <- rmh(model=mod04,start=list(n.start=70), control=list(nrep=nr, periodic=FALSE)) ## Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=250), control=list(nrep=nr)) ## Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=60), control=list(nrep=nr)) ## Fiksel modFik <- list(cif="fiksel", par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0), w=square(1)) X.fiksel <- rmh(model=modFik,start=list(n.start=10), control=list(nrep=nr)) X.fiksel2 <- rmh(model=modFik,start=list(n.start=10), control=list(nrep=nr,periodic=FALSE)) ## Penttinen process: modpen <- rmhmodel(cif="penttinen",par=list(beta=2,gamma=0.6,r=1), w=c(0,10,0,10)) X.pen <- rmh(model=modpen,start=list(n.start=10), control=list(nrep=nr)) X.pen2 <- rmh(model=modpen,start=list(n.start=10), control=list(nrep=nr, periodic=FALSE)) ## equivalent to hardcore modpen$par$gamma <- 0 X.penHard <- rmh(model=modpen,start=list(n.start=3), control=list(nrep=nr)) ## Area-interaction, inhibitory mod.area <- list(cif="areaint", par=list(beta=2,eta=0.5,r=0.5), w=square(10)) X.area <- rmh(model=mod.area,start=list(n.start=60), control=list(nrep=nr)) X.areaE <- rmh(model=mod.area,start=list(n.start=60), control=list(nrep=nr, periodic=FALSE)) ## Area-interaction, clustered mod.area2 <- list(cif="areaint", par=list(beta=2,eta=1.5,r=0.5), w=square(10)) X.area2 <- rmh(model=mod.area2,start=list(n.start=60), control=list(nrep=nr)) ## Area-interaction close to hard core set.seed(42) mod.area0 <- list(cif="areaint",par=list(beta=2,eta=1e-300,r=0.35), w=square(10)) X.area0 <- rmh(model=mod.area0,start=list(x.start=X3.hardcore), control=list(nrep=nrlong)) stopifnot(nndist(X.area0) > 0.6) ## Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=70), control=list(nrep=nr)) X.sftcr2 <- rmh(model=mod07,start=list(n.start=70), control=list(nrep=nr, periodic=FALSE)) ## Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=300), control=list(nrep=nr)) X.dgs2 <- rmh(model=mod12,start=list(n.start=300), control=list(nrep=nr, periodic=FALSE)) ## Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=300), control=list(nrep=nr)) X.diggra2 <- rmh(model=mod13,start=list(n.start=300), control=list(nrep=nr, periodic=FALSE)) ## Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=200), control=list(nrep=nr)) ## Geyer; 
same as a Strauss process with parameters ## (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=200), control=list(nrep=nr)) X2.geyer2 <- rmh(model=mod15,start=list(n.start=200), control=list(nrep=nr, periodic=FALSE)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr)) X3.geyer2 <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=FALSE,nrep=nr)) ## Geyer, starting from the redwood data set, simulating ## on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr)) ## Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=100), control=list(nrep=nr, periodic=TRUE)) X.lookup2 <- rmh(model=mod17,start=list(n.start=100), control=list(nrep=nr, expand=1, periodic=FALSE)) ## irregular mod17x <- mod17 mod17x$par$r <- 0.05*sqrt(mod17x$par$r/0.05) X.lookupX <- rmh(model=mod17x,start=list(n.start=100), control=list(nrep=nr, periodic=TRUE)) X.lookupX2 <- rmh(model=mod17x,start=list(n.start=100), control=list(nrep=nr, expand=1, periodic=FALSE)) ## Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } ## log quadratic trend mod17 <- list(cif="strauss", par=list(beta=beta,gamma=gmma,r=r),w=c(0,250,0,250), trend=tr3) X1.strauss.trend <- rmh(model=mod17,start=list(n.start=90), control=list(nrep=nr)) #' trend is an image mod18 <- mod17 mod18$trend <- as.im(mod18$trend, square(10)) X1.strauss.trendim <- rmh(model=mod18,start=list(n.start=90), control=list(nrep=nr)) } if(FULLTEST) { #'..... Test other code blocks ................. 
#' argument passing to rmhcontrol X1S <- rmh(model=mod01, control=NULL, nrep=nr) X1f <- rmh(model=mod01, fixall=TRUE, nrep=nr) # issues a warning } if(ALWAYS) { #' nsim > 1 Xlist <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr), nsim=2) #' Condition on contents of window XX <- Xlist[[1]] YY <- XX[square(2)] XXwindow <- rmh(model=mod01, start=list(n.start=80), control=list(nrep=nr, x.cond=YY)) XXwindowTrend <- rmh(model=mod17, start=list(n.start=80), control=list(nrep=nr, x.cond=YY)) #' Palm conditioning XXpalm <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr, x.cond=coords(YY))) XXpalmTrend <- rmh(model=mod17,start=list(n.start=80), control=list(nrep=nr, x.cond=coords(YY))) #' nsave, nburn chq <- function(X) { Xname <- deparse(substitute(X)) A <- attr(X, "saved") if(length(A) == 0) stop(paste(Xname, "did not include a saved list of patterns")) return("ok") } XXburn <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=500, nburn=100)) chq(XXburn) XXburnTrend <- rmh(model=mod17,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=500, nburn=100)) chq(XXburnTrend) XXburn0 <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=500, nburn=0)) chq(XXburn0) XXsaves <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=c(500, 200))) chq(XXsaves) XXsaves0 <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=c(500, 200), nburn=0)) chq(XXsaves0) } if(FULLTEST) { #' code blocks for various interactions, not otherwise tested rr <- seq(0,0.2,length=8)[-1] gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5) mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=rr,sat=5), w=square(1)) Xbg <- rmh(model=mod18,start=list(n.start=20), control=list(nrep=1e4, periodic=TRUE)) Xbg2 <- rmh(model=mod18,start=list(n.start=20), control=list(nrep=1e4, periodic=FALSE)) #' supporting classes rs <- rmhstart() print(rs) rs <- rmhstart(x.start=cells) print(rs) rc <- rmhcontrol(x.cond=as.list(as.data.frame(cells))) print(rc) rc <- rmhcontrol(x.cond=as.data.frame(cells)[FALSE, , drop=FALSE]) print(rc) rc <- rmhcontrol(nsave=100, ptypes=c(0.7, 0.3), x.cond=amacrine) print(rc) rc <- rmhcontrol(ptypes=c(0.7, 0.3), x.cond=as.data.frame(amacrine)) print(rc) } }) reset.spatstat.options() ## ## tests/rmhErrors.R ## ## $Revision: 1.6 $ $Date: 2020/05/01 02:42:58 $ ## # Things which should cause an error if(ALWAYS) { local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 1e3 ## Strauss with zero intensity and p = 1 mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) out <- try(X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(p=1,nrep=nr,nverb=nv),verbose=FALSE)) if(!inherits(out, "try-error")) stop("Error not trapped (Strauss with zero intensity and p = 1) in tests/rmhErrors.R") }) } # # tests/rmhExpand.R # # test decisions about expansion of simulation window # # $Revision: 1.7 $ $Date: 2020/05/01 02:42:58 $ # local({ if(FULLTEST) { fit <- ppm(cells ~x) ## check rmhmodel.ppm mod <- rmhmodel(fit) is.expandable(mod) wsim <- as.rectangle(mod$trend) ## work around changes in 'unitname' wcel <- as.owin(cells) unitname(wcel) <- unitname(cells) ## test if(!identical(wsim, wcel)) stop("Expansion occurred improperly in rmhmodel.ppm") ## rmhexpand class a <- summary(rmhexpand(area=2)) print(a) b <- summary(rmhexpand(length=4)) print(b) print(summary(rmhexpand(distance=2))) print(summary(rmhexpand(square(2)))) } }) # # tests/rmhMulti.R # # 
tests of rmh, running multitype point processes # # $Revision: 1.16 $ $Date: 2020/05/01 05:29:42 $ local({ if(!exists("nr")) nr <- 2e3 if(!exists("nv")) nv <- 0 spatstat.options(expand=1.05) if(FULLTEST) { ## Multitype Poisson modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1)) ## Multinomial Xp2fix <- rmh(modp2, start=list(n.start=c(10,20,30)), control=list(fixall=TRUE, p=1)) Xp2fixr <- rmh(modp2, start=list(x.start=Xp2fix), control=list(fixall=TRUE, p=1)) } if(ALWAYS) { ## Gibbs models => C code ## Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) ## Multitype Strauss equivalent to hard core: mod08hard <- mod08 mod08hard$par$gamma[] <- 0 X1.straussm.Hard <- rmh(model=mod08hard,start=list(n.start=20), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv), periodic=FALSE) X1.straussmP.Hard <- rmh(model=mod08hard,start=list(n.start=20), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv), periodic=TRUE) ## Multitype Strauss conditioning upon the total number ## of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) stopifnot(X2.straussm$n == 80) ## Conditioning upon the number of points of type 1 being 60 ## and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) stopifnot(all(table(X3.straussm$marks) == c(60,20))) ## Multitype hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod087 <- list(cif="multihard",par=list(beta=5*beta,hradii=rhc), w=square(12)) cheque <- function(X, r) { Xname <- deparse(substitute(X)) nn <- minnndist(X, by=marks(X)) print(nn) if(!all(nn >= r, na.rm=TRUE)) stop(paste(Xname, "violates hard core constraint"), call.=FALSE) return(invisible(NULL)) } #' make an initial state that violates hard core #' (cannot use 'x.start' here because it disables thinning) #' and check that result satisfies hard core set.seed(19171025) X.multihard.close <- rmh(model=mod087,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv), periodic=FALSE) cheque(X.multihard.close, rhc) X.multihard.closeP <- rmh(model=mod087,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv, periodic=TRUE)) cheque(X.multihard.closeP, rhc) ## Multitype Strauss hardcore: mod09 <- list(cif="straushm", par=list(beta=5*beta,gamma=gmma, iradii=r,hradii=rhc),w=square(12)) X.straushm <- rmh(model=mod09,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv), periodic=FALSE) X.straushmP <- rmh(model=mod09,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv, periodic=TRUE)) ## Multitype Strauss hardcore equivalent to multitype hardcore: mod09hard <- mod09 mod09hard$par$gamma[] <- 0 X.straushm.hard <- rmh(model=mod09hard,start=list(n.start=15), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv, periodic=FALSE)) X.straushmP.hard <- rmh(model=mod09hard,start=list(n.start=15), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv), periodic=TRUE) ## Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } 
# log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm", par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=350), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) ## Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=350), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) #' nsave, nburn chq <- function(X) { Xname <- deparse(substitute(X)) A <- attr(X, "saved") if(length(A) == 0) stop(paste(Xname, "did not include a saved list of patterns")) return("ok") } XburnMS <- rmh(model=mod08,start=list(n.start=80), verbose=FALSE, control=list(ptypes=c(0.75,0.25), nrep=nr,nsave=500, nburn=100)) chq(XburnMS) XburnMStrend <- rmh(model=mod10,start=list(n.start=350), verbose=FALSE, control=list(ptypes=c(0.75,0.25), nrep=nr,nsave=500, nburn=100)) chq(XburnMStrend) ####################################################################### ############ checks on distribution of output ####################### ####################################################################### checkp <- function(p, context, testname, failmessage, pcrit=0.01) { if(missing(failmessage)) failmessage <- paste("output failed", testname) if(p < pcrit) warning(paste(context, ",", failmessage), call.=FALSE) cat(paste("\n", context, ",", testname, "has p-value", signif(p,4), "\n")) } ## Multitype Strauss code; output is multitype Poisson beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ rep(1, length(x)) } tr2 <- function(x,y){ rep(2, length(x)) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=0), control=list(nrep=1e6)) ## The model is Poisson with intensity 100 for type 1 and 200 for type 2. ## Total number of points is Poisson (300) ## Marks are i.i.d. with P(type 1) = 1/3, P(type 2) = 2/3. 
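## ----- added sketch (not part of the original test file) ----- ## The two-sided Monte Carlo check below relies on a standard trick: if N ~ Poisson(mu), ## then p = ppois(N, mu) is approximately Uniform(0,1) under the null hypothesis, so ## 2 * min(p, 1 - p) serves as a two-sided p-value. The helper name 'twosidedPois' is ## invented here purely for illustration. twosidedPois <- function(nobs, mu) { p <- ppois(nobs, mu); 2 * min(p, 1 - p) } ## e.g. observing 280 points when mu = 300 gives a p-value of roughly 0.26, ## so a count of 280 would not cast doubt on the simulation: stopifnot(twosidedPois(280, 300) > 0.05)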
## Test whether the total intensity looks right ## p <- ppois(X$n, 300) p.val <- 2 * min(p, 1-p) checkp(p.val, "In multitype Poisson simulation", "test whether total number of points has required mean value") ## Test whether the mark distribution looks right ta <- table(X$marks) cat("Frequencies of marks:") print(ta) checkp(chisq.test(ta, p = c(1,2)/3)$p.value, "In multitype Poisson simulation", "chi-squared goodness-of-fit test for mark distribution (1/3, 2/3)") ##### #### multitype Strauss code; fixall=TRUE; #### output is multinomial process with nonuniform locations #### the.context <- "In nonuniform multinomial simulation" beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ ifelse(x < 0.5, 0, 2) } tr2 <- function(x,y){ ifelse(y < 0.5, 1, 3) } ## cdf of these distributions Fx1 <- function(x) { ifelse(x < 0.5, 0, ifelse(x < 1, 2 * x - 1, 1)) } Fy2 <- function(y) { ifelse(y < 0, 0, ifelse(y < 0.5, y/2, ifelse(y < 1, (1/2 + 3 * (y-1/2))/2, 1))) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=c(50,50)), control=list(nrep=1e6, expand=1, p=1, fixall=TRUE)) ## The model is Poisson ## Mean number of type 1 points = 100 ## Mean number of type 2 points = 200 ## Total intensity = 300 ## Marks are i.i.d. with P(type 1) = 1/3, P(type 2) = 2/3 ## Test whether the coordinates look OK Y <- split(X) X1 <- Y[[names(Y)[1]]] X2 <- Y[[names(Y)[2]]] checkp(ks.test(X1$y, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of y coordinates of type 1 points") if(any(X1$x < 0.5)) { stop(paste(the.context, ",", "x-coordinates of type 1 points are IMPOSSIBLE"), call.=FALSE) } else { checkp(ks.test(Fx1(X1$x), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed x coordinates of type 1 points") } checkp(ks.test(X2$x, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of x coordinates of type 2 points") checkp(ks.test(Fy2(X2$y), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed y coordinates of type 2 points") } }) reset.spatstat.options() # # tests/rmhTrend.R # # Problems with trend images (rmhmodel.ppm or rmhEngine) # if(ALWAYS) { local({ set.seed(42) # Bug folder 37 of 8 feb 2011 # rmhmodel.ppm -> predict.ppm # + rmhResolveTypes -> is.subset.owin Z <- rescale(demopat, 7000) X <- unmark(Z) X1 <- split(Z)[[1]] Int <- density(X,dimyx=200) Lint <- eval.im(log(npoints(X1)*Int/npoints(X))) M <- as.owin(Int) MR <- intersect.owin(M,scalardilate(M,0.5,origin="midpoint")) X1 <- X1[MR] Fut <- ppm(X1~offset(Lint),covariates=list(Lint=Lint), inter=BadGey(r=c(0.03,0.05),sat=3)) Y <- rmh(Fut,control=list(expand=M,nrep=1e3), verbose=FALSE) }) } # # tests/rmhWeird.R # # $Revision: 1.4 $ $Date: 2020/05/01 05:29:42 $ # # strange boundary cases local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 2e3 if(FULLTEST) { ## Poisson process cat("Poisson\n") modP <- list(cif="poisson",par=list(beta=10), w = square(3)) XP <- rmh(model = modP, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) } if(ALWAYS) { ## Poisson process case of Strauss cat("\nPoisson case of Strauss\n") modPS <- list(cif="strauss",par=list(beta=10,gamma=1,r=0.7), w = square(3)) XPS <- rmh(model=modPS, start=list(n.start=25), control=list(nrep=nr,nverb=nv)) ## Strauss with zero intensity cat("\nStrauss with zero intensity\n") mod0S <- list(cif="strauss", par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) X0S <- 
rmh(model=mod0S,start=list(n.start=80), control=list(nrep=nr,nverb=nv)) stopifnot(X0S$n == 0) } if(FULLTEST) { ## Poisson with zero intensity cat("\nPoisson with zero intensity\n") mod0P <- list(cif="poisson",par=list(beta=0), w = square(3)) X0P <- rmh(model = mod0P, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) ## Poisson conditioned on zero points cat("\nPoisson conditioned on zero points\n") modp <- list(cif="poisson", par=list(beta=2), w = square(10)) Xp <- rmh(modp, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(Xp$n == 0) ## Multitype Poisson conditioned on zero points cat("\nMultitype Poisson conditioned on zero points\n") modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(is.marked(Xp2)) stopifnot(Xp2$n == 0) ## Multitype Poisson conditioned on zero points of each type cat("\nMultitype Poisson conditioned on zero points of each type\n") Xp2fix <- rmh(modp2, start=list(n.start=c(0,0,0)), control=list(p=1, fixall=TRUE, nrep=nr)) stopifnot(is.marked(Xp2fix)) stopifnot(Xp2fix$n == 0) } }) # # tests/rmhmodel.ppm.R # # $Revision: 1.10 $ $Date: 2020/05/01 05:29:42 $ # # Case-by-case tests of rmhmodel.ppm # if(FULLTEST) { local({ f <- ppm(cells) m <- rmhmodel(f) f <- ppm(cells ~x) m <- rmhmodel(f) f <- ppm(cells ~1, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) print(m) f <- ppm(cells ~1, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells ~1, DiggleGratton(0.05,0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, Softcore(0.5), correction="isotropic") m <- rmhmodel(f) f <- ppm(cells ~1, Geyer(0.07,2)) m <- rmhmodel(f) f <- ppm(cells ~1, BadGey(c(0.07,0.1,0.13),2)) m <- rmhmodel(f) f <- ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) m <- rmhmodel(f) f <- ppm(cells ~1, AreaInter(r=0.06)) m <- rmhmodel(f) print(m) # multitype r <- matrix(0.07, 2, 2) f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) print(m) h <- matrix(min(nndist(amacrine))/2, 2, 2) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) diag(r) <- NA diag(h) <- NA f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) # multitype data, interaction not dependent on type f <- ppm(amacrine ~marks, Strauss(0.05)) m <- rmhmodel(f) print(m) # trends f <- ppm(cells ~x, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~y, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells ~x+y, Hardcore(0.07)) m <- rmhmodel(f) print(m) f <- ppm(cells ~polynom(x,y,2), Softcore(0.5), correction="isotropic") m <- rmhmodel(f) # covariates Z <- as.im(function(x,y){ x^2+y^2 }, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) print(m) Zim <- as.im(Z, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Zim)) m <- rmhmodel(f) Z <- as.im(function(x,y){ x^2+y }, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Z)) m <- rmhmodel(f) print(m) m <- rmhmodel(f, control=list(p=1)) m <- rmhmodel(f, control=list(p=1,fixall=TRUE)) print(m) Zim <- as.im(Z, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Zim)) m <- rmhmodel(f) print(m) }) } # # tests/rmhmodelHybrids.R # # Test that rmhmodel.ppm and rmhmodel.default # work on Hybrid interaction models # # $Revision: 1.5 $ $Date: 2020/05/01 05:29:42 $ # if(ALWAYS) { # involves C code local({ # ......... 
rmhmodel.ppm ....................... fit1 <- ppm(redwood ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2), C=Geyer(0.15, 1))) m1 <- rmhmodel(fit1) m1 reach(m1) ## Test of handling 'IsOffset' fit2 <- ppm(cells ~1, Hybrid(H=Hardcore(0.05), G=Geyer(0.15, 2))) m2 <- rmhmodel(fit2) ## also test C code for hybrid interaction with hard core fakecells <- rmh(fit2, nrep=1e4) # Test of handling Poisson components fit3 <- ppm(cells ~1, Hybrid(P=Poisson(), S=Strauss(0.05))) X3 <- rmh(fit3, control=list(nrep=1e3,expand=1), verbose=FALSE) # ............ rmhmodel.default ............................ modH <- list(cif=c("strauss","geyer"), par=list(list(beta=50,gamma=0.5, r=0.1), list(beta=1, gamma=0.7, r=0.2, sat=2)), w = square(1)) rmodH <- rmhmodel(modH) rmodH reach(rmodH) # test handling of Poisson components modHP <- list(cif=c("poisson","strauss"), par=list(list(beta=5), list(beta=10,gamma=0.5, r=0.1)), w = square(1)) rmodHP <- rmhmodel(modHP) rmodHP reach(rmodHP) modPP <- list(cif=c("poisson","poisson"), par=list(list(beta=5), list(beta=10)), w = square(1)) rmodPP <- rmhmodel(modPP) rmodPP reach(rmodPP) }) } # # tests/rmh.ppm.R # # $Revision: 1.5 $ $Date: 2020/05/01 05:29:42 $ # # Examples removed from rmh.ppm.Rd # stripped down to minimal tests of validity # local({ op <- spatstat.options() spatstat.options(rmh.nrep=10, npixel=10, ndummy.min=10) spatstat.options(project.fast=TRUE) Nrep <- 10 X <- swedishpines if(FULLTEST) { ## Poisson process fit <- ppm(X ~1, Poisson()) Xsim <- rmh(fit) } if(ALWAYS) { # Gibbs model => C code ## Strauss process fit <- ppm(X ~1, Strauss(r=7)) Xsim <- rmh(fit) ## Strauss process simulated on a larger window ## then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) ## Extension of model to another window (thanks to Tuomas Rajala) Xsim <- rmh(fit, w=square(2)) Xsim <- simulate(fit, w=square(2)) ## Strauss - hard core process ## fit <- ppm(X ~1, StraussHard(r=7,hc=2)) ## Xsim <- rmh(fit, start=list(n.start=X$n)) ## Geyer saturation process ## fit <- ppm(X ~1, Geyer(r=7,sat=2)) ## Xsim <- rmh(fit, start=list(n.start=X$n)) ## Area-interaction process fit <- ppm(X ~1, AreaInter(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) ## Penttinen process fit <- ppm(X ~1, Penttinen(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) ## soft core interaction process ## X <- quadscheme(X, nd=50) ## fit <- ppm(X ~1, Softcore(kappa=0.1), correction="isotropic") ## Xsim <- rmh(fit, start=list(n.start=X$n)) ## Diggle-Gratton pairwise interaction model ## fit <- ppm(cells ~1, DiggleGratton(0.05, 0.1)) ## Xsim <- rmh(fit, start=list(n.start=cells$n)) ## plot(Xsim, main="simulation from fitted Diggle-Gratton model") ## piecewise-constant pairwise interaction function X <- rSSI(0.05, 100) fit <- ppm(X ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) } ## marked point pattern Y <- amacrine if(FULLTEST) { #' marked Poisson models fit <- ppm(Y) Ysim <- rmh(fit) fit <- ppm(Y~marks) Ysim <- rmh(fit) fit <- ppm(Y~x) Ysim <- rmh(fit) fit <- ppm(Y~marks+x) Ysim <- rmh(fit) } if(ALWAYS) { #' multitype Strauss typ <- levels(Y$marks) MS <- MultiStrauss(types = typ, radii=matrix(0.07, ncol=2, nrow=2)) fit <- ppm(Y~marks*x, MS) Ysim <- rmh(fit) #' multitype Hardcore h0 <- minnndist(unmark(Y)) * 0.95 MH <- MultiHard(types = typ, hradii=matrix(h0, ncol=2, nrow=2)) fit <- ppm(Y ~ marks+x, MH) Ysim <- rmh(fit) #' other code blocks Ysim <- rmh(fit, control=list(periodic=TRUE, expand=1)) Ysim <- rmh(fit, control=list(periodic=FALSE, expand=1)) #' multihard core with 
invalid initial state Ydouble <- superimpose(Y, rjitter(Y, h0/10)) Ysim <- rmh(fit, start=list(x.start=Ydouble)) #' Lennard-Jones fut <- ppm(unmark(longleaf) ~ 1, LennardJones(), rbord=1) Ysim <- rmh(fut) Ysim <- rmh(fut, control=list(periodic=TRUE, expand=1)) } spatstat.options(op) }) reset.spatstat.options() #' #' tests/rmhsnoopy.R #' #' Test the rmh interactive debugger #' #' $Revision: 1.10 $ $Date: 2020/05/01 05:29:42 $ if(ALWAYS) { # may depend on platform local({ ## fit a model and prepare to simulate R <- 0.1 fit <- ppm(amacrine ~ marks + x, Strauss(R)) siminfo <- rmh(fit, preponly=TRUE) Wsim <- siminfo$control$internal$w.sim Wclip <- siminfo$control$internal$w.clip if(is.null(Wclip)) Wclip <- Window(cells) ## determine debugger interface panel geometry Xinit <- runifpoint(ex=amacrine)[1:40] P <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R, xcoords=Xinit$x, ycoords=Xinit$y, mlevels=levels(marks(Xinit)), mcodes=as.integer(marks(Xinit)) - 1L, irep=3L, itype=1L, proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, numerator=42, denominator=24, panel.only=TRUE) boxes <- P$boxes clicknames <- names(P$clicks) boxcentres <- do.call(concatxy, lapply(boxes, centroid.owin)) ## design a sequence of clicks actionsequence <- c("Up", "Down", "Left", "Right", "At Proposal", "Zoom Out", "Zoom In", "Reset", "Accept", "Reject", "Print Info", "Next Iteration", "Next Shift", "Next Death", "Skip 10", "Skip 100", "Skip 1000", "Skip 10,000", "Skip 100,000", "Exit Debugger") actionsequence <- match(actionsequence, clicknames) actionsequence <- actionsequence[!is.na(actionsequence)] xy <- lapply(boxcentres, "[", actionsequence) ## queue the click sequence spatstat.utils::queueSpatstatLocator(xy$x,xy$y) ## go rmh(fit, snoop=TRUE) }) } spatstat.core/tests/testsR1.R0000644000176200001440000001706214141452520015702 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/randoms.R #' Further tests of random generation code #' $Revision: 1.14 $ $Date: 2021/09/09 10:02:00 $ local({ if(FULLTEST) { #' cases not covered in examples A <- runifdisc(6, nsim=2) A <- runifpoispp(5, nsim=2) A <- runifpoispp(0, nsim=2) A <- rSSI(0.05, 6, nsim=2) A <- rSSI(0.05, 10, win=square(c(-0.5, 1.5)), x.init=A[[1]], nsim=2) A <- rstrat(nx=4, nsim=2) A <- rcell(square(1), nx=5, nsim=2) } if(ALWAYS) { # involves C code etc A <- rthin(cells, P=0.5, nsim=2) A <- rthin(cells, runif(42)) A <- rthin(cells[FALSE], P=0.5, nsim=2) } f <- function(x,y) { 10*x } Z <- as.im(f, square(1)) if(ALWAYS) { A <- rpoint(n=6, f=f, fmax=10, nsim=2) A <- rpoint(n=6, f=Z, fmax=10, nsim=2) A <- rpoint(n=0, f=f, fmax=10, nsim=2) A <- rpoint(n=0, f=Z, fmax=10, nsim=2) op <- spatstat.options(fastpois=FALSE) A <- runifpoispp(5, nsim=2) A <- rpoispp(Z) spatstat.options(op) } if(FULLTEST) { b3 <- box3(c(0,1)) b4 <- boxx(c(0,1), c(0,1), c(0,1), c(0,1)) b5 <- c(0, 2, 0, 2) X <- rMaternInhibition(2, kappa=20, r=0.1, win=b3) Y <- rMaternInhibition(2, kappa=20, r=0.1, win=b4) Y <- rMaternInhibition(2, kappa=20, r=0.1, win=b5, nsim=2) X <- rSSI(0.05, 6) Y <- rSSI(0.05, 6, x.init=X) # no extra points Z <- rlabel(finpines) } f1 <- function(x,y){(x^2 + y^3)/10} f2 <- function(x,y){(x^3 + y^2)/10} ZZ <- solist(A=as.im(f1, letterR), B=as.im(f2, letterR)) g <- function(x,y,m){(10+as.integer(m)) * (x^2 + y^3)} if(FULLTEST) { XX <- rmpoispp(ZZ, nsim=3) YY <- rmpoint(10, f=ZZ, nsim=3) VV <- rpoint.multi(10, f=g, marks=factor(sample(letters[1:3], 10, replace=TRUE)), nsim=3) } if(ALWAYS) { # depends on C code L <- edges(letterR) E <- runifpoisppOnLines(5, L) G <- rpoisppOnLines(ZZ, L) G2 <- rpoisppOnLines(list(A=f1, B=f2), L, lmax=max(sapply(ZZ, max))) } if(FULLTEST) { #' cluster models + bells + whistles X <- rThomas(10, 0.2, 5, saveLambda=TRUE) if(is.null(attr(X, "Lambda"))) stop("rThomas did not save Lambda image") Y <- rThomas(0, 0.2, 5, saveLambda=TRUE) if(is.null(attr(Y, "Lambda"))) stop("rThomas did not save Lambda image when kappa=0") X <- rMatClust(10, 0.05, 4, saveLambda=TRUE) X <- rCauchy(30, 0.01, 5, saveLambda=TRUE) X <- rVarGamma(30, 2, 0.02, 5, saveLambda=TRUE) Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin()) Y <- rThomas(10, 0.2, Z, saveLambda=TRUE) Y <- rMatClust(10, 0.05, Z, saveLambda=TRUE) Y <- rCauchy(30, 0.01, Z, saveLambda=TRUE) Y <- rVarGamma(30, 2, 0.02, Z, saveLambda=TRUE) } if(FULLTEST) { #' perfect simulation code infrastructure expandwinPerfect(letterR, 2, 3) #' trivial cases of random generators for ppx B4 <- boxx(0:1, 0:1, 0:1, 0:1) Z0 <- runifpointx(0, domain=B4, nsim=2) Z1 <- runifpointx(1, domain=B4, nsim=2) } }) reset.spatstat.options() #' tests/resid.R #' #' Stuff related to residuals and residual diagnostics #' #' $Revision: 1.6 $ $Date: 2020/05/01 02:42:58 $ #' local({ fit <- ppm(cells ~x, Strauss(r=0.15)) rr <- residuals(fit, quad=quadscheme(cells, nd=128)) diagnose.ppm(fit, cumulative=FALSE, type="pearson") if(FULLTEST) { diagnose.ppm(fit, cumulative=FALSE) fitoff <- ppm(cells ~ sin(x) + offset(y)) plot(a <- parres(fitoff, "x")) plot(b <- parres(fitoff, "y")) print(a) print(b) d <- diagnose.ppm(fit, which="marks") plot(d, plot.neg="discrete") plot(d, plot.neg="imagecontour") d <- diagnose.ppm(fit, 
type="pearson", which="smooth") plot(d, plot.smooth="image") plot(d, plot.smooth="contour") plot(d, plot.smooth="imagecontour") d <- diagnose.ppm(fit, type="pearson", which="x") plot(d) d <- diagnose.ppm(fit, type="pearson", which="y") plot(d) diagnose.ppm(fit, type="pearson", which="x", cumulative=FALSE) diagnose.ppm(fit, type="pearson", which="x", cumulative=FALSE) diagnose.ppm(fit, type="raw", plot.neg="discrete", plot.smooth="image") diagnose.ppm(fit, type="pearson", plot.neg="contour", plot.smooth="contour") diagnose.ppm(fitoff, type="raw", which="smooth", plot.smooth="persp") diagnose.ppm(fitoff, type="pearson", plot.neg="imagecontour") plot(Frame(letterR), main="") ploterodewin(letterR, erosion(letterR, 0.05), main="jeans") W <- as.mask(letterR) plot(Frame(W), main="") ploterodewin(W, erosion(W, 0.05), main="JeAnS") #' entangled terms in model U <- as.im(1, owin()) Z <- as.im(function(x,y) x, owin()) X <- runifpoint(40) fut <- ppm(X ~ Z:U) a <- parres(fut, "Z") futoff <- ppm(X ~ offset(Z*U)) a <- parres(futoff, "Z") } }) ## ## tests/rhohat.R ## ## Test all combinations of options for rhohatCalc ## ## $Revision: 1.5 $ $Date: 2020/05/01 02:42:58 $ local({ if(FULLTEST) { X <- rpoispp(function(x,y){exp(3+3*x)}) Z <- as.im(function(x,y) { x }, Window(X)) f <- funxy(function(x,y) { y + 1 }, Window(X)) ## rhohat.ppp ## done in example(rhohat): ## rhoA <- rhohat(X, "x") ## rhoB <- rhohat(X, "x", method="reweight") ## rhoC <- rhohat(X, "x", method="transform") ## alternative smoother (if package locfit available) rhoA <- rhohat(X, "x", smoother="local") rhoB <- rhohat(X, "x", smoother="local", method="reweight") rhoC <- rhohat(X, "x", smoother="local", method="transform") #' code blocks rhoD <- rhohat(X, "y", positiveCI=TRUE) rhoE <- rhohat(X, Z, positiveCI=TRUE) #' weights rhoF <- rhohat(X, Z, weights=f(X)) rhoG <- rhohat(X, Z, weights=f) rhoH <- rhohat(X, Z, weights=as.im(f)) ## rhohat.ppm fit <- ppm(X ~x) rhofitA <- rhohat(fit, "x") rhofitB <- rhohat(fit, "x", method="reweight") rhofitC <- rhohat(fit, "x", method="transform") rhofitD <- rhohat(fit, Z) rhofitD <- rhohat(fit, Z, positiveCI=TRUE) ## Baseline lam <- predict(fit) rhoAb <- rhohat(X, "x", baseline=lam) rhoBb <- rhohat(X, "x", method="reweight", baseline=lam) rhoCb <- rhohat(X, "x", method="transform", baseline=lam) ## Horvitz-Thompson rhoAH <- rhohat(X, "x", horvitz=TRUE) rhoBH <- rhohat(X, "x", method="reweight", horvitz=TRUE) rhoCH <- rhohat(X, "x", method="transform", horvitz=TRUE) rhofitAH <- rhohat(fit, "x", horvitz=TRUE) rhofitBH <- rhohat(fit, "x", method="reweight", horvitz=TRUE) rhofitCH <- rhohat(fit, "x", method="transform", horvitz=TRUE) ## class support plot(rhoA) plot(rhoA, rho ~ x, shade=NULL) plot(rhoA, log(rho) ~ x, shade=NULL) plot(rhoA, log(.) ~ x) ## rho2hat r2xy <- rho2hat(X, "x", "y") r2xyw <- rho2hat(X, "x", "y", method="reweight") print(r2xyw) plot(r2xy, do.points=TRUE) xcoord <- function(x,y) x ycoord <- function(x,y) y xim <- as.im(xcoord, W=Window(X)) r2fi <- rho2hat(X, ycoord, xim) r2if <- rho2hat(X, xim, ycoord) r2myx <- rho2hat(fit, "y", "x") r2myxw <- rho2hat(fit, "y", "x", method="reweight") plot(r2myx) plot(r2myxw) print(r2myxw) predict(r2myxw) predict(r2myxw, relative=TRUE) } }) spatstat.core/tests/testsP2.R0000644000176200001440000004366014141452520015704 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/ppmBadData.R # # $Revision: 1.6 $ $Date: 2020/04/30 05:23:52 $ # Testing robustness of ppm and support functions # when data are rubbish local({ if(ALWAYS) { ## from Rolf: very large proportion of data is NA SEED <- 42 K <- 101 A <- 500 X <- seq(0, A, length=K) G <- expand.grid(x=X, y=X) FOO <- function(x,y) { sin(x)^2 + cos(y)^2 } M1 <- im(matrix(FOO(G$x, G$y), K, K), xcol=X, yrow=X) M <- im(matrix(FOO(G$x, G$y), K, K)) BAR <- function(x) { exp(-6.618913 + 5.855337 * x - 8.432483 * x^2) } V <- im(BAR(M$v), xcol=X, yrow=X) # V <- eval.im(exp(-6.618913 + 5.855337 * M - 8.432483 * M^2)) set.seed(SEED) Y <- rpoispp(V) fY <- ppm(Y ~cv + I(cv^2), data=list(cv=M), correction="translate") diagnose.ppm(fY) lurking(fY, covariate=as.im(function(x,y){x}, square(A)), type="raw") } if(ALWAYS) { ## from Andrew Bevan: numerical overflow, ill-conditioned Fisher information SEED <- 42 nongranite<- owin(poly = list(x = c(0, 8500, 7000, 6400, 6400, 6700, 7000, 7200, 7300, 8000, 8100, 8800, 9500, 10000, 10000, 0), y = c(0, 0, 2000, 3800, 4000, 5000, 6500, 7400, 7500, 8000, 8100, 9000, 9500, 9600, 10000, 10000))) ## Trend on raster grid rain <- as.im(X=function(x,y) { x^2 + y^2 }, W=nongranite, dimyx=100) ## Generate a point pattern via a Lennard-Jones process set.seed(SEED) mod4<- rmhmodel(cif="lennard", par=list(beta=1, sigma=250, epsilon=2.2), trend=rain, w=nongranite) ljtr<- rmh(mod4, start=list(n.start=80), control=list(p=1, nrep=1e5)) ## Fit a point process model to the pattern with rain as a covariate ## NOTE INCORRECT TREND FORMULA ljtrmod <- ppm(ljtr, trend= ~ Z, interaction=NULL, data=list(Z=rain)) ss <- summary(ljtrmod) } if(FULLTEST) { ## From Ege ## Degenerate but non-null argument 'covariates' xx <- list() names(xx) <- character(0) fit <- ppm(cells ~x, covariates = xx) st <- summary(fit) } }) #' tests/ppmclass.R #' #' Class support for ppm #' #' $Revision: 1.8 $ $Date: 2020/12/04 08:24:43 $ if(FULLTEST) { local({ #' (1) print.ppm, summary.ppm, print.summary.ppm Z <- as.im(function(x,y){x}, Window(cells)) fitZ <- ppm(cells ~ Z) print(fitZ) print(summary(fitZ)) #' logistic fitl <- ppm(swedishpines ~ x+y, method="logi") print(fitl) print(summary(fitl)) #' Model with covariate arguments f <- function(x,y,b) { x+b } fitf <- ppm(cells ~ f, covfunargs=list(b=1)) print(fitf) print(summary(fitf)) #' Invalid model fitN <- ppm(redwood ~ 1, Strauss(0.1)) print(fitN) print(summary(fitN)) #' standard errors in output fat <- ppm(cells ~ x, Strauss(0.12)) op <- spatstat.options(print.ppm.SE='always') print(fat) spatstat.options(print.ppm.SE='never') print(fat) print(fitZ) spatstat.options(op) ## (2) plot.ppm plot(fitZ) plot(fat, trend=FALSE, cif=FALSE, se=FALSE) ## (3) emend.ppm fitZe <- emend(fitZ, trace=TRUE) ZZ <- Z fitZZ <- ppm(cells ~ Z + ZZ) fitZZe <- emend(fitZZ, trace=TRUE) fitOK <- ppm(redwood ~1, Strauss(0.1), emend=TRUE) print(fitOK) fitNot <- ppm(redwood ~1, Strauss(0.1)) fitSlow <- emend(fitNot, trace=TRUE) print(fitSlow) op <- spatstat.options(project.fast=TRUE) fitFast <- emend(fitNot, trace=TRUE) print(fitFast) fitZZe <- emend(fitZZ, trace=TRUE) spatstat.options(op) #' (4) methods for other generics logLik(fitZ, absolute=TRUE) unitname(fitZ) unitname(fat) <- c("metre", "metres") is.expandable(fitf) fit0 <- update(fitZ, 
. ~ 1) anova(fit0, fitZ, override=TRUE) #' (5) miscellaneous ## example from Robert Aue - handling offsets X <- demohyper$Points[[1]] GH <- Hybrid(G=Geyer(r=0.1, sat=3), H=Hardcore(0.01)) fit <- ppm(X ~ 1, GH) valid.ppm(fit) #' case of boundingbox boundingbox(cells, ppm(cells ~ 1)) }) reset.spatstat.options() } # # tests/ppmgam.R # # Test ppm with use.gam=TRUE # # $Revision: 1.4 $ $Date: 2020/04/30 05:23:52 $ # if(FULLTEST) { local({ fit <- ppm(nztrees ~s(x,y), use.gam=TRUE) mm <- model.matrix(fit) mf <- model.frame(fit) v <- vcov(fit) prd <- predict(fit) }) } #' #' tests/ppmlogi.R #' #' Tests of ppm(method='logi') #' and related code (predict, leverage etc) #' #' $Revision: 1.15 $ $Date: 2020/04/30 05:23:52 $ #' local({ if(FULLTEST) { fit <- ppm(cells ~x, method="logi") f <- fitted(fit) p <- predict(fit) u <- summary(fit) fitS <- ppm(cells ~x, Strauss(0.12), method="logi") fS <- fitted(fitS) pS <- predict(fitS) uS <- summary(fitS) print(uS) plot(leverage(fit)) plot(influence(fit)) plot(dfbetas(fit)) plot(leverage(fitS)) plot(influence(fitS)) plot(dfbetas(fitS)) } if(FULLTEST) { #' same with hard core - A1 is singular fitH <- ppm(cells ~x, Strauss(0.08), method="logi") print(fitH) fH <- fitted(fitH) pH <- predict(fitH) uH <- summary(fitH) print(uH) plot(leverage(fitH)) plot(influence(fitH)) plot(dfbetas(fitH)) } if(FULLTEST) { #' logistic fit to data frame of covariates z <- c(rep(TRUE, 5), rep(FALSE, 5)) df <- data.frame(A=z + 2* runif(10), B=runif(10)) Y <- quadscheme.logi(runifpoint(5), runifpoint(5)) fut <- ppm(Y ~ A+B, data=df, method="logi") sf <- summary(fut) print(sf) } if(FULLTEST) { #' vblogit code, just to check that it runs. fee <- ppm(cells ~ x, method="VBlogi", nd=21) print(fee) summary(fee) logLik(fee) AIC(fee) extractAIC(fee) Z <- predict(fee) summary(Z) print(fee$internal$glmfit) # print.vblogit } }) # # tests/ppmmarkorder.R # # $Revision: 1.4 $ $Date: 2020/04/30 05:23:52 $ # # Test that predict.ppm, plot.ppm and plot.fitin # tolerate marks with levels that are not in alpha order # if(ALWAYS) { # locale-dependent? local({ X <- amacrine levels(marks(X)) <- c("ZZZ", "AAA") fit <- ppm(X ~marks, MultiStrauss(c("ZZZ","AAA"), matrix(0.06, 2, 2))) aa <- predict(fit, type="trend") bb <- predict(fit, type="cif") plot(fit) plot(fitin(fit)) }) } # # tests/ppmscope.R # # Test things that might corrupt the internal format of ppm objects # # $Revision: 1.6 $ $Date: 2020/04/30 05:23:52 $ # if(ALWAYS) { # dependent on R version? 
local({ ## (1) Scoping problem that can arise when ppm splits the data fit <- ppm(bei ~elev, data=bei.extra) mm <- model.matrix(fit) ## (2) Fast update mechanism fit1 <- ppm(cells ~x+y, Strauss(0.07)) fit2 <- update(fit1, ~y) fit3 <- update(fit2, ~x) ## (3) New formula-based syntax attach(bei.extra) slfit <- ppm(bei ~ grad) sl2fit <- update(slfit, ~grad + I(grad^2)) slfitup <- update(slfit, use.internal=TRUE) sl2fitup <- update(sl2fit, use.internal=TRUE) ## (4) anova.ppm fut1 <- ppm(cells ~ 1, Strauss(0.1)) futx <- ppm(cells ~ x, Strauss(0.1)) anova(fut1, test="Chi") anova(futx, test="Chi") fut1a <- ppm(cells ~ 1, Strauss(0.1), rbord=0) anova(fut1a, futx, test="Chi") fut1d <- ppm(cells ~ 1, Strauss(0.1), nd=23) anova(fut1d, futx, test="Chi") ## The following doesn't work yet ## futxyg <- ppm(cells ~ x + s(y), Strauss(0.1), use.gam=TRUE) ## anova(futx, futxyg) fatP <- ppm(amacrine ~ marks) fatM <- ppm(amacrine ~ marks, MultiStrauss(matrix(0.07, 2, 2))) anova(fatP, fatM, test="Chi") }) } # # tests/ppmtricks.R # # Test backdoor exits, hidden options, internals and tricks in ppm # # $Revision: 1.19 $ $Date: 2020/04/30 05:23:52 $ # local({ ## (1) skip.border if(ALWAYS) { # needed below fit <- ppm(cells, ~1, Strauss(0.1), skip.border=TRUE) } ## (2) subset arguments of different kinds if(FULLTEST) { fut <- ppm(cells ~ x, subset=(x > 0.5)) fot <- ppm(cells ~ x, subset=(x > 0.5), method="logi") W <- owin(c(0.4, 0.8), c(0.2, 0.7)) fut <- ppm(cells ~ x, subset=W) fot <- ppm(cells ~ x, subset=W, method="logi") V <- as.im(inside.owin, Window(cells), w=W) fet <- ppm(cells ~ x, subset=V) fet <- ppm(cells ~ x, subset=V, method="logi") } ## (3) profilepl -> ppm ## uses 'skip.border' and 'precomputed' ## also tests scoping for covariates if(FULLTEST) { splants <- split(ants) mess <- splants[["Messor"]] cats <- splants[["Cataglyphis"]] ss <- data.frame(r=seq(60,120,by=20),hc=29/6) dM <- distmap(mess,dimyx=256) mungf <- profilepl(ss, StraussHard, cats ~ dM) mungp <- profilepl(ss, StraussHard, trend=~dM, Q=cats) } ## (4) splitting large quadschemes into blocks if(FULLTEST) { mop <- spatstat.options(maxmatrix=5000) qr <- quadBlockSizes(quadscheme(cells)) pr <- predict(ppm(cells ~ x, AreaInter(0.05))) spatstat.options(mop) qr <- quadBlockSizes(quadscheme(cells)) } ## (5) shortcuts in summary.ppm ## and corresponding behaviour of print.summary.ppm if(FULLTEST) { print(summary(fit, quick=TRUE)) print(summary(fit, quick="entries")) print(summary(fit, quick="no prediction")) print(summary(fit, quick="no variances")) } ## (6) suffstat.R if(ALWAYS) { fitP <- update(fit, Poisson()) suffstat.poisson(fitP, cells) fit0 <- killinteraction(fit) suffstat.poisson(fit0, cells) } ## (7) various support for class ppm if(FULLTEST) { fut <- kppm(redwood ~ x) A <- quad.ppm(fut) Z <- as.im(function(x,y){x}, Window(cells)) fitZ <- ppm(cells ~ Z) U <- getppmOriginalCovariates(fitZ) } ## (8) support for class profilepl if(FULLTEST) { rr <- data.frame(r=seq(0.05, 0.15, by=0.02)) ps <- profilepl(rr, Strauss, cells) ## plot(ps) ## covered in plot.profilepl.Rd simulate(ps, nrep=1e4) parameters(ps) fitin(ps) predict(ps, type="cif") } ## (9) class 'plotppm' if(FULLTEST) { fut <- ppm(amacrine ~ marks + polynom(x,y,2), Strauss(0.07)) p <- plot(fut, plot.it=FALSE) print(p) plot(p, how="contour") plot(p, how="persp") } ## (10) ppm -> mpl.engine -> mpl.prepare if(ALWAYS) { # includes C code fit <- ppm(cells, NULL) fit <- ppm(cells ~ x, clipwin=square(0.7)) fit <- ppm(cells ~ x, subset=square(0.7)) DG <- as.im(function(x,y){x+y < 1}, 
square(1)) fit <- ppm(cells ~ x, subset=DG) fit <- ppm(cells ~ x, GLM=glm) fit <- ppm(cells ~ x, famille=quasi(link='log', variance='mu')) fit <- ppm(cells ~ x, Hardcore(0.07), skip.border=TRUE, splitInf=TRUE) } ## (11) unidentifiable model (triggers an error in ppm) if(FULLTEST) { Q <- quadscheme(cells) M <- mpl.prepare(Q, cells, as.ppp(Q), trend=~1, covariates=NULL, interaction=Hardcore(0.3), correction="none") } }) reset.spatstat.options() # # tests/prediction.R # # Things that might go wrong with predict() # # $Revision: 1.20 $ $Date: 2020/04/30 05:41:59 $ # local({ if(ALWAYS) { ## test of 'covfunargs' - platform dependent? f <- function(x,y,a){ y - a } fit <- ppm(cells ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) p <- predict(fit) ## prediction involving 0 * NA qc <- quadscheme(cells, nd=10) r <- minnndist(as.ppp(qc))/10 fit <- ppm(qc ~ 1, Strauss(r)) # model has NA for interaction coefficient p1 <- predict(fit) p2 <- predict(fit, type="cif", ngrid=10) stopifnot(all(is.finite(as.matrix(p1)))) stopifnot(all(is.finite(as.matrix(p2)))) } if(FULLTEST) { ## test of 'new.coef' mechanism fut <- ppm(cells ~ x, Strauss(0.15), rbord=0) p0 <- predict(fut, type="cif") pe <- predict(fut, type="cif", new.coef=coef(fut)) pn <- predict(fut, type="cif", new.coef=unname(coef(fut))) if(max(abs(pe-p0)) > 0.01) stop("new.coef mechanism is broken!") if(max(abs(pn-p0)) > 0.01) stop("new.coef mechanism gives wrong answer, for unnamed vectors") #' adaptcoef a <- c(A=1,B=2,Z=42) b <- c(B=41,A=0) ab <- adaptcoef(a, b, drop=TRUE) } if(FULLTEST) { ## tests of relrisk.ppm fut <- ppm(amacrine ~ x * marks) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) fut <- ppm(sporophores ~ marks + x) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) ## untested cases of predict.ppm fit0 <- ppm(cells) a <- predict(fit0, interval="confidence") a <- predict(fit0, interval="confidence", type="count") fit <- ppm(cells ~ x) b <- predict(fit, se=TRUE, locations=cells) b <- predict(fit, se=TRUE, interval="confidence") b <- predict(fit, type="count", se=TRUE) b <- predict(fit, type="count", window=square(0.5), se=TRUE) b <- predict(fit, type="count", window=quadrats(cells, 3), se=TRUE) d <- predict(fit, type="count", interval="prediction", se=TRUE) d <- predict(fit, type="count", interval="confidence", se=TRUE) d <- predict(fit, interval="confidence", se=TRUE) foot <- ppm(cells ~ x, StraussHard(0.12)) d <- predict(foot, ignore.hardcore=TRUE) dX <- predict(foot, ignore.hardcore=TRUE, locations=cells) ## superseded usages b <- predict(fit, type="se", getoutofjail=TRUE) b <- predict(fit, type="se", locations=cells) # warning b <- predict(fit, total=TRUE) b <- predict(fit, total=square(0.5)) b <- predict(fit, total=quadrats(cells, 3)) ## supporting code u <- model.se.image(fit, square(0.5)) u <- model.se.image(fit, square(0.5), what="cv") u <- model.se.image(fit, square(0.5), what="ce") co <- c(Intercept=5, slope=3, kink=2) re <- c("Intercept", "slope") a <- fill.coefs(co, re) # warning b <- fill.coefs(co, rev(names(co))) d <- fill.coefs(co, letters[1:3]) ## model matrix etc v <- model.frame(ppm(cells)) fut <- ppm(cells ~ x, Strauss(0.1)) v <- model.matrix(fut, subset=(x<0.5), keepNA=FALSE) df <- data.frame(x=runif(10), y=runif(10), Interaction=sample(0:1, 10, TRUE)) m10 <- PPMmodelmatrix(fut, data=df) mmm <- PPMmodelmatrix(fut, Q=quad.ppm(fut)) #' effectfun for Gibbs effectfun(fut, "x") effectfun(fut, 
"x", se.fit=TRUE) #' implicit covariate when there is only one effectfun(fut) effectfun(fut, se.fit=TRUE) #' given covariate dlin <- distfun(copper$SouthLines) copfit <- ppm(copper$SouthPoints ~ dlin, Geyer(1,1)) effectfun(copfit, "dlin") effectfun(copfit) #' covariate that is not used in model effectfun(fut, "y", x=0) futS <- ppm(cells ~ 1, Strauss(0.1)) effectfun(futS, "x") effectfun(futS, "y") #' factor covariate fot <- ppm(amacrine~x+marks) effectfun(fot, "marks", x=0.5, se.fit=TRUE) #' covariate retained but not used W <- Window(swedishpines) a <- solist(A=funxy(function(x,y){x < 20}, W), B=funxy(function(x,y){factor(x < 20)}, W)) fvt <- ppm(swedishpines ~ A, data=a, allcovar=TRUE) effectfun(fvt, "A", se.fit=TRUE) effectfun(fvt, "B", A=TRUE, se.fit=TRUE) ## ppm with covariate values in data frame X <- rpoispp(42) Q <- quadscheme(X) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } Zvalues <- weirdfunction(x.quad(Q), y.quad(Q)) fot <- ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) effectfun(fot, "y", Z=0) effectfun(fot, "Z", y=0) #' multitype modX <- ppm(amacrine ~ polynom(x,2)) effectfun(modX) effectfun(modX, "x") modXM <- ppm(amacrine ~ marks*polynom(x,2)) effectfun(modXM, "x", marks="on") modXYM <- ppm(amacrine ~ marks*polynom(x,y,2)) effectfun(modXYM, "x", y=0, marks="on") df <- as.data.frame(simulate(modXM, drop=TRUE)) df$marks <- as.character(df$marks) dfpr <- predict(modXM, locations=df) } }) # # tests/project.ppm.R # # $Revision: 1.7 $ $Date: 2020/04/30 05:41:59 $ # # Tests of projection mechanism # local({ chk <- function(m) { if(!valid.ppm(m)) stop("Projected model was still not valid") return(invisible(NULL)) } if(FULLTEST) { ## a very unidentifiable model fit <- ppm(cells ~Z, Strauss(1e-06), covariates=list(Z=0)) chk(emend(fit)) ## multitype r <- matrix(1e-06, 2, 2) fit2 <- ppm(amacrine ~1, MultiStrauss(types=c("off", "on"), radii=r)) chk(emend(fit2)) ## complicated multitype fit3 <- ppm(amacrine ~1, MultiStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit3)) #' code coverage op <- spatstat.options(project.fast=TRUE) fut <- emend(fit, trace=TRUE) chk(fut) spatstat.options(op) #' hierarchical ra <- r r[2,1] <- NA fit4 <- ppm(amacrine ~1, HierStrauss(types=c("off", "on"), radii=r)) chk(emend(fit4)) #' complicated hierarchical fit5 <- ppm(amacrine ~1, HierStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit5)) ## hybrids r0 <- min(nndist(redwood)) ra <- 1.25 * r0 rb <- 0.8 * r0 f1 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Geyer(0.1, 2)), project=TRUE) chk(f1) f2 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f2) f3 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Strauss(0.1)), project=TRUE) chk(f3) f4 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Strauss(0.1)), project=TRUE) chk(f4) f5 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Strauss(0.1)), project=TRUE) chk(f5) f6 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f6) f7 <- ppm(redwood ~1, Hybrid(A=Geyer(rb, 1), B=Strauss(0.1)), project=TRUE) chk(f7) } }) reset.spatstat.options() spatstat.core/tests/testsL.R0000644000176200001440000002536214141452520015615 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/legacy.R ## ## Test that current version of spatstat is compatible with outmoded usage ## $Revision: 1.3 $ $Date: 2020/04/29 08:55:17 $ if(FULLTEST) { local({ ## (1) Old syntax of ppm ppm(cells, ~x) ## (2) Old syntax of MultiStrauss etc. r <- matrix(3, 2, 2) a <- MultiStrauss( , r) a <- MultiStrauss(NULL, r) a <- MultiHard(, r) h <- r/2 a <- MultiStraussHard( , r, h) NULL }) } #' #' tests/leverinf.R #' #' leverage and influence for Gibbs models #' #' $Revision: 1.34 $ $Date: 2021/04/17 04:25:26 $ #' if(FULLTEST) { Cells <- cells Amacrine <- amacrine Redwood <- redwood } else { ## reduce number of data + dummy points spatstat.options(npixel=32, ndummy.min=16) Cells <- cells[c(FALSE,TRUE)] Redwood <- redwood[c(FALSE, TRUE)] Amacrine <- amacrine[c(FALSE, TRUE)] } local({ cat("Running non-sparse algorithm...", fill=TRUE) # original non-sparse algorithm Leverage <- function(...) leverage(..., sparseOK=FALSE) Influence <- function(...) influence(..., sparseOK=FALSE) Dfbetas <- function(...) dfbetas(..., sparseOK=FALSE) if(ALWAYS) { ## Strauss()$delta2 fitS <- ppm(Cells ~ x, Strauss(0.12), rbord=0) levS <- Leverage(fitS) infS <- Influence(fitS) dfbS <- Dfbetas(fitS) ## Geyer()$delta2 fitG <- ppm(Redwood ~ 1, Geyer(0.1, 2), rbord=0) levG <- Leverage(fitG) infG <- Influence(fitG) ## AreaInter()$delta2 fitA <- ppm(Cells ~ 1, AreaInter(0.06), rbord=0, nd=11) levA <- Leverage(fitA) infA <- Influence(fitA) ## pairwise.family$delta2 fitD <- ppm(Cells ~ 1, DiggleGatesStibbard(0.12), rbord=0) levD <- Leverage(fitD) infD <- Influence(fitD) ## DiggleGratton() special code fitDG <- ppm(Cells ~ 1, DiggleGratton(0.05, 0.12), rbord=0) levDG <- Leverage(fitDG) infDG <- Influence(fitDG) ## ppmInfluence; offset is present; coefficient vector has length 0 fitH <- ppm(Cells ~ 1, Hardcore(0.07)) levH <- Leverage(fitH) infH <- Influence(fitH) ## ppmInfluence; hard core fitSH <- ppm(Cells ~ 1, StraussHard(0.07, 0.01)) levSH <- Leverage(fitSH) infSH <- Influence(fitSH) ## ppmInfluence; offset is present; coefficient vector has length 1 fitHx <- ppm(Cells ~ x, Hardcore(0.07), rbord=0) levHx <- Leverage(fitHx) infHx <- Influence(fitHx) ## multitype futAm <- ppm(Amacrine ~ x + marks, Strauss(0.07)) levAm <- leverage(futAm) } if(FULLTEST) { ## ......... class support ............................. ## other methods for classes leverage.ppm and influence.ppm ## not elsewhere tested cat("Testing class support...", fill=TRUE) w <- domain(levS) w <- Window(infS) vv <- shift(levS, c(1.2, 1.3)) vv <- shift(infS, c(1.2, 1.3)) A <- quadrats(Window(Cells), 2) a <- integral(levS,domain=A) b <- integral(infS,domain=A) u <- Smooth(levS, sigma=0.07) v <- Smooth(infS, sigma=0.1) ## plot options plot(levS, what="exact") plot(levS, what="nearest") contour(levS, what="nearest") persp(levS, what="nearest") ## plotting for multitype models plot(levAm) contour(levAm) persp(levAm) plot(levAm, multiplot=FALSE) contour(levAm, multiplot=FALSE) } if(ALWAYS) { ## .......... compare algorithms ......................... 
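  ## [Added sketch, not part of the original test battery]
  ## The comparisons below rely on the fact that lowering the 'maxmatrix'
  ## option forces the leverage/influence computation to process the
  ## quadrature scheme in several blocks, whose recombined result should
  ## agree with the single-block result up to numerical error.
  ## A minimal self-contained illustration, reusing 'fitS' and 'Leverage'
  ## defined above (the tolerance 1e-10 is an arbitrary choice here):
  levOne <- Leverage(fitS)                     # single-block computation
  oldmm <- spatstat.options(maxmatrix=50000)   # force block processing
  levMany <- Leverage(fitS)                    # divide-and-recombine computation
  spatstat.options(oldmm)                      # restore the option
  if(max(abs(as.im(levOne) - as.im(levMany))) > 1e-10)
    stop("added sketch: blocked and unblocked leverage maps disagree")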
## divide and recombine algorithm cat("Reduce maximum block side to 50,000 ...", fill=TRUE) op <- spatstat.options(maxmatrix=50000) ## non-sparse levSB <- Leverage(fitS) infSB <- Influence(fitS) dfbSB <- Dfbetas(fitS) } chk <- function(x, y, what, from="single-block and multi-block", thresh=1e-12) { if(max(abs(x-y)) > thresh) stop(paste("Different results for", what, "obtained from", from, "algorithms"), call.=FALSE) invisible(NULL) } if(ALWAYS) { cat("Compare single-block to multi-block...", fill=TRUE) chk(marks(as.ppp(infS)), marks(as.ppp(infSB)), "influence") chk(as.im(levS), as.im(levSB), "leverage") chk(dfbS$val, dfbSB$val, "dfbetas$value") chk(dfbS$density, dfbSB$density, "dfbetas$density") } if(FULLTEST) { ## also check case of zero cif cat("Check zero cif cases...", fill=TRUE) levHB <- Leverage(fitH) infHB <- Influence(fitH) dfbHB <- Dfbetas(fitH) levHxB <- Leverage(fitHx) infHxB <- Influence(fitHx) dfbHxB <- Dfbetas(fitHx) } ## run all code segments Everything <- function(model, ...) { ppmInfluence(model, ..., what="all") } if(FULLTEST) { cat("Run full code on AreaInteraction model...", fill=TRUE) pmiA <- Everything(fitA) ## sparse algorithm, with blocks cat("Run sparse algorithm with blocks...", fill=TRUE) pmiSSB <- Everything(fitS, sparseOK=TRUE) ## also check case of zero cif pmiHSB <- Everything(fitH, sparseOK=TRUE) pmiSHSB <- Everything(fitSH, sparseOK=TRUE) pmiHxSB <- Everything(fitHx, sparseOK=TRUE) cat("Reinstate maxmatrix...", fill=TRUE) spatstat.options(op) } if(ALWAYS) { ## sparse algorithm, no blocks cat("Compare sparse and non-sparse results...", fill=TRUE) pmi <- Everything(fitS, sparseOK=TRUE) levSp <- pmi$leverage infSp <- pmi$influence dfbSp <- pmi$dfbetas chks <- function(...) chk(..., from="sparse and non-sparse") chks(marks(as.ppp(infS)), marks(as.ppp(infSp)), "influence") chks(as.im(levS), as.im(levSp), "leverage") chks(dfbS$val, dfbSp$val, "dfbetas$value") chks(dfbS$density, dfbSp$density, "dfbetas$density") } if(ALWAYS) { #' case of zero cif cat("zero cif...", fill=TRUE) pmiH <- Everything(fitH, sparseOK=TRUE) pmiSH <- Everything(fitSH, sparseOK=TRUE) pmiHx <- Everything(fitHx, sparseOK=TRUE) } if(FULLTEST) { #' other code blocks - check execution only cat("other code blocks...", fill=TRUE) a <- Everything(fitS) a <- Everything(fitS, method="interpreted") a <- Everything(fitS, method="interpreted", entrywise=FALSE) a <- Everything(fitS, entrywise=FALSE) #' zero cif b <- Everything(fitSH) b <- Everything(fitSH, method="interpreted") b <- Everything(fitSH, method="interpreted", entrywise=FALSE) b <- Everything(fitSH, entrywise=FALSE) } #' NOTE: code for irregular parameters is tested below, and in 'make bookcheck' ## ........... logistic fits ....................... 
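  ## [Added background sketch, not from the original file]
  ## ppm(..., method="logi") fits the same Gibbs model by logistic
  ## composite likelihood instead of the default maximum pseudolikelihood;
  ## the two coefficient vectors are expected to be broadly similar but
  ## not identical. A minimal illustration using the 'Cells' pattern
  ## defined at the top of this file:
  fitMPLdemo  <- ppm(Cells ~ x, Strauss(0.12), rbord=0)                 # pseudolikelihood
  fitLOGIdemo <- ppm(Cells ~ x, Strauss(0.12), rbord=0, method="logi")  # logistic
  print(rbind(mpl=coef(fitMPLdemo), logi=coef(fitLOGIdemo)))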
cat("Logistic fits...", fill=TRUE) #' special algorithm for delta2 fitSlogi <- ppm(Cells ~ x, Strauss(0.12), rbord=0, method="logi") if(FULLTEST) { pmiSlogi <- Everything(fitSlogi) #' special algorithm for delta2 fitGlogi <- ppm(Redwood ~ 1, Geyer(0.1, 2), rbord=0, method="logi") pmiGlogi <- Everything(fitGlogi) #' generic algorithm for delta2 fitDlogi <- ppm(Cells ~ 1, DiggleGatesStibbard(0.12), rbord=0, method="logi") pmiDlogi <- Everything(fitDlogi) #' generic algorithm for delta2 : offset; zero-dimensional fitHlogi <- ppm(Cells ~ 1, Hardcore(0.07), method="logi") pmiHlogi <- Everything(fitHlogi) #' generic algorithm for delta2 : offset; 1-dimensional fitHxlogi <- ppm(Cells ~ x, Hardcore(0.07), rbord=0, method="logi") pmiHxlogi <- Everything(fitHxlogi) #' plotting plot(leverage(fitSlogi)) plot(influence(fitSlogi)) plot(dfbetas(fitSlogi)) } if(ALWAYS) { #' other code blocks - check execution only cat("Other code blocks...", fill=TRUE) b <- Everything(fitSlogi) # i.e. full set of results b <- Everything(fitSlogi, method="interpreted") b <- Everything(fitSlogi, method="interpreted", entrywise=FALSE) b <- Everything(fitSlogi, entrywise=FALSE) } #' irregular parameters cat("Irregular parameters...", fill=TRUE) ytoa <- function(x,y, alpha=1) { y^alpha } lam <- function(x,y,alpha=1) { exp(4 + y^alpha) } set.seed(90210) X <- rpoispp(lam, alpha=2) iScor <- list(alpha=function(x,y,alpha) { alpha * y^(alpha-1) } ) iHess <- list(alpha=function(x,y,alpha) { alpha * (alpha-1) * y^(alpha-2) } ) gogo <- function(tag, ..., iS=iScor, iH=iHess) { cat(tag, fill=TRUE) #' compute all leverage+influence terms ppmInfluence(..., what="all", iScore=iS, iHessian=iH) } gogogo <- function(hdr, fit) { cat(hdr, fill=TRUE) force(fit) #' try all code options d <- gogo("a", fit) d <- gogo("b", fit, method="interpreted") d <- gogo("c", fit, method="interpreted", entrywise=FALSE) d <- gogo("d", fit, entrywise=FALSE) invisible(NULL) } gogogo("Offset model...", ippm(X ~ offset(ytoa), start=list(alpha=1), iterlim=40)) gogogo("Offset model (logistic) ...", ippm(X ~ offset(ytoa), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset+x model...", ippm(X ~ x + offset(ytoa), start=list(alpha=1), iterlim=40)) gogogo("Offset+x model (logistic) ...", ippm(X ~ x + offset(ytoa), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset model Strauss ...", ippm(X ~ offset(ytoa), Strauss(0.07), start=list(alpha=1), iterlim=40)) gogogo("Offset model Strauss (logistic) ...", ippm(X ~ offset(ytoa), Strauss(0.07), start=list(alpha=1), method="logi", iterlim=40)) if(FULLTEST) { gogogo("Offset+x model Strauss ...", ippm(X ~ x + offset(ytoa), Strauss(0.07), start=list(alpha=1), iterlim=40)) gogogo("Offset+x model Strauss (logistic)...", ippm(X ~ x + offset(ytoa), Strauss(0.07), start=list(alpha=1), method="logi", iterlim=40)) } #' if(FULLTEST) { set.seed(452) foo <- ppm(Cells ~ 1, Strauss(0.15), method="ho", nsim=5) aa <- Everything(foo) #' Gradient and Hessian obtained by symbolic differentiation f <- deriv(expression((1+x)^a), "a", function.arg=c("x", "y", "a"), hessian=TRUE) #' check they can be extracted fit <- ippm(Cells ~offset(f), start=list(a=0.7)) Everything(fit) } }) reset.spatstat.options() ## ## tests/localpcf.R ## ## temporary test file for localpcfmatrix ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ local({ a <- localpcfmatrix(redwood) if(FULLTEST) { a plot(a) a[, 3:5] } }) spatstat.core/tests/testsUtoZ.R0000644000176200001440000002211314141452520016312 0ustar liggesusers#' #' Header for all (concatenated) test 
files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/undoc.R # # $Revision: 1.16 $ $Date: 2020/11/02 07:06:49 $ # # Test undocumented hacks, experimental code, etc local({ if(FULLTEST) { ## cases of 'pickoption' aliases <- c(Lenin="Ulyanov", Stalin="Djugashvili", Trotsky="Bronstein") surname <- "Trot" pickoption("leader", surname, aliases) pickoption("leader", surname, aliases, exact=TRUE, die=FALSE) } if(ALWAYS) { ## pixellate.ppp accepts a data frame of weights pixellate(cells, weights=data.frame(a=1:42, b=42:1)) ## test parts of 'rmhsnoop' that don't require interaction with user rmhSnoopEnv(cells, Window(cells), 0.1) } if(FULLTEST) { ## Berman-Turner frame A <- bt.frame(quadscheme(cells), ~x, Strauss(0.07), rbord=0.07) print(A) ## digestCovariates D <- distfun(cells) Z <- distmap(cells) U <- dirichlet(cells) stopifnot(is.scov(D)) stopifnot(is.scov(Z)) stopifnot(is.scov(U)) stopifnot(is.scov("x")) dg <- digestCovariates(D=D,Z=Z,U=U,"x",list(A="x", B=D)) ## a <- getfields(dg, c("A", "D", "niets"), fatal=FALSE) ## util.R gg <- pointgrid(owin(), 7) checkbigmatrix(1000000L, 1000000L, FALSE, TRUE) spatstatDiagnostic("whatever") M <- list(list(a=2, b=FALSE), list(a=2, b=TRUE)) stopifnot(!allElementsIdentical(M)) stopifnot(allElementsIdentical(M, "a")) ## A <- Strauss(0.1) A <- reincarnate.interact(A) ## ## special lists B <- solist(a=cells, b=redwood, c=japanesepines) BB <- as.ppplist(B) BL <- as.layered(B) DB <- as.imlist(lapply(B, density)) is.solist(B) is.ppplist(B) is.imlist(DB) ## case of density.ppplist DEB <- density(BB, se=TRUE) } if(ALWAYS) { ## fft z <- matrix(1:16, 4, 4) a <- fft2D(z, west=FALSE) if(fftwAvailable()) b <- fft2D(z, west=TRUE) } if(ALWAYS) { ## experimental interactions pot <- function(d, par) { d <= 0.1 } A <- Saturated(pot) print(A) A <- update(A, name="something") ppm(amacrine ~ x, A, rbord=0.1) } if(ALWAYS) { # platform dependent #' version-checking now <- Sys.Date() versioncurrency.spatstat(now + 80, FALSE) versioncurrency.spatstat(now + 140, FALSE) versioncurrency.spatstat(now + 400, FALSE) versioncurrency.spatstat(now + 1000) } if(FULLTEST) { #' general Ord interaction gradual <- function(d, pars) { y <- pmax(0, 0.005 - d)/0.005 if(is.matrix(d)) y <- matrix(y, nrow(d), ncol(d)) return(y) } B <- Ord(gradual, "gradual Ord process") } }) ## ## tests/updateppm.R ## ## Check validity of update.ppm ## ## $Revision: 1.7 $ $Date: 2020/11/02 07:07:42 $ local({ if(ALWAYS) { require(spatstat.utils) h <- function(m1, m2) { mc <- short.deparse(sys.call()) cat(paste(mc, "\t... 
")) m1name <- short.deparse(substitute(m1)) m2name <- short.deparse(substitute(m2)) if(!identical(names(coef(m1)), names(coef(m2)))) stop(paste("Differing results for", m1name, "and", m2name, "in updateppm.R"), call.=FALSE) cat("OK\n") } X <- redwood[c(TRUE,FALSE)] Y <- redwood[c(FALSE,TRUE)] fit0f <- ppm(X ~ 1, nd=8) fit0p <- ppm(X, ~1, nd=8) fitxf <- ppm(X ~ x, nd=8) fitxp <- ppm(X, ~x, nd=8) cat("Basic consistency ...\n") h(fit0f, fit0p) h(fitxf, fitxp) cat("\nTest correct handling of model formulas ...\n") h(update(fitxf, Y), fitxf) h(update(fitxf, Q=Y), fitxf) h(update(fitxf, Y~x), fitxf) h(update(fitxf, Q=Y~x), fitxf) h(update(fitxf, ~x), fitxf) } if(FULLTEST) { h(update(fitxf, Y~1), fit0f) h(update(fitxf, ~1), fit0f) h(update(fit0f, Y~x), fitxf) h(update(fit0f, ~x), fitxf) h(update(fitxp, Y), fitxp) h(update(fitxp, Q=Y), fitxp) h(update(fitxp, Y~x), fitxp) h(update(fitxp, Q=Y~x), fitxp) h(update(fitxp, ~x), fitxp) h(update(fitxp, Y~1), fit0p) h(update(fitxp, ~1), fit0p) h(update(fit0p, Y~x), fitxp) h(update(fit0p, ~x), fitxp) } if(ALWAYS) { cat("\nTest scope handling for left hand side ...\n") X <- Y h(update(fitxf), fitxf) } if(ALWAYS) { cat("\nTest scope handling for right hand side ...\n") Z <- distmap(X) fitZf <- ppm(X ~ Z) fitZp <- ppm(X, ~ Z) h(update(fitxf, X ~ Z), fitZf) } if(FULLTEST) { h(update(fitxp, X ~ Z), fitZp) h(update(fitxf, . ~ Z), fitZf) h(update(fitZf, . ~ x), fitxf) h(update(fitZf, . ~ . - Z), fit0f) h(update(fitxp, . ~ Z), fitZp) h(update(fitZp, . ~ . - Z), fit0p) h(update(fit0p, . ~ . + Z), fitZp) h(update(fitZf, . ~ . ), fitZf) h(update(fitZp, . ~ . ), fitZp) } if(ALWAYS) { cat("\nTest use of internal data ...\n") h(update(fitZf, ~ x, use.internal=TRUE), fitxf) fitsin <- update(fitZf, X~sin(Z)) h(update(fitZf, ~ sin(Z), use.internal=TRUE), fitsin) } if(FULLTEST) { cat("\nTest step() ... 
") fut <- ppm(X ~ Z + x + y, nd=8) fut0 <- step(fut, trace=0) cat("OK\n") } }) # # tests/vcovppm.R # # Check validity of vcov.ppm algorithms # # Thanks to Ege Rubak # # $Revision: 1.12 $ $Date: 2020/05/02 01:32:58 $ # local({ set.seed(42) X <- rStrauss(200, .5, .05) model <- ppm(X, inter = Strauss(.05)) if(ALWAYS) { b <- vcov(model, generic = TRUE, algorithm = "basic") v <- vcov(model, generic = TRUE, algorithm = "vector") vc <- vcov(model, generic = TRUE, algorithm = "vectorclip") vn <- vcov(model, generic = FALSE) disagree <- function(x, y, tol=1e-7) { max(abs(x-y)) > tol } asymmetric <- function(x) { disagree(x, t(x)) } if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm") if(asymmetric(vn)) stop("Non-symmetric matrix produced by vcov.ppm Strauss algorithm") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' ") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' ") if(disagree(vn, vc)) stop("Disagreement between vcov.ppm generic and Strauss algorithms") } if(ALWAYS) { # C code ## Geyer code xx <- c(0.7375956, 0.6851697, 0.6399788, 0.6188382) yy <- c(0.5816040, 0.6456319, 0.5150633, 0.6191592) Y <- ppp(xx, yy, window=square(1)) modelY <- ppm(Y ~1, Geyer(0.1, 1)) b <- vcov(modelY, generic = TRUE, algorithm = "basic") v <- vcov(modelY, generic = TRUE, algorithm = "vector") vc <- vcov(modelY, generic = TRUE, algorithm = "vectorclip") if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm for Geyer model") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm for Geyer model") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm for Geyer model") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' for Geyer model") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' for Geyer model") } if(ALWAYS) { # C code ## tests of 'deltasuffstat' code ## Handling of offset terms modelH <- ppm(cells ~x, Hardcore(0.05)) a <- vcov(modelH, generic=TRUE) ## may fall over b <- vcov(modelH, generic=FALSE) if(disagree(a, b)) stop("Disagreement between vcov.ppm algorithms for Hardcore model") ## Correctness of pairwise.family$delta2 modelZ <- ppm(amacrine ~1, MultiStrauss(radii=matrix(0.1, 2, 2))) b <- vcov(modelZ, generic=FALSE) g <- vcov(modelZ, generic=TRUE) if(disagree(b, g)) stop("Disagreement between vcov.ppm algorithms for MultiStrauss model") ## Test that 'deltasuffstat' works for Hybrids modelHyb <- ppm(japanesepines ~ 1, Hybrid(Strauss(0.05), Strauss(0.1))) vHyb <- vcov(modelHyb) } if(FULLTEST) { ## Code blocks for other choices of 'what' model <- ppm(X ~ 1, Strauss(.05)) cG <- vcov(model, what="corr") cP <- vcov(update(model, Poisson()), what="corr") ## outdated usage cX <- vcov(model, A1dummy=TRUE) ## Model with zero-length coefficient vector lam <- intensity(X) f <- function(x,y) { rep(lam, length(x)) } model0 <- ppm(X ~ offset(log(f)) - 1) dd <- vcov(model0) cc <- vcov(model0, what="corr") ## Model with NA coefficients fit <- ppm(X ~ log(f)) vcov(fit) fitE <- emend(fit, trace=TRUE) ## Other weird stuff su <- suffloc(ppm(X ~ x)) } }) spatstat.core/tests/testsQ.R0000644000176200001440000000065414141452520015617 0ustar liggesusers#' #' Header for 
all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) spatstat.core/tests/testsEtoF.R0000644000176200001440000004362014141452520016254 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/envelopes.R # # Test validity of envelope data # # $Revision: 1.24 $ $Date: 2020/11/02 06:53:20 $ # local({ checktheo <- function(fit) { fitname <- deparse(substitute(fit)) en <- envelope(fit, nsim=4, verbose=FALSE, nrep=1e3) nama <- names(en) expecttheo <- is.poisson(fit) && is.stationary(fit) context <- paste("Envelope of", fitname) if(expecttheo) { if(!("theo" %in% nama)) stop(paste(context, "did not contain", sQuote("theo"))) if("mmean" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("mmean"))) } else { if("theo" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("theo"))) if(!("mmean" %in% nama)) stop(paste(context, "did not contain", sQuote("mmean"))) } cat(paste(context, "has correct format\n")) } if(ALWAYS) { checktheo(ppm(cells ~x)) } if(FULLTEST) { checktheo(ppm(cells)) checktheo(ppm(cells ~1, Strauss(0.1))) } # check envelope calls from 'alltypes' if(ALWAYS) a <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE) if(FULLTEST) b <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE, global=TRUE) # check 'transform' idioms if(ALWAYS) A <- envelope(cells, Kest, nsim=4, transform=expression(. 
- .x)) if(FULLTEST) B <- envelope(cells, Kest, nsim=4, transform=expression(sqrt(./pi) - .x)) #' check savefuns/savepatterns with global fit <- ppm(cells~x) if(ALWAYS) Ef <- envelope(fit, Kest, nsim=4, savefuns=TRUE, global=TRUE) if(FULLTEST) Ep <- envelope(fit, Kest, nsim=4, savepatterns=TRUE, global=TRUE) #' check handling of 'dangerous' cases if(FULLTEST) { fut <- ppm(redwood ~ x) Ek <- envelope(fut, Kinhom, update=FALSE, nsim=4) kfut <- kppm(redwood3 ~ x) Ekk <- envelope(kfut, Kinhom, lambda=density(redwood3), nsim=7) } # check conditional simulation if(FULLTEST) { e1 <- envelope(cells, Kest, nsim=4, fix.n=TRUE) e2 <- envelope(amacrine, Kest, nsim=4, fix.n=TRUE) e3 <- envelope(amacrine, Kcross, nsim=4, fix.marks=TRUE) e4 <- envelope(finpines, Kest, nsim=4, fix.n=TRUE) # multiple columns of marks e5 <- envelope(finpines, Kest, nsim=4, fix.marks=TRUE) } if(ALWAYS) { # invokes C code fit <- ppm(japanesepines ~ 1, Strauss(0.04)) e6 <- envelope(fit, Kest, nsim=4, fix.n=TRUE) fit2 <- ppm(amacrine ~ 1, Strauss(0.03)) e7 <- envelope(fit2, Gcross, nsim=4, fix.marks=TRUE) } # check pooling of envelopes in global case E1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE) E2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE) p12 <- pool(E1, E2) p12 <- pool(E1, E2, savefuns=TRUE) if(FULLTEST) { F1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, savepatterns=TRUE, global=TRUE) F2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, savepatterns=TRUE, global=TRUE) p12 <- pool(F1, F2) p12 <- pool(F1, F2, savefuns=TRUE, savepatterns=TRUE) E1r <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) E2r <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) p12r <- pool(E1r, E2r) } }) if(FULLTEST) { local({ #' as.data.frame.envelope Nsim <- 5 E <- envelope(cells, nsim=Nsim, savefuns=TRUE) A <- as.data.frame(E) B <- as.data.frame(E, simfuns=TRUE) stopifnot(ncol(B) - ncol(A) == Nsim) }) } if(FULLTEST) { local({ #' cases not covered elsewhere A <- envelope(cells, nsim=5, alternative="less", do.pwrong=TRUE, use.theory=FALSE, savepatterns=TRUE, savefuns=TRUE) print(A) B <- envelope(A, nsim=5, savefuns=TRUE) D <- envelope(cells, "Lest", nsim=5) UU <- envelope(cells, nsim=5, foreignclass="ppp", clipdata=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", global=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="less", global=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", VARIANCE=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", VARIANCE=TRUE) fit <- ppm(cells ~ 1, Strauss(0.07)) U <- envelope(fit, nsim=3, simulate=expression(runifpoint(20))) kfit <- kppm(redwood3 ~ x) UU <- envelope(kfit, nsim=7, simulate=expression(simulate(kfit, drop=TRUE))) VV <- envelope(kfit, nsim=7, weights=1:7) MM <- envelope(kfit, nsim=7, Kinhom, lambda=density(redwood3)) #' envelopes based on sample variance E <- envelope(cells, nsim=8, VARIANCE=TRUE) G <- envelope(cells, nsim=8, VARIANCE=TRUE, use.theory=FALSE, do.pwrong=TRUE) print(G) #' summary method summary(E) summary(envelope(cells, nsim=5, simulate=expression(runifpoint(42)))) #' weights argument H1 <- envelope(cells, nsim=4, weights=npoints, savefuns=TRUE) H2 <- envelope(cells, nsim=4, weights=npoints, savefuns=TRUE) J1 <- envelope(cells, nsim=4, weights=npoints, VARIANCE=TRUE) J2 <- envelope(cells, nsim=4, weights=npoints, VARIANCE=TRUE) #' pooling with weights H <- pool(H1, H2) J <- pool(J1, J2) #' pooling envelopes with non-identical 
attributes
    H0 <- envelope(cells, nsim=4, savefuns=TRUE)
    HH <- pool(H0, H1)
    #' undocumented/secret
    K <- envelope(cells, nsim=4, saveresultof=npoints, collectrubbish=TRUE)
    #' so secret I've even forgotten how to do it
    M <- envelope(cells, nsim=4, internal=list(eject="patterns"))
  })
}
if(FULLTEST) {
  local({
    #' envelope computations in other functions
    P <- lurking(cells, expression(x), envelope=TRUE, nsim=9)
    print(P)
    #' re-using envelope objects in other functions
    A <- envelope(cells, nsim=9, savepatterns=TRUE, savefuns=TRUE)
    S <- lurking(cells, expression(x), envelope=A, nsim=9)
    #' envelope.envelope
    B <- envelope(cells, nsim=5, savepatterns=TRUE, savefuns=FALSE)
    envelope(B)
  })
}
if(ALWAYS) {
  local({
    #' Test robustness of envelope() sorting procedure when NA's are present
    #' Fails with spatstat.utils 1.12-0
    set.seed(42)
    EP <- envelope(longleaf, pcf, nsim=10, nrank=2)
    #' Test case when the maximum permitted number of failures is exceeded
    X <- amacrine[1:153] # contains exactly one point with mark='off'
    #' High probability of generating a pattern with no marks = 'off'
    E <- envelope(X, Kcross, nsim=39, maxnerr=2, maxerr.action="warn")
    A <- alltypes(X, Kcross, envelope=TRUE, nsim=39, maxnerr=2)
  })
}
if(ALWAYS) {
  local({
    #' Internals: envelope.matrix
    Y <- matrix(rnorm(200), 10, 20)
    rr <- 1:10
    oo <- rnorm(10)
    zz <- numeric(10)
    E <- envelope(Y, rvals=rr, observed=oo, nsim=10)
    E <- envelope(Y, rvals=rr, observed=oo, jsim=1:10)
    E <- envelope(Y, rvals=rr, observed=oo, theory=zz,
                  type="global", use.theory=TRUE)
    E <- envelope(Y, rvals=rr, observed=oo, theory=zz,
                  type="global", use.theory=TRUE, nsim=10)
    E <- envelope(Y, rvals=rr, observed=oo, theory=zz,
                  type="global", use.theory=FALSE, nsim=10)
    E <- envelope(Y, rvals=rr, observed=oo, type="global", nsim=10, nsim2=10)
    E <- envelope(Y, rvals=rr, observed=oo, type="global",
                  jsim=1:10, jsim.mean=11:20)
    if(FULLTEST) print(E)
    E <- envelope(Y, rvals=rr, observed=oo, type="global",
                  nsim=10, jsim.mean=11:20)
    E <- envelope(Y, rvals=rr, observed=oo, type="global",
                  jsim=1:10, nsim2=10)
  })
}
if(ALWAYS) {
  local({
    #' quirk with handmade summary functions ('conserve' attribute)
    Kdif <- function(X, r=NULL) { # note no ellipsis
      Y <- split(X)
      K1 <- Kest(Y[[1]], r=r)
      K2 <- Kest(Y[[2]], r=r)
      D <- eval.fv(K1-K2)
      return(D)
    }
    envelope(amacrine, Kdif, nsim=3)
  })
}
#' tests/enveltest.R
#'     Envelope tests (dclf.test, mad.test)
#'     and two-stage tests (bits.test, dg.test, bits.envelope, dg.envelope)
#'
#'     $Revision: 1.3 $ $Date: 2020/04/28 12:58:26 $
#'
if(FULLTEST) {
  local({
    #' handling of NA function values (due to empty point patterns)
    set.seed(1234)
    X <- rThomas(5, 0.05, 10)
    fit <- kppm(X ~ 1, "Thomas")
    set.seed(100000)
    dclf.test(fit)
    set.seed(909)
    dg.test(fit, nsim=9)
    #' other code blocks
    dclf.test(fit, rinterval=c(0, 3), nsim=9)
    envelopeTest(X, exponent=3, clamp=TRUE, nsim=9)
  })
}
#
# tests/fastgeyer.R
#
# checks validity of fast C implementation of Geyer interaction
#
# $Revision: 1.4 $ $Date: 2020/04/28 12:58:26 $
#
if(FULLTEST) { # depends on hardware
  local({
    X <- redwood
    Q <- quadscheme(X)
    U <- union.quad(Q)
    EP <- equalpairs.quad(Q)
    G <- Geyer(0.11, 2)
    # The value r=0.11 is chosen to avoid hardware numerical effects (gcc bug 323).
    # It avoids being close to any value of pairdist(redwood).
    # The nearest such values are 0.1077.. and 0.1131..
# By contrast if r = 0.1 there are values differing from 0.1 by 3e-17 a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match") # ... # and again for a non-integer value of 'sat' # (spotted by Thordis Linda Thorarinsdottir) G <- Geyer(0.11, 2.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat is not an integer") # and again for sat < 1 # (spotted by Rolf) G <- Geyer(0.11, 0.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat < 1") }) } # # tests/fastK.R # # check fast and slow code for Kest # and options not tested elsewhere # # $Revision: 1.5 $ $Date: 2020/04/28 12:58:26 $ # if(ALWAYS) { local({ ## fast code Kb <- Kest(cells, nlarge=0) Ku <- Kest(cells, correction="none") Kbu <- Kest(cells, correction=c("none", "border")) ## slow code, full set of corrections, sqrt transformation, ratios Ldd <- Lest(unmark(demopat), correction="all", var.approx=TRUE, ratio=TRUE) ## Lotwick-Silverman var approx (rectangular window) Loo <- Lest(cells, correction="all", var.approx=TRUE, ratio=TRUE) ## Code for large dataset nbig <- .Machine$integer.max if(!is.null(nbig)) { nn <- ceiling(sqrt(nbig)) if(nn < 1e6) Kbig <- Kest(runifpoint(nn), correction=c("border", "bord.modif", "none"), ratio=TRUE) } ## Kinhom lam <- density(cells, at="points", leaveoneout=TRUE) ## fast code Kib <- Kinhom(cells, lam, nlarge=0) Kiu <- Kest(cells, lam, correction="none") Kibu <- Kest(cells, lam, correction=c("none", "border")) ## slow code Lidd <- Linhom(unmark(demopat), sigma=bw.scott) }) } #' tests/formuli.R #' #' Test machinery for manipulating formulae #' #' $Revision: 1.7 $ $Date: 2020/04/28 12:58:26 $ local({ ff <- function(A, deletevar, B) { D <- reduceformula(A, deletevar) if(!spatstat.utils::identical.formulae(D, B)) { AD <- as.expression(substitute(reduceformula(A,d), list(A=A, d=deletevar))) stop(paste(AD, "\n\tyields ", spatstat.utils::pasteFormula(D), " instead of ", spatstat.utils::pasteFormula(B)), call.=FALSE) } invisible(NULL) } ff(~ x + z, "x", ~z) ff(y ~ x + z, "x", y~z) ff(~ I(x^2) + z, "x", ~z) ff(y ~ poly(x,2) + poly(z,3), "x", y ~poly(z,3)) ff(y ~ x + z, "g", y ~ x + z) reduceformula(y ~ x+z, "g", verbose=TRUE) reduceformula(y ~ sin(x-z), "z", verbose=TRUE) illegal.iformula(~str*g, itags="str", dfvarnames=c("marks", "g", "x", "y")) }) ## ## tests/funnymarks.R ## ## tests involving strange mark values ## $Revision: 1.7 $ $Date: 2020/04/28 12:58:26 $ if(ALWAYS) { # depends on locale local({ ## ppm() where mark levels contain illegal characters hyphenated <- c("a", "not-a") spaced <- c("U", "non U") suffixed <- c("a+", "a*") charred <- c("+", "*") irad <- matrix(0.1, 2,2) hrad <- matrix(0.005, 2, 2) tryit <- function(types, X, irad, hrad) { levels(marks(X)) <- types fit <- ppm(X ~marks + polynom(x,y,2), MultiStraussHard(types=types,iradii=irad,hradii=hrad)) print(fit) print(coef(fit)) val <- fitted(fit) pred <- predict(fit) return(invisible(NULL)) } tryit(hyphenated, amacrine, irad, hrad) tryit(spaced, amacrine, irad, hrad) tryit(suffixed, amacrine, irad, hrad) tryit(charred, amacrine, irad, hrad) ## marks which are dates X <- cells n <- npoints(X) endoftime <- rep(ISOdate(2001,1,1), n) eotDate <- 
rep(as.Date("2001-01-01"), n) markformat(endoftime) markformat(eotDate) marks(X) <- endoftime print(X) Y <- X %mark% data.frame(id=1:42, date=endoftime, dd=eotDate) print(Y) md <- markformat(endoftime) ## mark formats Z <- Y marks(Z) <- marks(Z)[1,,drop=FALSE] ms <- markformat(solist(cells, redwood)) marks(Z) <- factor(1:npoints(Z)) marks(Z)[12] <- NA mz <- is.multitype(Z) cZ <- coerce.marks.numeric(Z) marks(Z) <- data.frame(n=1:npoints(Z), a=factor(sample(letters, npoints(Z), replace=TRUE))) cZ <- coerce.marks.numeric(Z) stopifnot(is.multitype(cells %mark% data.frame(a=factor(1:npoints(cells))))) a <- numeric.columns(finpines) b1 <- numeric.columns(amacrine) b2 <- coerce.marks.numeric(amacrine) d <- numeric.columns(cells) f <- numeric.columns(longleaf) ff <- data.frame(a=factor(letters[1:10]), y=factor(sample(letters, 10))) numeric.columns(ff) ## mark operations df <- data.frame(x=1:2, y=sample(letters, 2)) h <- hyperframe(z=1:2, p=solist(cells, cells)) a <- NULL %mrep% 3 a <- 1:4 %mrep% 3 a <- df %mrep% 3 a <- h %mrep% 3 b <- markcbind(df, h) b <- markcbind(h, df) }) } ## ## tests/fvproblems.R ## ## problems with fv, ratfv and fasp code ## ## $Revision: 1.15 $ $Date: 2020/04/28 12:58:26 $ #' This appears in the workshop notes #' Problem detected by Martin Bratschi if(FULLTEST) { local({ Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } Z <- Jdif(amacrine, i="on") }) } #' #' Test mathlegend code #' local({ K <- Kest(cells) if(FULLTEST) { plot(K) plot(K, . ~ r) plot(K, . - theo ~ r) } if(ALWAYS) { plot(K, sqrt(./pi) ~ r) } if(FULLTEST) { plot(K, cbind(iso, theo) ~ r) plot(K, cbind(iso, theo) - theo ~ r) plot(K, sqrt(cbind(iso, theo)/pi) ~ r) plot(K, cbind(iso/2, -theo) ~ r) plot(K, cbind(iso/2, trans/2) - theo ~ r) } if(FULLTEST) { ## test expansion of .x and .y plot(K, . ~ .x) plot(K, . 
- theo ~ .x) plot(K, .y - theo ~ .x) } if(ALWAYS) { plot(K, sqrt(.y) - sqrt(theo) ~ .x) } # problems with parsing weird strings in levels(marks(X)) # noted by Ulf Mehlig if(ALWAYS) { levels(marks(amacrine)) <- c("Nasticreechia krorluppia", "Homo habilis") plot(Kcross(amacrine)) plot(alltypes(amacrine, "K")) } if(FULLTEST) { plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, pcfcross)) } }) #' #' Test quirks related to 'alim' attribute if(FULLTEST) { local({ K <- Kest(cells) attr(K, "alim") <- NULL plot(K) attr(K, "alim") <- c(0, 0.1) plot(tail(K)) }) } #' #' Check that default 'r' vector passes the test for fine spacing if(ALWAYS) { local({ a <- Fest(cells) A <- Fest(cells, r=a$r) b <- Hest(heather$coarse) B <- Hest(heather$coarse, r=b$r) # from Cenk Icos X <- runifpoint(100, owin(c(0,3), c(0,10))) FX <- Fest(X) FXr <- Fest(X, r=FX$r) JX <- Jest(X) }) } ##' various functionality in fv.R if(ALWAYS) { local({ M <- cbind(1:20, matrix(runif(100), 20, 5)) A <- as.fv(M) fvlabels(A) <- c("r","%s(r)", "%s[A](r)", "%s[B](r)", "%s[C](r)", "%s[D](r)") A <- rename.fv(A, "M", quote(M(r))) A <- tweak.fv.entry(A, "V1", new.tag="r") A[,3] <- NULL A$hogwash <- runif(nrow(A)) fvnames(A, ".") <- NULL #' bind.fv with qualitatively different functions GK <- harmonise(G=Gest(cells), K=Kest(cells)) G <- GK$G K <- GK$K ss <- c(rep(TRUE, nrow(K)-10), rep(FALSE, 10)) U <- bind.fv(G, K[ss, ], clip=TRUE) #' H <- rebadge.as.crossfun(K, "H", "inhom", 1, 2) H <- rebadge.as.dotfun(K, "H", "inhom", 3) #' text layout op <- options(width=27) print(K) options(width=18) print(K) options(op) #' collapse.fv Kb <- Kest(cells, correction="border") Ki <- Kest(cells, correction="isotropic") collapse.fv(Kb, Ki, same="theo") collapse.fv(anylist(B=Kb, I=Ki), same="theo") collapse.fv(anylist(B=Kb), I=Ki, same="theo") Xlist <- replicate(3, runifpoint(30), simplify=FALSE) Klist <- anylapply(Xlist, Kest) collapse.fv(Klist, same="theo", different=c("iso", "border")) names(Klist) <- LETTERS[24:26] collapse.fv(Klist, same="theo", different=c("iso", "border")) }) } if(FULLTEST) { local({ ## rat K <- Kest(cells, ratio=TRUE) G <- Gest(cells, ratio=TRUE) print(K) compatible(K, K) compatible(K, G) H <- rat(K, attr(K, "numerator"), attr(K, "denominator"), check=TRUE) }) } if(FULLTEST) { local({ ## bug in Jmulti.R colliding with breakpts.R B <- owin(c(0,3), c(0,10)) Y <- superimpose(A=runifpoint(1212, B), B=runifpoint(496, B)) JDX <- Jdot(Y) JCX <- Jcross(Y) Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } E <- envelope(Y, Jdif, nsim=19, i="A", simulate=expression(rlabel(Y))) }) } if(FULLTEST) { local({ #' fasp axes, title, dimnames a <- alltypes(amacrine) a$title <- NULL plot(a, samex=TRUE, samey=TRUE) dimnames(a) <- lapply(dimnames(a), toupper) b <- as.fv(a) }) } if(FULLTEST) { local({ ## plot.anylist (fv) b <- anylist(A=Kcross(amacrine), B=Kest(amacrine)) plot(b, equal.scales=TRUE, main=expression(sqrt(pi))) plot(b, arrange=FALSE) }) } spatstat.core/tests/testsS.R0000644000176200001440000001571314141452520015623 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/sdr.R #' #' $Revision: 1.2 $ $Date: 2020/05/01 09:59:59 $ if(FULLTEST) { local({ AN <- sdr(bei, bei.extra, method="NNIR") AV <- sdr(bei, bei.extra, method="SAVE") AI <- sdr(bei, bei.extra, method="SIR") AT <- sdr(bei, bei.extra, method="TSE") subspaceDistance(AN$B, AV$B) dimhat(AN$M) }) } ## ## tests/segments.R ## Tests of psp class and related code ## [SEE ALSO: tests/xysegment.R] ## ## $Revision: 1.32 $ $Date: 2020/12/04 05:26:31 $ local({ if(ALWAYS) { # C code #' tests of density.psp Y <- edges(letterR) Window(Y) <- grow.rectangle(Frame(Y), 0.4) YC <- density(Y, 0.2, method="C", edge=FALSE, dimyx=64) YI <- density(Y, 0.2, method="interpreted", edge=FALSE, dimyx=64) YF <- density(Y, 0.2, method="FFT", edge=FALSE, dimyx=64) xCI <- max(abs(YC/YI - 1)) xFI <- max(abs(YF/YI - 1)) cat(paste("xCI =", xCI, "\txFI =", signif(xFI, 5)), fill=TRUE) if(xCI > 0.01) stop(paste("density.psp C algorithm relative error =", xCI)) if(xFI > 0.1) stop(paste("density.psp FFT algorithm relative error =", xFI)) B <- square(0.3) density(Y, 0.2, at=B) density(Y, 0.2, at=B, edge=TRUE, method="C") Z <- runifrect(3, B) density(Y, 0.2, at=Z) density(Y, 0.2, at=Z, edge=TRUE, method="C") } if(FULLTEST) { #' segment clipping in window (bug found by Rolf) set.seed(42) X <- runifpoint(50, letterR) SP <- dirichletEdges(X) #' clip to polygonal window Window(X) <- as.mask(Window(X)) SM <- dirichletEdges(X) #' clip to mask window } if(FULLTEST) { #' test rshift.psp and append.psp with marks (Ute Hahn) m <- data.frame(A=1:10, B=letters[1:10]) g <- gl(3, 3, length=10) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) Y <- rshift(X, radius = 0.1) Y <- rshift(X, radius = 0.1, group=g) #' mark management b <- data.frame(A=1:10) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=b) stopifnot(is.data.frame(marks(X))) Y <- rshift(X, radius = 0.1) Y <- rshift(X, radius = 0.1, group=g) } }) # ## tests/sigtraceprogress.R # ## Tests of *.sigtrace and *.progress # ## $Revision: 1.5 $ $Date: 2020/05/01 09:59:59 $ if(FULLTEST) { local({ plot(dclf.sigtrace(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dclf.progress(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.sigtrace(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.progress(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) ## test 'leave-two-out' algorithm a <- dclf.sigtrace(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) aa <- dclf.progress(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) b <- dg.sigtrace(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2) bb <- dg.progress(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2, verbose=FALSE) ## other code blocks e <- mad.progress(redwood, nsim=5) e <- mad.progress(redwood, nsim=19, alpha=0.05) f <- dclf.progress(redwood, nsim=5, scale=function(x) x^2) f <- dclf.progress(redwood, nsim=5, normalize=TRUE, deflate=TRUE) g <- dg.progress(redwood, nsim=5, scale=function(x) x^2) g <- dg.progress(redwood, nsim=5, normalize=TRUE, deflate=TRUE) }) } # # tests/slrm.R # # $Revision: 1.3 $ $Date: 2020/05/01 09:59:59 $ # # Test slrm fitting and prediction when there are NA's # if(ALWAYS) { 
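## [Added sketch, not part of the original tests] For context: slrm()
## fits a spatial logistic regression by discretising the window into
## pixels and modelling pixel presence/absence, so predict() on a fitted
## 'slrm' object returns a pixel image. A minimal illustration (the
## model formula here is an arbitrary choice):
demofit <- slrm(redwood ~ x + y)
demopred <- predict(demofit)   # pixel image of fitted intensity
stopifnot(is.im(demopred))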
local({ X <- copper$SouthPoints W <- owin(poly=list(x=c(0,35,35,1),y=c(1,1,150,150))) Y <- X[W] fit <- slrm(Y ~ x+y) pred <- predict(fit) extractAIC(fit) fitx <- update(fit, . ~ x) simulate(fitx, seed=42) if(FULLTEST) { unitname(fitx) unitname(fitx) <- "km" mur <- solapply(murchison,rescale, 1000, "km") mur$dfault <- distfun(mur$faults) fut <- slrm(gold ~ dfault, data=mur, splitby="greenstone") A <- model.images(fut) } }) } #' #' tests/ssf.R #' #' Tests of 'ssf' class #' #' $Revision: 1.5 $ $Date: 2020/12/04 08:02:25 $ #' if(FULLTEST) { local({ Y <- cells[1:5] X <- rsyst(Window(Y), 5) Z <- runifpoint(3, Window(Y)) f1 <- ssf(X, nncross(X,Y,what="dist")) f2 <- ssf(X, nncross(X,Y,what="dist", k=1:2)) image(f1) g1 <- as.function(f1) g1(Z) g2 <- as.function(f2) g2(Z) plot(f1, style="contour") plot(f1, style="imagecontour") contour(f1) apply.ssf(f2, 1, sum) range(f1) min(f1) max(f1) integral(f1, weights=tile.areas(dirichlet(X))) }) } # # tests/step.R # # $Revision: 1.5 $ $Date: 2020/05/01 09:59:59 $ # # test for step() operation # if(FULLTEST) { local({ Z <- as.im(function(x,y){ x^3 - y^2 }, nztrees$window) fitP <- ppm(nztrees ~x+y+Z, covariates=list(Z=Z)) step(fitP) fitS <- update(fitP, Strauss(7)) step(fitS) fitM <- ppm(amacrine ~ marks*(x+y), MultiStrauss(types=levels(marks(amacrine)), radii=matrix(0.04, 2, 2))) step(fitM) }) } #' #' tests/sumfun.R #' #' Tests of code for summary functions #' including score residual functions etc #' #' $Revision: 1.6 $ $Date: 2020/05/01 09:59:59 $ if(ALWAYS) { # involves C code local({ W <- owin(c(0,1), c(-1/2, 0)) Gr <- Gest(redwood, correction="all",domain=W) Fr <- Fest(redwood, correction="all",domain=W) Jr <- Jest(redwood, correction="all",domain=W) F0 <- Fest(redwood[FALSE], correction="all") Fh <- Fest(humberside, domain=erosion(Window(humberside), 100)) FIr <- Finhom(redwood, savelambda=TRUE) JIr <- Jinhom(redwood, savelambda=TRUE) Ga <- Gcross(amacrine, correction="all") Ia <- Iest(amacrine, correction="all") lam <- intensity(amacrine) lmin <- 0.9 * min(lam) nJ <- sum(marks(amacrine) == "off") FM <- FmultiInhom(amacrine, marks(amacrine) == "off", lambdaJ=rep(lam["off"], nJ), lambdamin = lmin) GM <- GmultiInhom(amacrine, marks(amacrine) == "on", marks(amacrine) == "off", lambda=lam[marks(amacrine)], lambdamin=lmin, ReferenceMeasureMarkSetI=42) pt <- psst(cells, interaction=Strauss(0.1), fun=nndcumfun) a <- compileCDF(D=nndist(redwood), B=bdist.points(redwood), r=seq(0, 1, length=256)) ## distance argument spacing and breakpoints e <- check.finespacing(c(0,1,2), eps=0.1, action="silent") b <- as.breakpts(pi, 20) b <- as.breakpts(42, max=pi, npos=20) b <- even.breaks.owin(letterR) }) } spatstat.core/tests/testsP1.R0000644000176200001440000000251114141452520015671 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/percy.R ## ## Tests of Percus-Yevick approximations ## ## $Revision: 1.3 $ $Date: 2020/04/30 05:23:52 $ if(FULLTEST) { local({ fit <- ppm(swedishpines ~1, DiggleGatesStibbard(6)) K <- Kmodel(fit) }) } ## ## tests/pixelgripes.R ## Problems related to pixellation of windows ## ## $Revision: 1.5 $ $Date: 2020/04/30 05:23:52 $ if(FULLTEST) { local({ ## From Philipp Hunziker: bug in rNeymanScott (etc) ## Create an irregular window PM <- matrix(c(1,0,0.5,1,0,0), 3, 2, byrow=TRUE) P <- owin(poly=PM) ## Generate Matern points X <- rMatClust(50, 0.05, 5, win=P) ## Some distance function as a covariate distorigin <- function(x, y) { sqrt(x^2 + y^2) } ## No covariates: works fine fit0 <- kppm(X ~ 1, clusters="MatClust") Y0 <- simulate(fit0, retry=0) ## Covariates: Simulation fails fit1 <- kppm(X ~ distorigin, clusters="MatClust") Y1 <- simulate(fit1, retry=0) }) } spatstat.core/tests/testsAtoC.R0000644000176200001440000001621514141452520016245 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/aucroc.R #' #' AUC and ROC code #' #' $Revision: 1.6 $ $Date: 2020/11/02 06:26:45 $ local({ if(FULLTEST) { fit <- kppm(redwood ~ I(y-x)) a <- roc(fit) b <- auc(fit) fet <- ppm(amacrine~x+y+marks) d <- roc(fet) e <- auc(fet) } }) ## tests/cdf.test.R local({ NSIM <- 9 op <- spatstat.options(ndummy.min=16, npixel=32) AA <- split(ants, un=FALSE) AC <- AA[["Cataglyphis"]] AM <- AA[["Messor"]] DM <- distmap(AM) if(ALWAYS) { ## (1) check cdf.test with strange data ## Marked point patterns with some marks not represented ## should produce a warning, rather than a crash: cdf.test(AC, DM) } if(FULLTEST) { ## should be OK: cdf.test(unmark(AC), DM) cdf.test(unmark(AC), DM, "cvm") cdf.test(unmark(AC), DM, "ad") ## other code blocks cdf.test(finpines, "x") } if(FULLTEST) { ## (2) Monte Carlo test for Gibbs model fit <- ppm(cells ~ 1, Strauss(0.07)) cdf.test(fit, "x", nsim=NSIM) ## cdf.test.slrm fut <- slrm(japanesepines ~ x + y) Z <- distmap(japanesepines) cdf.test(fut, Z) } reset.spatstat.options() }) #' tests/circular.R #' #' Circular data and periodic distributions #' #' $Revision: 1.4 $ $Date: 2020/04/28 12:58:26 $ local({ if(ALWAYS) { a <- pairorient(redwood, 0.05, 0.15, correction="none") rose(a) } if(FULLTEST) { b <- pairorient(redwood, 0.05, 0.15, correction="best") rose(b, start="N", clockwise=TRUE) } if(ALWAYS) { #' arcs on the circle #' (depends on numerical behaviour) set.seed(19171025) aa <- replicate(7, runif(1, 0, 2*pi) + c(0, runif(1, 0, pi)), simplify=FALSE) bb <- circunion(aa) assertsingle <- function(x, a, id) { y <- circunion(x) if(length(y) != 1 || max(abs(y[[1]] - a)) > .Machine$double.eps) stop(paste("Incorrect result from circunion in case", id), call.=FALSE) invisible(NULL) } assertsingle(list(c(pi/3, pi), c(pi/2, 3*pi/2)), c(pi/3, 3*pi/2), 1) assertsingle(list(c(0, pi/2), c(pi/4, pi)), c(0,pi), 2) assertsingle(list(c(-pi/4, pi/2), c(pi/4, pi)), 
c((2-1/4)*pi, pi), 3) } }) #' #' tests/closecore.R #' #' check 'closepairs/crosspairs' code #' invoked in core package #' #' $Revision: 1.4 $ $Date: 2021/04/17 04:16:43 $ #' #' ------- All this code must be run on every hardware ------- #' local({ #' weightedclosepairs is currently in strauss.R wi <- weightedclosepairs(redwood, 0.05, "isotropic") if(FULLTEST) { wt <- weightedclosepairs(redwood, 0.05, "translate") wp <- weightedclosepairs(redwood, 0.05, "periodic") } #' markmarkscatter uses closepairs.pp3 X <- runifpoint3(100) marks(X) <- runif(100) markmarkscatter(X, 0.2) if(FULLTEST) { markmarkscatter(X[FALSE], 0.2) } }) #' #' contact.R #' #' Check machinery for first contact distributions #' #' $Revision: 1.8 $ $Date: 2021/04/17 02:25:55 $ local({ if(ALWAYS) { #' reduce complexity Y <- as.mask(heather$coarse, dimyx=c(50, 25)) X <- runifpoint(100, win = complement.owin(Y)) if(FULLTEST) G <- Gfox(X, Y) J <- Jfox(X, Y) Y <- as.polygonal(Y) X <- runifpoint(100, win = complement.owin(Y)) if(FULLTEST) G <- Gfox(X, Y) J <- Jfox(X, Y) op <- spatstat.options(exactdt.checks.data=TRUE) U <- exactdt(X) spatstat.options(op) } }) reset.spatstat.options() #' #' tests/contrib.R #' #' Tests for user-contributed code in spatstat #' #' $Revision: 1.4 $ $Date: 2021/04/17 02:32:24 $ local({ #' Jinhom #' Marie-Colette van Lieshout and Ottmar Cronie X <- redwood3 if(FULLTEST) { fit <- ppm(X ~ polynom(x,y,2)) } else { X <- X[c(TRUE,FALSE)] spatstat.options(npixel=32, ndummy.min=16) fit <- ppm(X ~ x) } lam <- predict(fit) lamX <- fitted(fit, dataonly=TRUE) lmin <- 0.9 * min(lam) g1 <- Ginhom(X, lambda=fit, update=TRUE) if(FULLTEST) { g2 <- Ginhom(X, lambda=fit, update=FALSE, lmin = lmin) g3 <- Ginhom(X, lambda=lam, lmin=lmin) g4 <- Ginhom(X, lambda=lamX, lmin=lmin) } if(ALWAYS) { f2 <- Finhom(X, lambda=fit, update=FALSE) } if(FULLTEST) { f1 <- Finhom(X, lambda=fit, update=TRUE) f3 <- Finhom(X, lambda=lam, lmin=lmin) } if(!FULLTEST) reset.spatstat.options() }) # tests/correctC.R # check for agreement between C and interpreted code # for interpoint distances etc. 
# $Revision: 1.8 $ $Date: 2020/12/03 03:06:04 $ if(ALWAYS) { # depends on hardware local({ eps <- .Machine$double.eps * 4 checkagree <- function(A, B, blurb) { maxerr <- max(abs(A-B)) cat("Discrepancy", maxerr, "for", blurb, fill=TRUE) if(maxerr > eps) stop(paste("Algorithms for", blurb, "disagree")) return(TRUE) } ## pairdist.ppp set.seed(190901) ## X <- rpoispp(42) X <- runifrect(max(2, rpois(1, 42))) dC <- pairdist(X, method="C") dR <- pairdist(X, method="interpreted") checkagree(dC, dR, "pairdist()") dCp <- pairdist(X, periodic=TRUE, method="C") dRp <- pairdist(X, periodic=TRUE, method="interpreted") checkagree(dCp, dRp, "pairdist(periodic=TRUE)") dCp2 <- pairdist(X, periodic=TRUE, squared=TRUE, method="C") dRp2 <- pairdist(X, periodic=TRUE, squared=TRUE, method="interpreted") checkagree(dCp2, dRp2, "pairdist(periodic=TRUE, squared=TRUE)") ## crossdist.ppp ## Y <- rpoispp(42) Y <- runifrect(max(2, rpois(1, 42))) dC <- crossdist(X, Y, method="C") dR <- crossdist(X, Y, method="interpreted") checkagree(dC, dR, "crossdist()") dC <- crossdist(X, Y, periodic=TRUE, method="C") dR <- crossdist(X, Y, periodic=TRUE, method="interpreted") checkagree(dC, dR, "crossdist(periodic=TRUE)") dC2 <- crossdist(X, Y, periodic=TRUE, squared=TRUE, method="C") dR2 <- crossdist(X, Y, periodic=TRUE, squared=TRUE, method="interpreted") checkagree(dC2, dR2, "crossdist(periodic=TRUE, squared=TRUE)") # nndist.ppp nnC <- nndist(X, method="C") nnI <- nndist(X, method="interpreted") checkagree(nnC, nnI, "nndist()") nn3C <- nndist(X, k=3, method="C") nn3I <- nndist(X, k=3, method="interpreted") checkagree(nn3C, nn3I, "nndist(k=3)") # nnwhich.ppp nwC <- nnwhich(X, method="C") nwI <- nnwhich(X, method="interpreted") checkagree(nwC, nwI, "nnwhich()") nw3C <- nnwhich(X, k=3, method="C") nw3I <- nnwhich(X, k=3, method="interpreted") checkagree(nw3C, nw3I, "nnwhich(k=3)") # whist set.seed(98123) x <- runif(1000) w <- sample(1:5, 1000, replace=TRUE) b <- seq(0,1,length=101) op <- spatstat.options(Cwhist=TRUE) aT <- whist(x,b,w) spatstat.options(Cwhist=FALSE) aF <- whist(x,b,w) if(!all(aT == aF)) stop("Algorithms for whist disagree") spatstat.options(op) }) reset.spatstat.options() } spatstat.core/tests/testsT.R0000644000176200001440000001144514141452520015622 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/testaddvar.R # # test addvar options # # $Revision: 1.3 $ $Date: 2020/05/02 01:32:58 $ if(FULLTEST) { local({ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X ~y) addvar(model, "x", crosscheck=TRUE) addvar(model, "x", bw.input="quad") w <- square(0.5) addvar(model, "x", subregion=w) addvar(model, "x", subregion=w, bw.input="points") Z <- as.im(function(x,y) { x }, Window(X)) addvar(model, Z) }) } # # tests/testparres.R # # additional test of parres # # $Revision: 1.7 $ $Date: 2020/05/02 01:32:58 $ # if(FULLTEST) { local({ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) # options in parres (and code blocks in print.parres) parres(model, "x") parres(model, "x", smooth.effect=TRUE) parres(model, "x", bw.input="quad") w <- square(0.5) parres(model, "x", subregion=w) parres(model, "x", subregion=w, bw.input="quad") f <- function(x,y) { x + y } parres(model, f) # check whether 'update.ppm' has messed up internals mod2 <- update(model, ~x) parres(mod2, "x") #' other kinds of covariates mod3 <- ppm(X ~ x + offset(y)) parres(mod3, "offset(y)") Z <- distmap(runifpoint(3)) parres(mod3, Z) mod4 <- ppm(X ~ sin(x), data=solist(B=Z)) parres(mod4, "sin(x)") parres(mod4, "B") #' models with interaction mod5 <- ppm(cells ~ x, AreaInter(0.06)) parres(mod5, "x") dlin <- distfun(copper$SouthLines) copfit <- ppm(copper$SouthPoints ~ dlin, Geyer(1,1)) parres(copfit, "dlin") #' covariate need not be specified if there is only one. parres(mod5) parres(copfit) #' infrastructure ltuae <- evalCovariate(42, cells) LTUAE <- evalCovariate(ltuae, cells) fit <- ppm(amacrine ~ x * marks, nd=16) dmat <- model.depends(fit) check.separable(dmat, "x", c(x=FALSE, marks=FALSE), FALSE) check.separable(dmat, "x", c(FALSE, FALSE), FALSE) check.separable(dmat, "x", c(x=FALSE, marks=TRUE), FALSE) }) } #' #' tests/threedee.R #' #' Tests of 3D code #' #' $Revision: 1.8 $ $Date: 2020/05/02 01:32:58 $ #' local({ X <- runifpoint3(30) Y <- runifpoint3(20) if(FULLTEST) { A <- runifpoint3(10, nsim=2) Z <- ppsubset(X, 2:4) } ## if(ALWAYS) { # includes C code d <- pairdist(X, periodic=TRUE, squared=TRUE) d <- crossdist(X, Y, squared=TRUE) d <- crossdist(X, Y, squared=TRUE, periodic=TRUE) #' h <- has.close(X, 0.2) h <- has.close(X, 0.2, periodic=TRUE) h <- has.close(X, 0.2, Y=Y) h <- has.close(X, 0.2, Y=Y, periodic=TRUE) #' code blocks not otherwise reached rmax <- 0.6 * max(nndist(X)) g <- G3est(X, rmax=rmax, correction="rs") g <- G3est(X, rmax=rmax, correction="km") g <- G3est(X, rmax=rmax, correction="Hanisch") g <- G3est(X, rmax=rmax, sphere="ideal") g <- G3est(X, rmax=rmax, sphere="digital") v <- sphere.volume() v <- digital.volume() #' older code co <- coords(X) xx <- co$x yy <- co$y zz <- co$z gg1 <- g3engine(xx, yy, zz, correction="Hanisch G3") gg2 <- g3engine(xx, yy, zz, correction="minus sampling") ff1 <- f3engine(xx, yy, zz, correction="no") ff2 <- f3engine(xx, yy, zz, correction="minus sampling") } ## if(ALWAYS) { #'class support X <- runifpoint3(10) print(X) print(X %mark% runif(10)) print(X %mark% factor(letters[c(1:5,5:1)])) print(X %mark% data.frame(a=1:10, b=runif(10))) da <- as.Date(paste0("2020-01-0", c(1:5,5:1))) print(X %mark% da) print(X %mark% data.frame(a=1:10, b=da)) } }) # # tests/triplets.R # # test code for triplet 
interaction and associated summary function Tstat # # $Revision: 1.8 $ $Date: 2020/05/02 01:32:58 $ # if(ALWAYS) { # C code, platform dependence local({ #' valid model fit <- ppm(cells ~1, Triplets(0.1)) fit suffstat(fit) #' invalid model fitR <- ppm(redwood ~1, Triplets(0.1)) fitR suffstat(fitR) #' hard core (zero triangles, coefficient is NA) fit0 <- ppm(cells ~1, Triplets(0.05)) fit0 suffstat(fit0) #' bug case (1 triangle in data) fit1 <- ppm(cells ~1, Triplets(0.15)) fit1 suffstat(fit1) #' Tstat function, all code blocks a <- Tstat(redwood, ratio=TRUE, correction=c("none", "border", "bord.modif", "translate")) #' simulation X <- simulate(fit) mod <- list(cif="triplets",par=list(beta=50,gamma=0.2,r=0.07), w=square(1)) Xm <- rmh(model=mod,start=list(n.start=5), control=list(nrep=1e5)) #' hard core mod$par$gamma <- 0 XmHard <- rmh(model=mod,start=list(n.start=5), control=list(nrep=1e5)) }) } spatstat.core/tests/testsD.R0000644000176200001440000005434514145330757015623 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/deltasuffstat.R #' #' Explicit tests of 'deltasuffstat' #' #' $Revision: 1.4 $ $Date: 2021/01/22 08:08:48 $ if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) if(ALWAYS) { # depends on C code local({ disagree <- function(x, y, tol=1e-7) { !is.null(x) && !is.null(y) && max(abs(x-y)) > tol } flydelta <- function(model, modelname="") { ## Check execution of different algorithms for 'deltasuffstat' dSS <- deltasuffstat(model, sparseOK=TRUE) dBS <- deltasuffstat(model, sparseOK=TRUE, use.special=FALSE, force=TRUE) dBF <- deltasuffstat(model, sparseOK=FALSE, use.special=FALSE, force=TRUE) ## Compare results if(disagree(dBS, dSS)) stop(paste(modelname, "model: Brute force algorithm disagrees with special algorithm")) if(disagree(dBF, dBS)) stop(paste(modelname, "model: Sparse and full versions of brute force algorithm disagree")) return(invisible(NULL)) } modelS <- ppm(cells ~ x, Strauss(0.13), nd=10) flydelta(modelS, "Strauss") antsub <- ants[c(FALSE,TRUE,FALSE)] rmat <- matrix(c(130, 90, 90, 60), 2, 2) modelM <- ppm(antsub ~ 1, MultiStrauss(rmat), nd=16) flydelta(modelM, "MultiStrauss") modelA <- ppm(antsub ~ 1, HierStrauss(rmat, archy=c(2,1)), nd=16) flydelta(modelA, "HierStrauss") }) } reset.spatstat.options() #' #' tests/density.R #' #' Test behaviour of density() methods, #' relrisk(), Smooth() #' and inhomogeneous summary functions #' and idw, adaptive.density, intensity #' #' $Revision: 1.58 $ $Date: 2021/03/31 01:57:48 $ #' if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ # test all cases of density.ppp and densityfun.ppp tryit <- function(..., do.fun=TRUE, badones=FALSE) { Z <- density(cells, ..., at="pixels") Z <- density(cells, ..., at="points") if(do.fun) { f <- densityfun(cells, ...) 
U <- f(0.1, 0.3) if(badones) { U2 <- f(1.1, 0.3) U3 <- f(1.1, 0.3, drop=FALSE) } } return(invisible(NULL)) } if(ALWAYS) { tryit(0.05) tryit(0.05, diggle=TRUE) tryit(0.05, se=TRUE) tryit(0.05, weights=expression(x)) tryit(0.07, kernel="epa") tryit(sigma=Inf) tryit(0.05, badones=TRUE) } if(FULLTEST) { tryit(0.07, kernel="quartic") tryit(0.07, kernel="disc") tryit(0.07, kernel="epa", weights=expression(x)) tryit(sigma=Inf, weights=expression(x)) } V <- diag(c(0.05^2, 0.07^2)) if(ALWAYS) { tryit(varcov=V) } if(FULLTEST) { tryit(varcov=V, diggle=TRUE) tryit(varcov=V, weights=expression(x)) tryit(varcov=V, weights=expression(x), diggle=TRUE) Z <- distmap(runifpoint(5, Window(cells))) tryit(0.05, weights=Z) tryit(0.05, weights=Z, diggle=TRUE) } trymost <- function(...) tryit(..., do.fun=FALSE) wdf <- data.frame(a=1:42,b=42:1) if(ALWAYS) { trymost(0.05, weights=wdf) trymost(sigma=Inf, weights=wdf) } if(FULLTEST) { trymost(0.05, weights=wdf, diggle=TRUE) trymost(varcov=V, weights=wdf) trymost(varcov=V, weights=expression(cbind(x,y))) } ## check conservation of mass checkconserve <- function(X, xname, sigma, toler=0.01) { veritas <- npoints(X) vino <- integral(density(X, sigma, diggle=TRUE)) relerr <- abs(vino - veritas)/veritas if(relerr > toler) stop(paste("density.ppp(diggle=TRUE) fails to conserve mass:", vino, "!=", veritas, "for", sQuote(xname)), call.=FALSE) return(relerr) } if(FULLTEST) { checkconserve(cells, "cells", 0.15) } if(ALWAYS) { checkconserve(split(chorley)[["lung"]], "lung", 2) } ## run C algorithm 'denspt' opa <- spatstat.options(densityC=TRUE, densityTransform=FALSE) if(ALWAYS) { tryit(varcov=V) } if(FULLTEST) { tryit(varcov=V, weights=expression(x)) trymost(varcov=V, weights=wdf) } spatstat.options(opa) crossit <- function(..., sigma=NULL) { U <- runifpoint(20, Window(cells)) a <- densitycrossEngine(cells, U, ..., sigma=sigma) a <- densitycrossEngine(cells, U, ..., sigma=sigma, diggle=TRUE) invisible(NULL) } if(ALWAYS) { crossit(varcov=V, weights=cells$x) crossit(sigma=Inf) } if(FULLTEST) { crossit(varcov=V, weights=wdf) crossit(sigma=0.1, weights=wdf) crossit(sigma=0.1, kernel="epa", weights=wdf) } ## apply different discretisation rules if(ALWAYS) { Z <- density(cells, 0.05, fractional=TRUE) } if(FULLTEST) { Z <- density(cells, 0.05, preserve=TRUE) Z <- density(cells, 0.05, fractional=TRUE, preserve=TRUE) } ## compare results with different algorithms crosscheque <- function(expr) { e <- as.expression(substitute(expr)) ename <- sQuote(deparse(substitute(expr))) ## interpreted R opa <- spatstat.options(densityC=FALSE, densityTransform=FALSE) val.interpreted <- eval(e) ## established C algorithm 'denspt' spatstat.options(densityC=TRUE, densityTransform=FALSE) val.C <- eval(e) ## new C algorithm 'Gdenspt' using transformed coordinates spatstat.options(densityC=TRUE, densityTransform=TRUE) val.Transform <- eval(e) spatstat.options(opa) if(max(abs(val.interpreted - val.C)) > 0.001) stop(paste("Numerical discrepancy between R and C algorithms in", ename)) if(max(abs(val.C - val.Transform)) > 0.001) stop(paste("Numerical discrepancy between C algorithms", "using transformed and untransformed coordinates in", ename)) invisible(NULL) } ## execute & compare results of density(at="points") with different algorithms wdfr <- cbind(1:npoints(redwood), 2) if(ALWAYS) { crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE)) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE, weights=wdfr[,1])) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE, 
weights=wdfr)) } ## correctness of non-Gaussian kernel calculation leavein <- function(ker, maxd=0.025) { ZI <- density(redwood, 0.12, kernel=ker, edge=FALSE, dimyx=256)[redwood] ZP <- density(redwood, 0.12, kernel=ker, edge=FALSE, at="points", leaveoneout=FALSE) discrep <- max(abs(ZP - ZI))/npoints(redwood) if(discrep > maxd) stop(paste("Discrepancy", signif(discrep, 3), "in calculation for", ker, "kernel")) return(invisible(NULL)) } if(ALWAYS) { leavein("epanechnikov", 0.015) } if(FULLTEST) { leavein("quartic", 0.010) leavein("disc", 0.100) } ## bandwidth selection code blocks sigvec <- 0.01 * 2:15 sigran <- range(sigvec) if(ALWAYS) { bw.ppl(redwood, sigma=sigvec) bw.CvL(redwood, sigma=sigvec) } if(FULLTEST) { bw.ppl(redwood, srange=sigran, ns=5) bw.CvL(redwood, srange=sigran, ns=5) } ## adaptive bandwidth if(ALWAYS) { a <- bw.abram(redwood) } if(FULLTEST) { a <- bw.abram(redwood, pilot=density(redwood, 0.2)) a <- bw.abram(redwood, smoother="densityVoronoi", at="pixels") } ## Kinhom if(ALWAYS) { lam <- density(redwood) K <- Kinhom(redwood, lam) lamX <- density(redwood, at="points") KX <- Kinhom(redwood, lamX) } ## test all code cases of new 'relrisk.ppp' algorithm pants <- function(..., X=ants, sigma=100, se=TRUE) { a <- relrisk(X, sigma=sigma, se=se, ...) return(TRUE) } if(ALWAYS) { pants() pants(diggle=TRUE) pants(edge=FALSE) pants(at="points") pants(casecontrol=FALSE) pants(relative=TRUE) pants(sigma=Inf) pants(sigma=NULL, varcov=diag(c(100,100)^2)) } if(FULLTEST) { pants(diggle=TRUE, at="points") pants(edge=FALSE, at="points") pants(casecontrol=FALSE, relative=TRUE) pants(casecontrol=FALSE,at="points") pants(relative=TRUE,at="points") pants(casecontrol=FALSE, relative=TRUE,at="points") pants(relative=TRUE, control="Cataglyphis", case="Messor") pants(relative=TRUE, control="Cataglyphis", case="Messor", at="points") pants(casecontrol=FALSE, case="Messor", se=FALSE) pants(case=2, at="pixels", relative=TRUE) pants(case=2, at="points", relative=TRUE) pants(case=2, at="pixels", relative=FALSE) pants(case=2, at="points", relative=FALSE) } ## more than 2 types if(ALWAYS) { pants(X=sporophores) pants(X=sporophores, sigma=20, at="points") bw.relrisk(sporophores, method="leastsquares") } if(FULLTEST) { pants(X=sporophores, sigma=20, relative=TRUE, at="points") pants(X=sporophores, sigma=20, at="pixels", se=FALSE) pants(X=sporophores, sigma=20, relative=TRUE, at="pixels", se=FALSE) bw.relrisk(sporophores, method="weightedleastsquares") } ## likewise 'relrisk.ppm' fit <- ppm(ants ~ x) rants <- function(..., model=fit) { a <- relrisk(model, sigma=100, se=TRUE, ...) return(TRUE) } if(ALWAYS) { rants() rants(diggle=TRUE) rants(edge=FALSE) rants(at="points") rants(casecontrol=FALSE) rants(relative=TRUE) } if(FULLTEST) { rants(diggle=TRUE, at="points") rants(edge=FALSE, at="points") rants(casecontrol=FALSE, relative=TRUE) rants(casecontrol=FALSE,at="points") rants(relative=TRUE,at="points") rants(casecontrol=FALSE, relative=TRUE,at="points") rants(relative=TRUE, control="Cataglyphis", case="Messor") rants(relative=TRUE, control="Cataglyphis", case="Messor", at="points") } ## more than 2 types fut <- ppm(sporophores ~ x) if(ALWAYS) { rants(model=fut) } if(FULLTEST) { rants(model=fut, at="points") rants(model=fut, relative=TRUE, at="points") } ## execute Smooth.ppp and Smoothfun.ppp in all cases stroke <- function(..., Y = longleaf) { Z <- Smooth(Y, ..., at="pixels") Z <- Smooth(Y, ..., at="points", leaveoneout=TRUE) Z <- Smooth(Y, ..., at="points", leaveoneout=FALSE) f <- Smoothfun(Y, ...) 
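## Note: the longleaf window is (roughly) the square [0,200]^2 metres, so
## (120, 80) below is an interior query location; f is also evaluated on a
## two-point subset and on an empty pattern, and coerced to a pixel image,
## covering the main evaluation modes of a Smoothfun object.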
f(120, 80) f(Y[1:2]) f(Y[FALSE]) U <- as.im(f) return(invisible(NULL)) } if(ALWAYS) { stroke() stroke(5, diggle=TRUE) stroke(5, geometric=TRUE) stroke(1e-6) # generates warning about small bandwidth stroke(5, weights=expression(x)) stroke(5, kernel="epa") stroke(sigma=Inf) } if(FULLTEST) { Z <- as.im(function(x,y){abs(x)+1}, Window(longleaf)) stroke(5, weights=Z) stroke(5, weights=runif(npoints(longleaf))) stroke(varcov=diag(c(25, 36))) stroke(varcov=diag(c(25, 36)), weights=runif(npoints(longleaf))) stroke(5, Y=longleaf %mark% 1) stroke(5, Y=cut(longleaf,breaks=3)) stroke(5, weights=Z, geometric=TRUE) } markmean(longleaf, 9) strike <- function(..., Y=finpines) { Z <- Smooth(Y, ..., at="pixels") Z <- Smooth(Y, ..., at="points", leaveoneout=TRUE) Z <- Smooth(Y, ..., at="points", leaveoneout=FALSE) f <- Smoothfun(Y, ...) f(4, 1) f(Y[1:2]) f(Y[FALSE]) U <- as.im(f) return(invisible(NULL)) } if(ALWAYS) { strike() strike(sigma=1.5, kernel="epa") strike(varcov=diag(c(1.2, 2.1))) strike(sigma=1e-6) strike(sigma=Inf) } if(FULLTEST) { strike(sigma=1e-6, kernel="epa") strike(1.5, weights=runif(npoints(finpines))) strike(1.5, weights=expression(y)) strike(1.5, geometric=TRUE) strike(1.5, Y=finpines[FALSE]) flatfin <- finpines %mark% data.frame(a=rep(1, npoints(finpines)), b=2) strike(1.5, Y=flatfin) strike(1.5, Y=flatfin, geometric=TRUE) } opx <- spatstat.options(densityTransform=FALSE) if(ALWAYS) { stroke(5, Y=longleaf[order(longleaf$x)], sorted=TRUE) } if(FULLTEST) { strike(1.5, Y=finpines[order(finpines$x)], sorted=TRUE) } spatstat.options(opx) ## detect special cases if(ALWAYS) { Smooth(longleaf[FALSE]) Smooth(longleaf, minnndist(longleaf)) Xconst <- cells %mark% 1 Smooth(Xconst, 0.1) Smooth(Xconst, 0.1, at="points") Smooth(cells %mark% runif(42), sigma=Inf) Smooth(cells %mark% runif(42), sigma=Inf, at="points") Smooth(cells %mark% runif(42), sigma=Inf, at="points", leaveoneout=FALSE) Smooth(cut(longleaf, breaks=4)) } ## code not otherwise reached if(ALWAYS) { smoothpointsEngine(cells, values=rep(1, npoints(cells)), sigma=0.2) } if(FULLTEST) { smoothpointsEngine(cells, values=runif(npoints(cells)), sigma=Inf) smoothpointsEngine(cells, values=runif(npoints(cells)), sigma=1e-16) } ## validity of Smooth.ppp(at='points') Y <- longleaf %mark% runif(npoints(longleaf), min=41, max=43) Z <- Smooth(Y, 5, at="points", leaveoneout=TRUE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=TRUE)") Z <- Smooth(Y, 5, at="points", leaveoneout=FALSE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=FALSE)") ## compare Smooth.ppp results with different algorithms if(ALWAYS) { crosscheque(Smooth(longleaf, at="points", sigma=6)) wt <- runif(npoints(longleaf)) crosscheque(Smooth(longleaf, at="points", sigma=6, weights=wt)) } if(FULLTEST) { vc <- diag(c(25,36)) crosscheque(Smooth(longleaf, at="points", varcov=vc)) crosscheque(Smooth(longleaf, at="points", varcov=vc, weights=wt)) } ## drop-dimension coding errors if(FULLTEST) { X <- longleaf marks(X) <- cbind(marks(X), 1) Z <- Smooth(X, 5) ZZ <- bw.smoothppp(finpines, hmin=0.01, hmax=0.012, nh=2) # reshaping problem } ## geometric-mean smoothing if(ALWAYS) { U <- Smooth(longleaf, 5, geometric=TRUE) } if(FULLTEST) { UU <- Smooth(X, 5, geometric=TRUE) V <- Smooth(longleaf, 5, geometric=TRUE, at="points") VV <- Smooth(X, 5, geometric=TRUE, at="points") } }) reset.spatstat.options() local({ #' Kmeasure, second.moment.engine #' Expansion of window Zno <- 
Kmeasure(redwood, sigma=0.2, expand=FALSE) Zyes <- Kmeasure(redwood, sigma=0.2, expand=TRUE) #' All code blocks sigmadouble <- rep(0.1, 2) diagmat <- diag(sigmadouble^2) generalmat <- matrix(c(1, 0.5, 0.5, 1)/100, 2, 2) Z <- Kmeasure(redwood, sigma=sigmadouble) Z <- Kmeasure(redwood, varcov=diagmat) Z <- Kmeasure(redwood, varcov=generalmat) A <- second.moment.calc(redwood, 0.1, what="all", debug=TRUE) B <- second.moment.calc(redwood, varcov=diagmat, what="all") B <- second.moment.calc(redwood, varcov=diagmat, what="all") D <- second.moment.calc(redwood, varcov=generalmat, what="all") PR <- pixellate(redwood) DRno <- second.moment.calc(PR, 0.2, debug=TRUE, expand=FALSE, npts=npoints(redwood), obswin=Window(redwood)) DRyes <- second.moment.calc(PR, 0.2, debug=TRUE, expand=TRUE, npts=npoints(redwood), obswin=Window(redwood)) DR2 <- second.moment.calc(solist(PR, PR), 0.2, debug=TRUE, expand=TRUE, npts=npoints(redwood), obswin=Window(redwood)) Gmat <- generalmat * 100 isoGauss <- function(x,y) {dnorm(x) * dnorm(y)} ee <- evaluate2Dkernel(isoGauss, runif(10), runif(10), varcov=Gmat, scalekernel=TRUE) isoGaussIm <- as.im(isoGauss, square(c(-3,3))) gg <- evaluate2Dkernel(isoGaussIm, runif(10), runif(10), varcov=Gmat, scalekernel=TRUE) ## experimental code op <- spatstat.options(developer=TRUE) DR <- density(redwood, 0.1) spatstat.options(op) }) local({ #' bandwidth selection op <- spatstat.options(n.bandwidth=8) bw.diggle(cells) bw.diggle(cells, method="interpreted") # undocumented test # bw.relrisk(urkiola, hmax=20) is tested in man/bw.relrisk.Rd bw.relrisk(urkiola, hmax=20, method="leastsquares") bw.relrisk(urkiola, hmax=20, method="weightedleastsquares") ZX <- density(swedishpines, at="points") bw.pcf(swedishpines, lambda=ZX) bw.pcf(swedishpines, lambda=ZX, bias.correct=FALSE, simple=FALSE, cv.method="leastSQ") spatstat.options(op) }) local({ #' code in kernels.R kernames <- c("gaussian", "rectangular", "triangular", "epanechnikov", "biweight", "cosine", "optcosine") X <- rnorm(20) U <- runif(20) for(ker in kernames) { dX <- dkernel(X, ker) fX <- pkernel(X, ker) qU <- qkernel(U, ker) m0 <- kernel.moment(0, 0, ker) m1 <- kernel.moment(1, 0, ker) m2 <- kernel.moment(2, 0, ker) m3 <- kernel.moment(3, 0, ker) } }) local({ ## idw Z <- idw(longleaf, power=4) Z <- idw(longleaf, power=4, se=TRUE) ZX <- idw(longleaf, power=4, at="points") ZX <- idw(longleaf, power=4, at="points", se=TRUE) ## dodgy code blocks in densityVoronoi.R A <- adaptive.density(nztrees, nrep=2, f=0.5, counting=TRUE) B <- adaptive.density(nztrees, nrep=2, f=0.5, counting=TRUE, fixed=TRUE) D <- adaptive.density(nztrees, nrep=2, f=0.5, counting=FALSE) E <- adaptive.density(nztrees, nrep=2, f=0.5, counting=FALSE, fixed=TRUE) #' adaptive kernel estimation d10 <- nndist(nztrees, k=10) d10fun <- distfun(nztrees, k=10) d10im <- as.im(d10fun) uN <- 2 * runif(npoints(nztrees)) AA <- densityAdaptiveKernel(nztrees, bw=d10) BB <- densityAdaptiveKernel(nztrees, bw=d10, weights=uN) DD <- densityAdaptiveKernel(nztrees, bw=d10fun, weights=uN) EE <- densityAdaptiveKernel(nztrees, bw=d10im, weights=uN) }) local({ ## unnormdensity x <- rnorm(20) d0 <- unnormdensity(x, weights=rep(0, 20)) dneg <- unnormdensity(x, weights=c(-runif(19), 0)) ## cases of 'intensity' etc a <- intensity(amacrine, weights=expression(x)) SA <- split(amacrine) a <- intensity(SA, weights=expression(x)) a <- intensity(SA, weights=amacrine$x) a <- intensity(ppm(amacrine ~ 1)) ## check infrastructure for 'densityfun' f <- densityfun(cells, 0.05) Z <- as.im(f) Z <- as.im(f, 
W=square(0.5)) }) reset.spatstat.options() #' #' tests/diagnostique.R #' #' Diagnostic tools such as diagnose.ppm, qqplot.ppm #' #' $Revision: 1.6 $ $Date: 2020/04/28 12:58:26 $ #' if(FULLTEST) { local({ fit <- ppm(cells ~ x) diagE <- diagnose.ppm(fit, type="eem") diagI <- diagnose.ppm(fit, type="inverse") diagP <- diagnose.ppm(fit, type="Pearson") plot(diagE, which="all") plot(diagI, which="smooth") plot(diagP, which="x") plot(diagP, which="marks", plot.neg="discrete") plot(diagP, which="marks", plot.neg="contour") plot(diagP, which="smooth", srange=c(-5,5)) plot(diagP, which="smooth", plot.smooth="contour") plot(diagP, which="smooth", plot.smooth="image") fitS <- ppm(cells ~ x, Strauss(0.08)) diagES <- diagnose.ppm(fitS, type="eem", clip=FALSE) diagIS <- diagnose.ppm(fitS, type="inverse", clip=FALSE) diagPS <- diagnose.ppm(fitS, type="Pearson", clip=FALSE) plot(diagES, which="marks", plot.neg="imagecontour") plot(diagPS, which="marks", plot.neg="discrete") plot(diagPS, which="marks", plot.neg="contour") plot(diagPS, which="smooth", plot.smooth="image") plot(diagPS, which="smooth", plot.smooth="contour") plot(diagPS, which="smooth", plot.smooth="persp") #' infinite reach, not border-corrected fut <- ppm(cells ~ x, Softcore(0.5), correction="isotropic") diagnose.ppm(fut) #' diagPX <- diagnose.ppm(fit, type="Pearson", cumulative=FALSE) plot(diagPX, which="y") #' simulation based e <- envelope(cells, nsim=4, savepatterns=TRUE, savefuns=TRUE) Plist <- rpoispp(40, nsim=5) qf <- qqplot.ppm(fit, nsim=4, expr=e, plot.it=FALSE) print(qf) qp <- qqplot.ppm(fit, nsim=5, expr=Plist, fast=FALSE) print(qp) qp <- qqplot.ppm(fit, nsim=5, expr=expression(rpoispp(40)), plot.it=FALSE) print(qp) qg <- qqplot.ppm(fit, nsim=5, style="classical", plot.it=FALSE) print(qg) #' lurking.ppm #' covariate is numeric vector fitx <- ppm(cells ~ x) yvals <- coords(as.ppp(quad.ppm(fitx)))[,"y"] lurking(fitx, yvals) #' covariate is stored but is not used in model Z <- as.im(function(x,y){ x+y }, Window(cells)) fitxx <- ppm(cells ~ x, data=solist(Zed=Z), allcovar=TRUE) lurking(fitxx, expression(Zed)) #' envelope is a ppplist; length < nsim; glmdata=NULL fit <- ppm(cells ~ 1) stuff <- lurking(fit, expression(x), envelope=Plist, plot.sd=FALSE) #' plot.lurk plot(stuff, shade=NULL) }) } #' #' tests/discarea.R #' #' $Revision: 1.3 $ $Date: 2020/04/28 12:58:26 $ #' if(ALWAYS) { local({ u <- c(0.5,0.5) B <- owin(poly=list(x=c(0.3, 0.5, 0.7, 0.4), y=c(0.3, 0.3, 0.6, 0.8))) areaGain(u, cells, 0.1, exact=TRUE) areaGain(u, cells, 0.1, W=NULL) areaGain(u, cells, 0.1, W=B) X <- cells[square(0.4)] areaLoss(X, 0.1, exact=TRUE) # -> areaLoss.diri areaLoss(X, 0.1, exact=FALSE) # -> areaLoss.grid areaLoss.poly(X, 0.1) areaLoss(X, 0.1, exact=FALSE, method="distmap") # -> areaLoss.grid areaLoss(X, c(0.1, 0.15), exact=FALSE, method="distmap") # -> areaLoss.grid }) } #' #' tests/disconnected.R #' #' disconnected linear networks #' #' $Revision: 1.4 $ $Date: 2020/04/28 12:58:26 $ #' #' tests/deepeepee.R #' #' Tests for determinantal point process models #' #' $Revision: 1.8 $ $Date: 2021/11/18 01:38:31 $ local({ if(ALWAYS) { #' simulate.dppm jpines <- residualspaper$Fig1 fit <- dppm(jpines ~ 1, dppGauss) set.seed(10981) simulate(fit, W=square(5)) } if(FULLTEST) { #' simulate.detpointprocfamily - code blocks model <- dppGauss(lambda=100, alpha=.05, d=2) simulate(model, seed=1999, correction="border") u <- is.stationary(model) #' other methods for dppm kay <- Kmodel(fit) gee <- pcfmodel(fit) lam <- intensity(fit) arr <- reach(fit) pah <- 
parameters(fit) #' a user bug report - matrix dimension error set.seed(256) dat <- simulate( dppGauss(lambda = 8.5, alpha = 0.1, d = 2), nsim = 1) } #' dppeigen code blocks if(ALWAYS) { mod <- dppMatern(lambda=2, alpha=0.01, nu=1, d=2) uT <- dppeigen(mod, trunc=1.1, Wscale=c(1,1), stationary=TRUE) } if(FULLTEST) { uF <- dppeigen(mod, trunc=1.1, Wscale=c(1,1), stationary=FALSE) vT <- dppeigen(mod, trunc=0.98, Wscale=c(1,1), stationary=TRUE) vF <- dppeigen(mod, trunc=0.98, Wscale=c(1,1), stationary=FALSE) } }) spatstat.core/tests/testsM.R0000644000176200001440000002657214141452520015622 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/marcelino.R ## ## $Revision: 1.4 $ $Date: 2020/04/30 02:18:23 $ ## local({ if(FULLTEST) { Y <- split(urkiola) B <- Y$birch O <- Y$oak B.lam <- predict (ppm(B ~polynom(x,y,2)), type="trend") O.lam <- predict (ppm(O ~polynom(x,y,2)), type="trend") Kinhom(B, lambda=B.lam, correction="iso") Kinhom(B, lambda=B.lam, correction="border") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam) Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "iso") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "border") } }) ## ## tests/markcor.R ## ## Tests of mark correlation code (etc) ## ## $Revision: 1.7 $ $Date: 2020/11/25 01:23:32 $ local({ if(ALWAYS) { ## check.testfun checks equality of functions ## and is liable to break if the behaviour of all.equal is changed fe <- function(m1, m2) {m1 == m2} fm <- function(m1, m2) {m1 * m2} fs <- function(m1, m2) {sqrt(m1)} if(check.testfun(fe, X=amacrine)$ftype != "equ") warning("check.testfun fails to recognise mark equality function") if(check.testfun(fm, X=longleaf)$ftype != "mul") warning("check.testfun fails to recognise mark product function") check.testfun(fs, X=longleaf) check.testfun("mul") check.testfun("equ") } if(FULLTEST) { ## test all is well in Kmark -> Kinhom MA <- Kmark(amacrine,function(m1,m2){m1==m2}) set.seed(42) AR <- rlabel(amacrine) MR <- Kmark(AR,function(m1,m2){m1==m2}) if(isTRUE(all.equal(MA,MR))) stop("Kmark unexpectedly ignores marks") ## cover code blocks in markcorr() X <- runifpoint(100) %mark% runif(100) Y <- X %mark% data.frame(u=runif(100), v=runif(100)) ww <- runif(100) fone <- function(x) { x/2 } ffff <- function(x,y) { fone(x) * fone(y) } aa <- markcorr(Y) bb <- markcorr(Y, ffff, weights=ww, normalise=TRUE) bb <- markcorr(Y, ffff, weights=ww, normalise=FALSE) bb <- markcorr(Y, f1=fone, weights=ww, normalise=TRUE) bb <- markcorr(Y, f1=fone, weights=ww, normalise=FALSE) ## markcrosscorr a <- markcrosscorr(betacells, normalise=FALSE) if(require(sm)) { b <- markcrosscorr(betacells, method="sm") } ## Vmark with normalisation v <- Vmark(spruces, normalise=TRUE) v <- Vmark(finpines, normalise=TRUE) } }) #' tests/mctests.R #' Monte Carlo tests #' (mad.test, dclf.test, envelopeTest, hasenvelope) #' $Revision: 1.4 $ $Date: 2020/06/12 06:10:47 $ local({ if(FULLTEST) { envelopeTest(cells, Lest, exponent=1, nsim=9, savepatterns=TRUE) (a3 <- envelopeTest(cells, Lest, exponent=3, nsim=9, savepatterns=TRUE)) envelopeTest(a3, Lest, exponent=3, nsim=9, alternative="less") fitx <- ppm(redwood~x) ax <- 
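#' (exponent=2 corresponds to the DCLF integral statistic and exponent=Inf
#' to the MAD statistic; exponent=1 and exponent=3 above exercise the
#' general L^p deviation code paths)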
envelopeTest(fitx, exponent=2, nsim=9, savefuns=TRUE) print(ax) envelopeTest(redwood, Lest, exponent=1, nsim=19, rinterval=c(0, 0.1), alternative="greater", clamp=TRUE) envelopeTest(redwood, pcf, exponent=Inf, nsim=19, rinterval=c(0, 0.1), alternative="greater", clamp=TRUE) } }) # # tests/mppm.R # # Basic tests of mppm # # $Revision: 1.20 $ $Date: 2021/01/22 08:09:02 $ # if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ ## test interaction formulae and subfits fit1 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~ifelse(group=="control", po, str)) fit2 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id) fit2w <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id, weights=runif(nrow(simba))) # currently invalid # fit3 <- mppm(Points ~ group, simba, # hyperframe(po=Poisson(), pie=PairPiece(c(0.05,0.1))), # iformula=~I((group=="control") * po) + I((group=="treatment") * pie)) fit1 fit2 fit2w # fit3 if(FULLTEST) { ## run summary.mppm which currently sits in spatstat-internal.Rd summary(fit1) summary(fit2) summary(fit2w) # summary(fit3) } ## test vcov algorithm vcov(fit1) vcov(fit2) # vcov(fit3) ## test subfits algorithm if(FULLTEST) { s1 <- subfits(fit1) s2 <- subfits(fit2) # s3 <- subfits(fit3) ## validity of results of subfits() p1 <- solapply(s1, predict) p2 <- solapply(s2, predict) # p3 <- solapply(s3, predict) } }) local({ if(FULLTEST) { ## cases of predict.mppm W <- solapply(waterstriders, Window) Fakes <- solapply(W, runifpoint, n=30) FakeDist <- solapply(Fakes, distfun) H <- hyperframe(Bugs=waterstriders, D=FakeDist) fit <- mppm(Bugs ~ D, data=H) p1 <- predict(fit) p2 <- predict(fit, locations=Fakes) p3 <- predict(fit, locations=solapply(W, erosion, r=4)) locn <- as.data.frame(do.call(cbind, lapply(Fakes, coords))) df <- data.frame(id=sample(1:3, nrow(locn), replace=TRUE), D=runif(nrow(locn))) p4 <- predict(fit, locations=locn, newdata=df) fitG <- mppm(Bugs ~ D, data=H, use.gam=TRUE) p1G <- predict(fitG) p2G <- predict(fitG, locations=Fakes) p3G <- predict(fitG, locations=solapply(W, erosion, r=4)) p4G <- predict(fitG, locations=locn, newdata=df) } }) local({ ## [thanks to Sven Wagner] ## factor covariate, with some levels unused in some rows if(FULLTEST) { set.seed(14921788) H <- hyperframe(X=replicate(3, runifpoint(20), simplify=FALSE), Z=solist(as.im(function(x,y){x}, owin()), as.im(function(x,y){y}, owin()), as.im(function(x,y){x+y}, owin()))) H$Z <- solapply(H$Z, cut, breaks=(0:4)/2) fit6 <- mppm(X ~ Z, H) v6 <- vcov(fit6) s6 <- subfits(fit6) p6 <- solapply(s6, predict) ## random effects fit7 <- mppm(X ~ Z, H, random=~1|id) v7 <- vcov(fit7) s7 <- subfits(fit7) p7 <- solapply(s7, predict) fit7a <- mppm(X ~ Z, H, random=~x|id) v7a <- vcov(fit7a) s7a <- subfits(fit7a) p7a <- solapply(s7a, predict) ## multitype: collisions in vcov.ppm, predict.ppm H$X <- lapply(H$X, rlabel, labels=factor(c("a","b")), permute=FALSE) M <- MultiStrauss(matrix(0.1, 2, 2), c("a","b")) fit8 <- mppm(X ~ Z, H, M) v8 <- vcov(fit8, fine=TRUE) s8 <- subfits(fit8) p8 <- lapply(s8, predict) c8 <- lapply(s8, predict, type="cif") fit9 <- mppm(X ~ Z, H, M, iformula=~Interaction * id) v9 <- vcov(fit9, fine=TRUE) s9 <- subfits(fit9) p9 <- lapply(s9, predict) c9 <- lapply(s9, predict, type="cif") ## and a simple error in recognising 'marks' fit10 <- mppm(X ~ marks, H) } }) local({ if(FULLTEST) { ## test handling of offsets and zero cif values in mppm H <- hyperframe(Y = waterstriders) (fit1 
<- mppm(Y ~ 1, data=H, Hardcore(1.5))) (fit2 <- mppm(Y ~ 1, data=H, StraussHard(7, 1.5))) (fit3 <- mppm(Y ~ 1, data=H, Hybrid(S=Strauss(7), H=Hardcore(1.5)))) s1 <- subfits(fit1) s2 <- subfits(fit2) s3 <- subfits(fit3) ## prediction, in training/testing context ## (example from Markus Herrmann and Ege Rubak) X <- waterstriders dist <- solapply(waterstriders, function(z) distfun(runifpoint(1, Window(z)))) i <- 3 train <- hyperframe(pattern = X[-i], dist = dist[-i]) test <- hyperframe(pattern = X[i], dist = dist[i]) fit <- mppm(pattern ~ dist, data = train) pred <- predict(fit, type="cif", newdata=test, verbose=TRUE) ## examples from Robert Aue GH <- Hybrid(G=Geyer(r=0.1, sat=3), H=Hardcore(0.01)) res <- mppm(Points ~ 1, interaction = GH, data=demohyper) print(summary(res)) sub <- subfits(res, verbose=TRUE) print(sub) } }) local({ if(FULLTEST) { ## test handling of interaction coefficients in multitype case set.seed(42) XX <- as.solist(replicate(3, rthin(amacrine, 0.8), simplify=FALSE)) H <- hyperframe(X=XX) M <- MultiStrauss(matrix(0.1, 2, 2), levels(marks(amacrine))) fit <- mppm(X ~ 1, H, M) co <- coef(fit) subco <- sapply(subfits(fit), coef) if(max(abs(subco - co)) > 0.001) stop("Wrong coefficient values in subfits, for multitype interaction") } }) local({ if(FULLTEST) { ## test lurking.mppm ## example from 'mppm' n <- 7 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~ Z + U + V, data=H) lurking(fit, expression(Z), type="P") lurking(fit, expression(V), type="raw") # design covariate lurking(fit, expression(U), type="raw") # image, constant in each row lurking(fit, H$Z, type="P") # list of images } }) local({ if(FULLTEST) { ## test anova.mppm code blocks and scoping problem H <- hyperframe(X=waterstriders) mod0 <- mppm(X~1, data=H, Poisson()) modxy <- mppm(X~x+y, data=H, Poisson()) mod0S <- mppm(X~1, data=H, Strauss(2)) modxyS <- mppm(X~x+y, data=H, Strauss(2)) anova(mod0, modxy, test="Chi") anova(mod0S, modxyS, test="Chi") anova(modxy, test="Chi") anova(modxyS, test="Chi") #' models with random effects (example from Marcelino de la Cruz) mod0r <- mppm(X~1, data=H, Poisson(), random = ~1|id) modxr <- mppm(X~x, data=H, Poisson(), random = ~1|id) anova(mod0r, modxr, test="Chi") } }) local({ if(FULLTEST) { ## test multitype stuff foo <- flu[1:3,] msh <- MultiStraussHard(iradii=matrix(100, 2, 2), hradii=matrix(10,2,2), types=levels(marks(foo$pattern[[1]]))) msh0 <- MultiStraussHard(iradii=matrix(100, 2, 2), hradii=matrix(10,2,2)) fit <- mppm(pattern ~ 1, data=foo, interaction=msh0) print(fit) print(summary(fit)) v <- vcov(fit) } }) reset.spatstat.options() #' #' tests/msr.R #' #' $Revision: 1.5 $ $Date: 2020/11/30 07:27:44 $ #' #' Tests of code for measures #' if(FULLTEST) { local({ ## cases of 'msr' Q <- quadscheme(cells) nQ <- n.quad(Q) nX <- npoints(cells) A <- matrix(nX * 3, nX, 3) B <- matrix(nQ * 3, nQ, 3) m <- msr(Q, A, B) M <- msr(Q, A, 1) M <- msr(Q, 1, B) M <- msr(Q, A, B[,1]) M <- msr(Q, A[,1], B) M <- msr(Q, A, B[,1,drop=FALSE]) M <- msr(Q, A[,1,drop=FALSE], B) ## methods a <- summary(m) b <- is.marked(m) w <- as.owin(m) z <- domain(m) ss <- scalardilate(m, 2) tt <- rescale(m, 2) ee <- rotate(m, pi/4) aa <- affine(m, mat=diag(c(1,2)), vec=c(0,1)) ff <- flipxy(m) am <- augment.msr(m, sigma=0.08) ua <- update(am) rr <- residuals(ppm(cells ~ x)) mm <- residuals(ppm(amacrine ~ x)) ss <- residuals(ppm(amacrine ~ x), type="score") gg <- rescale(ss, 1/662, 
c("micron", "microns")) plot(mm) plot(mm, multiplot=FALSE) plot(mm, equal.markscale=TRUE, equal.ribbon=TRUE) plot(ss) plot(ss, multiplot=FALSE) }) } spatstat.core/tests/testsNtoO.R0000644000176200001440000000350514141452520016274 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.core #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.core) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/NAinCov.R # # Testing the response to the presence of NA's in covariates # # $Revision: 1.7 $ $Date: 2020/04/30 05:23:52 $ if(FULLTEST) { local({ X <- runifpoint(42) Y <- as.im(function(x,y) { x+y }, owin()) Y[owin(c(0.2,0.4),c(0.2,0.4))] <- NA # fit model: should produce a warning but no failure misfit <- ppm(X ~Y, covariates=list(Y=Y)) # prediction Z <- predict(misfit, type="trend", se=TRUE) # covariance matrix: all should be silent v <- vcov(misfit) ss <- vcov(misfit, what="internals") NULL #' quantile.ewcdf f <- ewcdf(runif(100), runif(100)) qf <- quantile(f, probs=c(0.1, NA, 0.8)) #' quantile.density f <- density(runif(100)) qf <- quantile(f, probs=c(0.1, NA, 0.8)) }) } # # tests/nnstat.R # # Check code that uses nndist/nnwhich # # nnorient() # stienen() # # $Revision: 1.1 $ $Date: 2020/12/04 03:45:44 $ # local({ if(FULLTEST) { #' test nnorient nnorient(cells, domain=erosion(Window(cells), 0.1)) #' degenerate case X <- cells[nndist(cells) > bdist.points(cells)] f <- nnorient(X) #' nnclean A <- nnclean(shapley, k=17, edge.correct=TRUE) B <- nnclean(runifpoint3(300), 3) #' stienen set #' bug when disc radius is zero Y <- unmark(humberside)[40:100] # contains duplicated points stienen(Y) Z <- stienenSet(Y) #' other cases U <- stienen(cells[1]) V <- stienenSet(cells, edge=FALSE) } }) spatstat.core/src/0000755000176200001440000000000014141452520013631 5ustar liggesusersspatstat.core/src/ripleybox.h0000644000176200001440000000676714141452520016037 0ustar liggesusers/* ripleybox.h Ripley's edge correction for rectangular windows This file is #included multiple times in corrections.c Macros used: RIPLEYFUN Name of C function DEBUGBOX #defined if debugging information should be printed. 
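  Method, in outline: for a point at perpendicular distances
  dL, dR, dD, dU from the four edges of the rectangle, the code
  measures the angular fraction 'extang' of the circle of radius r,
  centred at the point, that lies outside the window (adding 1/4 when
  the point is effectively at a corner of the rectangle) and returns
  Ripley's isotropic-correction weight 1/(1 - extang).

  Further macros: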
*CHUNKLOOP defined in chunkloop.h TWOPI defined in Rmath.h $Revision: 1.3 $ $Date: 2021/10/31 06:40:58 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ void RIPLEYFUN(nx, x, y, rmat, nr, xmin, ymin, xmax, ymax, epsilon, out) /* inputs */ int *nx, *nr; /* dimensions */ double *x, *y; /* coordinate vectors of length nx */ double *rmat; /* matrix nx by nr */ double *xmin, *ymin, *xmax, *ymax; /* box dimensions */ double *epsilon; /* threshold for proximity to corner */ /* output */ double *out; /* output matrix nx by nr */ { int i, j, n, m, ijpos, ncor, maxchunk; double xx, yy, x0, y0, x1, y1, dL, dR, dU, dD, aL, aU, aD, aR, rij; double cL, cU, cD, cR, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double corner, extang; double eps; n = *nx; m = *nr; x0 = *xmin; y0 = *ymin; x1 = *xmax; y1 = *ymax; eps = *epsilon; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xx = x[i]; yy = y[i]; /* perpendicular distance from point to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xx - x0; dR = x1 - xx; dD = yy - y0; dU = y1 - yy; /* test for corner of the rectangle */ #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < eps) ? 1 : 0) ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2) ? YES : NO; /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); for(j = 0; j < m; j++) { ijpos = j * n + i; rij = rmat[ijpos]; #ifdef DEBUGBOX Rprintf("rij = %lf\n", rij); #endif if(rij == 0.0) { /* Circle of radius 0 */ out[ijpos] = 1.0; } else { /* Fraction of circle Compute half the angle subtended by the intersection between the circle of radius r[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < rij) ? acos(dL/rij) : 0.0; aR = (dR < rij) ? acos(dR/rij) : 0.0; aD = (dD < rij) ? acos(dD/rij) : 0.0; aU = (dU < rij) ? 
acos(dU/rij) : 0.0; #ifdef DEBUGBOX Rprintf("aL = %lf\n", aL); Rprintf("aR = %lf\n", aR); Rprintf("aD = %lf\n", aD); Rprintf("aU = %lf\n", aU); #endif /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); #ifdef DEBUGBOX Rprintf("cL = %lf\n", cL); Rprintf("cR = %lf\n", cR); Rprintf("cD = %lf\n", cD); Rprintf("cU = %lf\n", cU); #endif /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; #ifdef DEBUGBOX Rprintf("extang = %lf\n", extang); #endif /* add pi/2 for corners */ if(corner) { extang += 1.0/4.0; #ifdef DEBUGBOX Rprintf("extang = %lf\n", extang); #endif } /* OK, now compute weight */ out[ijpos] = 1 / (1 - extang); } } } } } spatstat.core/src/idw.c0000644000176200001440000002175414141377573014607 0ustar liggesusers/* idw.c Inverse-distance weighted smoothing $Revision: 1.12 $ $Date: 2020/01/01 01:27:35 $ Cidw inverse distance smoothing from data points onto pixel grid idwloo leave-one-out estimate at data points Cidw2 Cidw with variance estimate idwloo2 idwloo with variance estimate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "chunkloop.h" #define MAT(X,I,J,NROW) (X)[(J) + (NROW) * (I)] /* inverse-distance smoothing from data points onto pixel grid */ void Cidw(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *xstart, *xstep, *ystart, *ystep; /* pixel grid */ int *nx, *ny; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output arrays - assumed initialised 0 */ { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w, sumw, sumwv; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; sumwv += w * v[i]; sumw += w; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); sumwv += w * v[i]; sumw += w; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; } } } } /* Leave-one-out IDW at data points only */ void idwloo(x, y, v, n, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output vectors - assumed initialised 0 */ { int N, i, j, maxchunk; double xi, yi, d2, w, pon2, sumw, sumwv; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = 0.0; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * 
(yi - y[j]); w = 1.0/d2; sumwv += w * v[j]; sumw += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * v[j]; sumw += w; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = 0.0; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * v[j]; sumw += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * v[j]; sumw += w; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; } } } } /* ---------------------------------------------------- VERSIONS WITH VARIANCE CALCULATION --------------------------------------------------- */ /* inverse-distance smoothing from data points onto pixel grid */ void Cidw2(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, power, num, den, rat, mtwo, wtwo) double *x, *y, *v; /* data points and values */ int *n; double *xstart, *xstep, *ystart, *ystep; /* pixel grid */ int *nx, *ny; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output arrays - assumed initialised 0 */ double *mtwo, *wtwo; /* output arrays - assumed initialised 0 */ { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w, vi, sumw, sumwv, sumw2, runmean, m2, delta, epsilon; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = sumw2 = m2 = runmean = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { vi = v[i]; d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; sumw += w; sumw2 += w * w; sumwv += w * vi; delta = vi - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; MAT(mtwo, ix, iy, Ny) = m2; MAT(wtwo, ix, iy, Ny) = sumw2; } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = sumw2 = m2 = runmean = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { vi = v[i]; d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); sumw += w; sumw2 += w * w; sumwv += w * vi; delta = vi - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; MAT(mtwo, ix, iy, Ny) = m2; MAT(wtwo, ix, iy, Ny) = sumw2; } } } } /* Leave-one-out IDW at data points only */ void idwloo2(x, y, v, n, power, num, den, rat, mtwo, wtwo) double *x, *y, *v; /* data points and values */ int *n; double *power; /* exponent for IDW */ double *num, *den, *rat, *mtwo, *wtwo; /* output vectors - initialised 0 */ { int N, i, j, maxchunk; double xi, yi, d2, w, pon2, vj, sumw, sumwv, sumw2, runmean, m2, delta, epsilon; N = *n; pon2 = (*power)/2.0; 
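  /* The running updates below (delta, epsilon, runmean, m2) appear to be
     West's incremental algorithm for weighted moments: after processing
     each value v with weight w,
         runmean = sum(w_i * v_i) / sum(w_i)
         m2      = sum(w_i * (v_i - runmean)^2)
     so m2, sumw and sumw2 support a variance estimate for the
     leave-one-out IDW value rat = sumwv/sumw (assembled, presumably,
     by the calling R code). */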
if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = sumw2 = m2 = runmean = 0.0; if(i > 0) { for(j = 0; j < i; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } if(i < N-1) { for(j = i+1; j < N; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; mtwo[i] = m2; wtwo[i] = sumw2; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = sumw2 = m2 = runmean = 0.0; if(i > 0) { for(j = 0; j < i; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } if(i < N-1) { for(j = i+1; j < N; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; mtwo[i] = m2; wtwo[i] = sumw2; } } } } spatstat.core/src/ripleypoly.h0000644000176200001440000002377614141452520016231 0ustar liggesusers/* ripleypoly.h Ripley's edge correction for polygonal windows This file is #included multiple times in corrections.c Macros used: RIPLEYFUN Name of C function DEBUGPOLY #defined if debugging information should be printed. 
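  Method, in outline: the polygon boundary is traversed edge by edge.
  For each edge, the circle of radius r centred at the data point is
  intersected with the vertical lines through the edge endpoints and
  with the edge itself, giving up to 6 crossing angles; the angles are
  sorted, the midpoint of each resulting arc is tested with TESTINSIDE
  against the trapezoid under the edge, and the included arc lengths
  are accumulated with a sign given by the edge orientation. The output
  is the total angle of the circle lying inside the polygon (the
  isotropic weight 2*pi/total is presumably formed by the calling R
  code).

  Further macros: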
TESTINSIDE defined in corrections.c *CHUNKLOOP defined in chunkloop.h TWOPI defined in Rmath.h $Revision: 1.21 $ $Date: 2021/10/31 06:43:56 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ #undef DEBUGLEVEL #ifndef DEBUGPOLY #define DEBUGLEVEL 0 #else #define DEBUGLEVEL 3 #endif /* SPLITPOINT is used only when DEBUGLEVEL = 2 */ #undef SPLITPOINT #define SPLITPOINT 0 #undef ROUNDED #ifdef _WIN32 /* Avoid quirks of Windows i386 */ #define ROUNDED(X) ((float)(X)) #else #define ROUNDED(X) ((float)(X)) /* WAS: define ROUNDED(X) ((double)(X)) */ #endif void RIPLEYFUN(nc, xc, yc, bd, nr, rmat, nseg, x0, y0, x1, y1, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *bd, *rmat; double *x0, *y0, *x1, *y1; /* output */ double *out; { int n, m, i, j, k, l, nradperpt, ncut, nchanges, maxchunk; double xcentre, ycentre, xx0, yy0, xx1, yy1, xx01, yy01; double bdisti; double x, y, radius, radius2, dx0, dx1, dy0; double a, b, c, t, det, sqrtdet, tmp; double theta[6], delta[7], tmid[7]; double xtest, ytest, contrib, total; n = *nc; nradperpt = *nr; m = *nseg; #if (DEBUGLEVEL == 2) Rprintf("/// Debug level 2, split point %d ///\n", (int) SPLITPOINT); #elif (DEBUGLEVEL > 0) Rprintf("/// Debug level %d ///\n", (int) DEBUGLEVEL); #endif OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xcentre = xc[i]; ycentre = yc[i]; bdisti = bd[i]; #if (DEBUGLEVEL >= 3) Rprintf("------- centre[%d] = (%lf, %lf) ------\n", i, xcentre, ycentre); Rprintf(" boundary distance %lf \n", bdisti); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 1)) Rprintf("------- centre[%d] ------\n", i); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = (double) (radius * radius); #if (DEBUGLEVEL >= 3) Rprintf("radius[%d] = %lf\n", j, radius); #elif (DEBUGLEVEL >= 2) Rprintf("radius[%d]\n", j); #endif if(bdisti > radius || radius == 0.0) { /* no crossings */ total = TWOPI; #if (DEBUGLEVEL >= 2) Rprintf("no crossings; total = 2*pi\n"); #endif } else { /* run through all boundary segments */ total = 0.0; for(k=0; k < m; k++) { ncut = 0; xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #if (DEBUGLEVEL >= 3) Rprintf("... Edge[%d] = (%lf,%lf) to (%lf,%lf)\n", k, xx0, yy0, xx1, yy1); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 2)) Rprintf("... 
Edge[%d]\n", k); #endif /* intersection with left edge */ dx0 = xx0 - xcentre; det = (double) (radius2 - dx0 * dx0); #if (DEBUGLEVEL >= 3) Rprintf("Left: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 3)) Rprintf("Left:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 4))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); y = (double) (ycentre + sqrtdet); if(ROUNDED(y) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(y - ycentre, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\tcut left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 5)) Rprintf("\tcut left (+)\n"); #endif ncut++; } y = (double) (ycentre - sqrtdet); if(ROUNDED(y) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(y-ycentre, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\tcut left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 6)) Rprintf("\tcut left (-)\n"); #endif ncut++; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 7))) Rprintf("\tdet = 0\n"); #endif if(ROUNDED(ycentre) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(0.0, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 8)) Rprintf("\ttangent left\n"); #endif ncut++; } } /* intersection with right edge */ dx1 = xx1 - xcentre; det = (double) (radius2 - dx1 * dx1); #if (DEBUGLEVEL >= 3) Rprintf("Right: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 9)) Rprintf("Right:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 10))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); y = (double) (ycentre + sqrtdet); if(ROUNDED(y) < ROUNDED(yy1)) { theta[ncut] = (double) atan2(y - ycentre, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\tcut right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 11)) Rprintf("\tcut right (+)\n"); #endif ncut++; } y = (double) (ycentre - sqrtdet); if(ROUNDED(y) < ROUNDED(yy1)) { theta[ncut] = (double) atan2(y - ycentre, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\tcut right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 12)) Rprintf("\tcut right (-)\n"); #endif ncut++; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 13))) Rprintf("\tdet = 0\n"); #endif if(ycentre < yy1) { theta[ncut] = (double) atan2(0.0, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 14)) Rprintf("\ttangent right\n"); #endif ncut++; } } /* intersection with top segment */ xx01 = xx1 - xx0; yy01 = yy1 - yy0; dy0 = yy0 - ycentre; a = xx01 * xx01 + yy01 * yy01; b = 2 * (xx01 * dx0 + yy01 * dy0); c = dx0 * dx0 + dy0 * dy0 - radius2; det = (double) (b * b - 4 * a * c); #if (DEBUGLEVEL >= 3) Rprintf("Top: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 15)) Rprintf("Top:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 16))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); t = (double) ((sqrtdet - b)/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\thits + segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT 
>= 17)) Rprintf("\thits + segment\n"); #endif ++ncut; } t = (double) ((-sqrtdet - b)/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\thits - segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 18)) Rprintf("\thits - segment\n"); #endif ++ncut; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 19))) Rprintf("\tdet = 0\n"); #endif t = (double) (- b/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent to segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 20)) Rprintf("\ttangent to segment\n"); #endif ++ncut; } } #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 21))) Rprintf("Finished cutting; ncut = %d\n", ncut); #endif /* for safety, force all angles to be in range [0, 2 * pi] */ if(ncut > 0) for(l = 0; l < ncut; l++) if(theta[l] < 0) theta[l] += TWOPI; /* sort angles */ if(ncut > 1) { do { nchanges = 0; for(l = 0; l < ncut - 1; l++) { if(theta[l] > theta[l+1]) { /* swap */ ++nchanges; tmp = theta[l]; theta[l] = theta[l+1]; theta[l+1] = tmp; } } } while(nchanges > 0); } #if (DEBUGLEVEL >= 3) if(ncut > 0) { for(l = 0; l < ncut; l++) Rprintf("theta[%d] = %lf\n", l, theta[l]); } #endif /* compute length of circumference inside polygon */ if(ncut == 0) { /* entire circle is either in or out */ xtest = xcentre + radius; ytest = ycentre; if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) contrib = TWOPI; else contrib = 0.0; } else { /* find midpoints and lengths of pieces (adding theta = ) */ delta[0] = theta[0]; tmid[0] = theta[0]/2; if(ncut > 1) { for(l = 1; l < ncut; l++) { delta[l] = theta[l] - theta[l-1]; tmid[l] = (theta[l] + theta[l-1])/2; } } delta[ncut] = TWOPI - theta[ncut - 1]; tmid[ncut] = (TWOPI + theta[ncut-1])/2; contrib = 0.0; for(l = 0; l <= ncut; l++) { #if (DEBUGLEVEL >= 3) Rprintf("Interval %d, width %lf:", l, delta[l]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 22)) Rprintf("Interval %d:", l); #endif xtest = (double) (xcentre + radius * cos(tmid[l])); ytest = (double) (ycentre + radius * sin(tmid[l])); if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) { contrib += delta[l]; #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 23))) Rprintf("inside\n"); } else { Rprintf("outside\n"); #endif } } } /* multiply by sign of trapezium */ if(xx0 < xx1) contrib = -contrib; #if (DEBUGLEVEL >= 3) Rprintf("contrib = %lf\n", contrib); #endif total += contrib; } } out[ j * n + i] = total; #if (DEBUGLEVEL >= 1) Rprintf("\nTotal = %lf = %lf * (2 * pi)\n", total, total/TWOPI); #endif } } } } spatstat.core/src/Ediggatsti.c0000644000176200001440000000354314141377573016104 0ustar liggesusers#include #include #include #include "chunkloop.h" #include "looptest.h" #include "constants.h" /* Ediggatsti.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ C implementation of 'eval' for DiggleGatesStibbard interaction Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Ediggatsti(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, rrho, values) /* inputs */ int *nnsource, *nntarget; double 
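/* The Diggle-Gates-Stibbard pair interaction is
   h(d) = sin^2( (pi/2) * d / rho )  for d < rho,  and 1 otherwise;
   the loop below accumulates the product of the sin terms over
   neighbouring points and stores log(product^2), i.e. the sum of
   log h(d) over neighbours. */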
*xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rho, rho2, rho2pluseps, coef, product; nsource = *nnsource; ntarget = *nntarget; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; coef = M_PI_2/rho; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting position */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rho */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) product *= sin(sqrt(d2) * coef); } } values[j] = log(product * product); } } } spatstat.core/src/constants.h0000644000176200001440000000074714141377573016044 0ustar liggesusers/* constants.h Ensure that required constants are defined (Insurance against flaky installations) $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef M_PI #define M_PI 3.141592653589793 #endif #ifndef M_PI_2 #define M_PI_2 1.570796326794897 #endif #ifndef M_2_PI #define M_2_PI (2.0/M_PI) #endif #ifndef M_2PI #define M_2PI 6.283185307179586 #endif spatstat.core/src/rthin.c0000644000176200001440000000362514141377573015145 0ustar liggesusers#include #include #include /* rthin.c Select from the integers 1:n with probability p by simulating geometric(p) jumps between selected integers $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ SEXP thinjumpequal(SEXP n, SEXP p, SEXP guess) { int N; double P; int *w; /* temporary storage for selected integers */ int nw, nwmax; int i, j, k; double log1u, log1p; /* R object return value */ SEXP Out; /* external storage pointer */ int *OutP; /* protect R objects from garbage collector */ PROTECT(p = AS_NUMERIC(p)); PROTECT(n = AS_INTEGER(n)); PROTECT(guess = AS_INTEGER(guess)); /* Translate arguments from R to C */ N = *(INTEGER_POINTER(n)); P = *(NUMERIC_POINTER(p)); nwmax = *(INTEGER_POINTER(guess)); /* Allocate space for result */ w = (int *) R_alloc(nwmax, sizeof(int)); /* set up */ GetRNGstate(); log1p = -log(1.0 - P); /* main loop */ i = 0; /* last selected element of 1...N */ nw = 0; /* number of selected elements */ while(i <= N) { log1u = exp_rand(); /* an exponential rv is equivalent to -log(1-U) */ j = (int) ceil(log1u/log1p); /* j is geometric(p) */ i += j; if(nw >= nwmax) { /* overflow; allocate more space */ w = (int *) S_realloc((char *) w, 2 * nwmax, nwmax, sizeof(int)); nwmax = 2 * nwmax; } /* add 'i' to output vector */ w[nw] = i; ++nw; } /* The last saved 'i' could have exceeded 'N' */ /* For efficiency we don't check this in the loop */ if(nw > 0 && w[nw-1] > N) --nw; PutRNGstate(); /* create result vector */ PROTECT(Out = NEW_INTEGER(nw)); /* copy results into output */ OutP = INTEGER_POINTER(Out); for(k = 0; k < nw; k++) OutP[k] = w[k]; UNPROTECT(4); return(Out); } spatstat.core/src/geom3.h0000644000176200001440000000041014141377573015025 0ustar liggesusers/* $Revision: 1.1 $ 
$Date: 2009/11/04 23:54:15 $ Definitions for 3D geometrical structures */ typedef struct Point { double x; double y; double z; } Point; typedef struct Box { double x0; double x1; double y0; double y1; double z0; double z1; } Box; spatstat.core/src/sphevol.c0000644000176200001440000000746414141377573015506 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ Routine for calculating ABSOLUTE volume of intersection between sphere and box Arbitrary positions: point is allowed to be inside or outside box. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif #include "yesno.h" #define ABS(X) ((X >= 0.0) ? (X) : -(X)) static double rcubed, spherevol; double sphevol(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double v1(), v2(), v3(); int i, j; rcubed = r * r * r; spherevol = (4.0/3.0) * PI * rcubed; p[1] = box->x0 - point->x; p[2] = box->y0 - point->y; p[3] = box->z0 - point->z; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += v1(p[i], -1, r) + v1(q[i], 1, r); #ifdef DEBUG Rprintf("i = %d, v1 = %f, v1 = %f\n", i, v1(p[i], -1, r), v1(q[i], 1, r)); #endif } DBG("Past v1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= v2(p[i], -1, p[j], -1, r) + v2(p[i], -1, q[j], 1, r) + v2(q[i], 1, p[j], -1, r) + v2(q[i], 1, q[j], 1, r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past v2", sum) sum += v3(p[1], -1, p[2], -1, p[3], -1, r) + v3(p[1], -1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(p[1], -1, q[2], 1, p[3], -1, r) + v3(p[1], -1, q[2], 1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, p[2], -1, p[3], -1, r) + v3(q[1], 1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, q[2], 1, p[3], -1, r) + v3(q[1], 1, q[2], 1, q[3], 1, r); DBG("Past v3", sum) DBG("sphere volume", spherevol) return(spherevol - sum); } double v1(a,s,r) double a, r; int s; { double value; double u(); short sign; value = 4.0 * rcubed * u(ABS(a)/r, 0.0, 0.0); sign = (a >= 0.0) ? 1 : -1; if(sign == s) return(value); else return(spherevol - value); } double v2(a, sa, b, sb, r) double a, b, r; int sa, sb; { short sign; double u(); sign = (b >= 0.0) ? 1 : -1; if(sign != sb ) return(v1(a, sa, r) - v2(a, sa, ABS(b), 1, r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v1(b, sb, r) - v2(ABS(a), 1, b, sb, r)); a = ABS(a); return(2.0 * rcubed * u(a/r, b/r, 0.0)); } double v3(a, sa, b, sb, c, sc, r) double a, b, c, r; int sa, sb, sc; { short sign; double u(); sign = (c >= 0.0) ? 1 : -1; if(sign != sc) return(v2(a,sa,b,sb,r) - v3(a,sa,b,sb, ABS(c), 1, r)); c = ABS(c); sc = 1; sign = (b >= 0.0) ? 1 : -1; if(sign != sb) return(v2(a,sa,c,sc,r) - v3(a,sa,ABS(b),1,c,sc,r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 
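/* Here v1, v2 and v3 give the volume of the sphere cut off by one, two
   or three of the box's bounding half-planes; sphevol() combines them
   by inclusion-exclusion into the volume of sphere lying outside the
   box, and subtracts the total from the full sphere volume. */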
1 : -1; if(sign != sa) return(v2(b,sb, c, sc, r) - v3(ABS(a),1, b, sb, c, sc, r)); a = ABS(a); return(rcubed * u(a/r, b/r, c/r)); } double u(a, b, c) double a, b, c; { double w(); if(a * a + b * b + c * c >= 1.0) return(0.0); return( (PI/12.0) * (2.0 - 3.0 * (a + b + c) + (a * a * a + b * b * b + c * c * c)) + w(a,b) + w(b,c) + w(a,c) - a * b * c ); } double w(x,y) double x,y; /* Arguments assumed >= 0 */ { double z; z = sqrt(1 - x * x - y * y); return( (x / 2.0 - x * x * x / 6.0) * atan2(y, z) + (y / 2.0 - y * y * y / 6.0) * atan2(x, z) - ( atan2(x * y , z) - x * y * z )/3.0 ); } spatstat.core/src/lookup.c0000644000176200001440000001234614141377573015332 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for a general pairwise interaction process with the pairwise interaction function given by a ``lookup table'', passed through the par argument. */ /* For debugging code, insert the line: #define DEBUG 1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lookup { int nlook; int equisp; double delta; double rmax; double r2max; double *h; /* values of pair interaction */ double *r; /* r values if not equally spaced */ double *r2; /* r^2 values if not equally spaced */ double *period; int per; } Lookup; /* initialiser function */ Cdata *lookupinit(state, model, algo) State state; Model model; Algor algo; { int i, nlook; double ri; Lookup *lookup; lookup = (Lookup *) R_alloc(1, sizeof(Lookup)); /* Interpret model parameters*/ lookup->nlook = nlook = model.ipar[0]; lookup->equisp = (model.ipar[1] > 0); lookup->delta = model.ipar[2]; lookup->rmax = model.ipar[3]; lookup->r2max = pow(lookup->rmax, 2); /* periodic boundary conditions? */ lookup->period = model.period; lookup->per = (model.period[0] > 0.0); /* If the r-values are equispaced only the h vector is included in ``par'' after ``rmax''; the entries of h then consist of h[0] = par[5], h[1] = par[6], ..., h[k-1] = par[4+k], ..., h[nlook-1] = par[4+nlook]. If the r-values are NOT equispaced then the individual r values are needed and these are included as r[0] = par[5+nlook], r[1] = par[6+nlook], ..., r[k-1] = par[4+nlook+k], ..., r[nlook-1] = par[4+2*nlook]. 
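For example, with nlook = 3 and equispaced r values (same 1-based indexing as above): h[0] = par[5], h[1] = par[6], h[2] = par[7]; in the non-equispaced case the r values follow immediately afterwards, r[0] = par[8], r[1] = par[9], r[2] = par[10].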
*/ lookup->h = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) lookup->h[i] = model.ipar[4+i]; if(!(lookup->equisp)) { lookup->r = (double *) R_alloc((size_t) nlook, sizeof(double)); lookup->r2 = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) { ri = lookup->r[i] = model.ipar[4+nlook+i]; lookup->r2[i] = ri * ri; } } #ifdef DEBUG Rprintf("Exiting lookupinit: nlook=%d, equisp=%d\n", nlook, lookup->equisp); #endif return((Cdata *) lookup); } /* conditional intensity evaluator */ double lookupcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, nlook, k, kk, ix, ixp1, j; double *x, *y; double u, v; double r2max, d2, d, delta, cifval, ux, vy; Lookup *lookup; lookup = (Lookup *) cdata; r2max = lookup->r2max; delta = lookup->delta; nlook = lookup->nlook; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lookup->equisp) { /* equispaced r values */ if(lookup->per) { /* periodic distance */ /* Rprintf("Lookup equispaced table, periodic distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { d = sqrt(dist2(u,v,x[j],y[j],lookup->period)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { d = sqrt(dist2(u,v,x[j],y[j],lookup->period)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ /* Rprintf("Lookup equispaced table, Euclidean distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { d = hypot(u - x[j], v-y[j]); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { d = hypot(u - x[j], v-y[j]); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } } } else { /* non-equispaced r values */ if(lookup->per) { /* periodic distance */ /* Rprintf("Lookup non-equispaced table, periodic distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],lookup->period); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { d2 = dist2(u,v,x[j],y[j],lookup->period); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ /* Rprintf("Lookup non-equispaced table, Euclidean distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { ux = u - x[j]; vy = v - y[j]; d2 = ux * ux + vy * vy; if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { ux = u - x[j]; vy = v - y[j]; d2 = ux * ux + vy * vy; if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ?
0 : kk-1; cifval *= lookup->h[k]; } } } } } return cifval; } Cifns LookupCifns = { &lookupinit, &lookupcif, (updafunptr) NULL, NO}; spatstat.core/src/Kborder.h0000644000176200001440000001103414141377573015407 0ustar liggesusers/* Kborder.h Code template for K function estimators in Kborder.c Variables: FNAME function name OUTTYPE storage type of the output vectors ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.12 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME( nxy, x, y, #ifdef WEIGHTED w, #endif b, nr, rmax, numer, denom) /* inputs */ int *nxy, *nr; double *x, *y, *b, *rmax; #ifdef WEIGHTED double *w; #endif /* outputs */ OUTTYPE *numer, *denom; { int i, j, l, n, nt, n1, nt1, lmin, lmax, maxchunk; double dt, tmax, xi, yi, bi, maxsearch, max2search; double bratio, dratio, dij, dij2, dx, dy, dx2; OUTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; OUTTYPE naccum, daccum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; nt1 = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; /* initialise */ numerLowAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); numerHighAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); denomAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); for(l = 0; l < nt; l++) numer[l] = denom[l] = numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { /* -------- DENOMINATOR -------------*/ bi = b[i]; #ifdef WEIGHTED wi = w[i]; #endif /* increment denominator for all r < b[i] */ bratio = bi/dt; /* lmax is the largest integer STRICTLY less than bratio */ lmax = (int) ceil(bratio) - 1; lmax = (lmax <= nt1) ? lmax : nt1; /* effectively increment entries 0 to lmax */ if(lmax >= 0) denomAccum[lmax] += WI; /* ---------- NUMERATOR -----------*/ /* scan through points (x[j],y[j]) */ xi = x[i]; yi = y[i]; maxsearch = (bi < tmax) ? 
bi : tmax; max2search = maxsearch * maxsearch; /* scan backward from i-1 until |x[j]-x[i]| > maxsearch or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } /* scan forward from i+1 until x[j]-x[i] > maxsearch or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } } } /* Now use the accumulated values to compute the numerator and denominator. The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. */ for(l=nt1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; denom[l] = daccum; naccum += numerHighAccum[l]; numer[l] = naccum; naccum -= numerLowAccum[l]; } } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat.core/src/KrectFunDec.h0000644000176200001440000000563014141377573016161 0ustar liggesusers/* KrectFunDec.h $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Function declarations for Krect Macros: FNAME function name WEIGHTED #defined for weighted version (Kinhom etc) +++ Copyright (C) Adrian Baddeley 2014 ++++ */ void FNAME(width, height, nxy, x, y, #ifdef WEIGHTED w, #endif nr, rmax, trimedge, doIso, doTrans, doBord, doUnco, iso, trans, bnumer, bdenom, unco) /* input data */ double *width, *height; /* window is (0, width) x (0, height) */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ #ifdef WEIGHTED double *w; /* weights (e.g. 
reciprocal intensities) */ #endif /* algorithm parameters */ int *nr; /* number of r values */ double *rmax; /* maximum r value */ double *trimedge; /* maximum edge correction weight */ int *doIso; /* logical: whether to do isotropic correction */ int *doTrans; /* logical: whether to do translation correction */ int *doBord; /* logical: whether to do border correction */ int *doUnco; /* logical: whether to do uncorrected estimator */ /* outputs */ /* These are vectors of length nr if required, otherwise ignored */ double *iso; /* isotropic-corrected estimator */ double *trans; /* translation-corrected estimator */ COUNTTYPE *bnumer; /* numerator of border-corrected estimator */ COUNTTYPE *bdenom; /* denominator of border-corrected estimator */ COUNTTYPE *unco; /* uncorrected estimator */ { int i, j, l, ldist, lbord, M, maxchunk, N, Nr, N1, Nr1; double rstep, Rmax, R2max, wide, high, trim; double xi, yi, bdisti, bx, by, bratio; double dx, dy, dx2, dij, dij2, dratio, edgetrans, edgeiso; double dL, dR, dD, dU, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double aL, aR, aD, aU, cL, cR, cU, cD, extang; int ncor, corner; COUNTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; COUNTTYPE naccum, daccum; double accum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WIJ wij #else #define ZERO 0 #define WIJ 1 #endif N = *nxy; if(N == 0) return; Nr = *nr; Rmax = *rmax; trim = *trimedge; N1 = N - 1; Nr1 = Nr - 1; R2max = Rmax * Rmax; rstep = Rmax/Nr1; wide = *width; high = *height; /* Allocate and initialise scratch space - for border correction, but do it in all cases to keep the compiler happy */ M = (*doBord == 1) ? Nr : 1; numerLowAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); numerHighAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); denomAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); for(l = 0; l < M; l++) numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; #include "KrectV1.h" } #undef ZERO #undef WIJ spatstat.core/src/KrectIncrem.h0000644000176200001440000000462614141377573016236 0ustar liggesusers/* KrectIncrem.h Code to increment numerators of K-function $Revision: 1.5 $ $Date: 2014/02/09 03:00:51 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ */ #ifdef WEIGHTED wj = w[j]; wij = wi * wj; #endif /* determine index of entry to be incremented */ dij = (double) sqrt(dij2); dratio = dij/rstep; /* smallest integer greater than or equal to dratio */ ldist = (int) ceil(dratio); #ifdef UNCORRECTED /* ............ uncorrected estimate ................. */ #ifdef WEIGHTED unco[ldist] += wij; #else (unco[ldist])++; #endif #endif #ifdef BORDER /* ............ border correction ................. */ /* increment numerator for all r such that dij <= r < bi */ /* increment entries ldist to lbord inclusive */ #ifdef WEIGHTED if(lbord >= ldist) { numerLowAccum[ldist] += wij; numerHighAccum[lbord] += wij; } #else if(lbord >= ldist) { (numerLowAccum[ldist])++; (numerHighAccum[lbord])++; } #endif #endif #ifdef TRANSLATION /* ............ translation correction ................. */ edgetrans = 1.0/((1.0 - ABS(dx)/wide) * (1.0 - ABS(dy)/high)); edgetrans = MIN(edgetrans, trim); #ifdef WEIGHTED trans[ldist] += wij * edgetrans; #else trans[ldist] += edgetrans; #endif #endif #ifdef ISOTROPIC /* ............ isotropic correction ................. */ /* half the angle subtended by the intersection between the circle of radius d[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < dij) ? 
acos(dL/dij) : 0.0; aR = (dR < dij) ? acos(dR/dij) : 0.0; aD = (dD < dij) ? acos(dD/dij) : 0.0; aU = (dU < dij) ? acos(dU/dij) : 0.0; /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners: as a fraction of 2 pi this is 0.25 (the integer expression 1/4 would evaluate to 0 in C) */ if(corner) extang += 0.25; /* edge correction factor */ edgeiso = 1 / (1 - extang); edgeiso = MIN(edgeiso, trim); #ifdef WEIGHTED iso[ldist] += wij * edgeiso; #else iso[ldist] += edgeiso; #endif #endif spatstat.core/src/getcif.c0000644000176200001440000000331414141377573015255 0ustar liggesusers#include <string.h> /* system header reconstructed (assumed); the original was lost in extraction, and strcmp() below needs string.h */ #include "methas.h" void fexitc(const char *msg); extern Cifns AreaIntCifns, BadGeyCifns, DgsCifns, DiggraCifns, FikselCifns, GeyerCifns, HardcoreCifns, LennardCifns, LookupCifns, SoftcoreCifns, StraussCifns, StraussHardCifns, MultiStraussCifns, MultiStraussHardCifns, MultiHardCifns, TripletsCifns, PenttinenCifns; Cifns NullCifns = NULL_CIFNS; typedef struct CifPair { char *name; Cifns *p; } CifPair; CifPair CifTable[] = { {"areaint", &AreaIntCifns}, {"badgey", &BadGeyCifns}, {"dgs", &DgsCifns}, {"diggra", &DiggraCifns}, {"geyer", &GeyerCifns}, {"fiksel", &FikselCifns}, {"hardcore", &HardcoreCifns}, {"lookup", &LookupCifns}, {"lennard", &LennardCifns}, {"multihard", &MultiHardCifns}, {"penttinen", &PenttinenCifns}, {"sftcr", &SoftcoreCifns}, {"strauss", &StraussCifns}, {"straush", &StraussHardCifns}, {"straussm", &MultiStraussCifns}, {"straushm", &MultiStraussHardCifns}, {"triplets", &TripletsCifns}, {(char *) NULL, (Cifns *) NULL} }; Cifns getcif(cifname) char *cifname; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(cifname, cp.name) == 0) return(*(cp.p)); } fexitc("Unrecognised cif name; bailing out.\n"); /* control never passes to here, but compilers don't know that */ return(NullCifns); } /* R interface function, to check directly whether cif is recognised */ void knownCif(cifname, answer) char** cifname; int* answer; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(*cifname, cp.name) == 0) { *answer = 1; return; } } *answer = 0; return; } spatstat.core/src/loccumx.h0000644000176200001440000000410414141377573015471 0ustar liggesusers/* loccumx.h C template for loccum.c grid-to-data or data-cross-data functions $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(ntest, xtest, ytest, ndata, xdata, ydata, vdata, nr, rmax, ans) /* inputs */ int *ntest, *ndata, *nr; double *xtest, *ytest, *xdata, *ydata, *vdata; double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point of first pattern */ { int Ntest, Ndata, Nr, Nans; double Rmax; int i, j, k, jleft, kmin, maxchunk, columnstart; double Rmax2, rstep, xtesti, ytesti, xleft; double dx, dy, dx2, d2, d, contrib; Ntest = *ntest; Ndata = *ndata; Nr = *nr; Rmax = *rmax; if(Ntest == 0) return; Nans = Nr * Ntest; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } if(Ndata == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; jleft = 0; OUTERCHUNKLOOP(i, Ntest, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i,
Ntest, maxchunk, 8196) { xtesti = xtest[i]; ytesti = ytest[i]; columnstart = Nr * i; /* start position for f_i(.) in 'ans' */ /* adjust starting point */ xleft = xtesti - Rmax; while((xdata[jleft] < xleft) && (jleft+1 < Ndata)) ++jleft; /* process from jleft until |dx| > Rmax */ for(j=jleft; j < Ndata; j++) { dx = xdata[j] - xtesti; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = ydata[j] - ytesti; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = vdata[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } spatstat.core/src/Efiksel.c0000644000176200001440000000334614141377573015403 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Efiksel.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ C implementation of 'eval' for Fiksel interaction (non-hardcore part) Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double sqrt(), exp(); void Efiksel(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, kkappa, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax, *kkappa; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rmax, r2max, r2maxpluseps, kappa, total; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; kappa = *kkappa; if(nsource == 0 || ntarget == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 16384) { total = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rmax */ for(i=ileft; i < ntarget; i++) { /* squared interpoint distance */ dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) total += exp(- kappa * sqrt(d2)); } values[j] = total; } } } spatstat.core/src/mhv1.h0000644000176200001440000000055214141377573014675 0ustar liggesusers/* mhv1.h marked or unmarked simulation Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_MARKED if(marked) { /* marked process */ #define MH_MARKED YES #include "mhv2.h" #undef MH_MARKED } else { /* unmarked process */ #define MH_MARKED NO #include "mhv2.h" #undef MH_MARKED } spatstat.core/src/raster.c0000644000176200001440000000213014141452520015271 0ustar liggesusers/* raster.c shape_raster() initialise a Raster structure $Revision: 1.1 $ $Date: 2020/11/30 11:19:18 $ */ #include #include "raster.h" void shape_raster(ras,data,xmin,ymin,xmax,ymax,nrow,ncol,mrow,mcol) Raster *ras; /* the raster structure to be initialised */ void *data; int nrow, ncol; /* absolute dimensions of storage array */ int mrow, mcol; /* margins clipped off */ /* e.g. 
valid width is ncol - 2*mcol columns */ double xmin, ymin, /* image dimensions in R^2 after clipping */ xmax, ymax; { ras->data = data; ras->nrow = nrow; ras->ncol = ncol; ras->length = nrow * ncol; ras->rmin = mrow; ras->rmax = nrow - mrow - 1; ras->cmin = mcol; ras->cmax = ncol - mcol - 1; ras->x0 = ras->xmin = xmin; ras->x1 = ras->xmax = xmax; ras->y0 = ras->ymin = ymin; ras->y1 = ras->ymax = ymax; ras->xstep = (xmax-xmin)/(ncol - 2 * mcol - 1); ras->ystep = (ymax-ymin)/(nrow - 2 * mrow - 1); /* Rprintf("xstep,ystep = %lf,%lf\n", ras->xstep,ras->ystep); */ } spatstat.core/src/mhsnoopdef.h0000644000176200001440000000121214141377573016156 0ustar liggesusers/* mhsnoopdef.h Define structure 'Snoop' containing visual debugger parameters and state $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef R_INTERNALS_H_ #include #endif typedef struct Snoop { int active; /* true or false */ int nextstop; /* jump to iteration number 'nextstop' */ int nexttype; /* jump to the next proposal of type 'nexttype' */ SEXP env; /* environment for exchanging data with R */ SEXP expr; /* callback expression for visual debugger */ } Snoop; #define NO_TYPE -1 spatstat.core/src/crossloop.h0000644000176200001440000000356214141377573016051 0ustar liggesusers/* crossloop.h Generic code template for loop for cross-close-pairs operations collecting contributions to point x_i from all points y_j such that ||x_i - y_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n1, n2, maxchunk, jleft; double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; double *x1, *y1, *x2, *y2; $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define CROSSLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n1, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n1, maxchunk, 65536) { \ \ x1i = x1[i]; \ y1i = y1[i]; \ \ INITIAL_I; \ \ jleft = 0; \ \ /* \ adjust starting point jleft \ */ \ xleft = x1i - rmax; \ while((x2[jleft] < xleft) && (jleft+1 < n2)) \ ++jleft; \ \ /* \ process from j = jleft until dx > rmax \ */ \ for(j=jleft; j < n2; j++) { \ dx = x2[j] - x1i; \ if(dx > rmax) \ break; \ dy = y2[j] - y1i; \ d2 = dx * dx + dy * dy; \ if(d2 <= r2max) { \ /* add this (i, j) pair to output */ \ CONTRIBUTE_IJ; \ } \ } \ COMMIT_I; \ } \ } spatstat.core/src/call3d.c0000644000176200001440000002474714141377573015173 0ustar liggesusers/* $Revision: 1.5 $ $Date: 2010/10/24 10:57:02 $ R interface Pass data between R and internally-defined data structures # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #include #include "geom3.h" #include "functable.h" #undef DEBUG #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif void g3one(Point *p, int n, Box *b, Ftable *g); void g3three(Point *p, int n, Box *b, Ftable *g); void g3cen(Point *p, int n, Box *b, H4table *count); void k3trans(Point *p, int n, Box *b, Ftable *k); void k3isot(Point *p, int n, Box *b, Ftable *k); void pcf3trans(Point *p, int n, Box *b, Ftable *pcf, double delta); void pcf3isot(Point *p, int n, Box *b, Ftable *pcf, double delta); void phatminus(Point *p, int n, Box *b, double vside, Itable *count); void phatnaive(Point *p, int n, Box *b, double vside, Itable *count); void p3hat4(Point *p, int n, Box *b, double vside, H4table *count); /* ALLOCATION OF SPACE FOR STRUCTURES/ARRAYS We have defined an alloc() and free() function for each type. However, the free() functions currently do nothing, because we use R_alloc to allocate transient space, which is freed automatically by R. */ Ftable * allocFtable(n) /* allocate function table of size n */ int n; { Ftable *x; x = (Ftable *) R_alloc(1, sizeof(Ftable)); x->n = n; x->f = (double *) R_alloc(n, sizeof(double)); x->num = (double *) R_alloc(n, sizeof(double)); x->denom = (double *) R_alloc(n, sizeof(double)); return(x); } void freeFtable(x) Ftable *x; { } Itable * allocItable(n) int n; { Itable *x; x = (Itable *) R_alloc(1, sizeof(Itable)); x->n = n; x->num = (int *) R_alloc(n, sizeof(int)); x->denom = (int *) R_alloc(n, sizeof(int)); return(x); } void freeItable(x) Itable *x; { } H4table * allocH4table(n) int n; { H4table *x; x = (H4table *) R_alloc(1, sizeof(H4table)); x->n = n; x->obs = (int *) R_alloc(n, sizeof(int)); x->nco = (int *) R_alloc(n, sizeof(int)); x->cen = (int *) R_alloc(n, sizeof(int)); x->ncc = (int *) R_alloc(n, sizeof(int)); return(x); } void freeH4table(x) H4table *x; { } Box * allocBox() /* I know this is ridiculous but it's consistent. 
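(The Box, like everything else here, comes from R_alloc, i.e. transient storage that R reclaims automatically, so freeBox() below is correspondingly a no-op.)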
*/ { Box *b; b = (Box *) R_alloc(1, sizeof(Box)); return(b); } void freeBox(x) Box *x; { } Point * allocParray(n) /* allocate array of n Points */ int n; { Point *p; p = (Point *) R_alloc(n, sizeof(Point)); return(p); } void freeParray(x) Point *x; { } /* CREATE AND INITIALISE DATA STORAGE */ Ftable * MakeFtable(t0, t1, n) double *t0, *t1; int *n; { Ftable *tab; int i, nn; nn = *n; tab = allocFtable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->f[i] = 0.0; tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } Itable * MakeItable(t0, t1, n) double *t0, *t1; int *n; { Itable *tab; int i, nn; nn = *n; tab = allocItable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } H4table * MakeH4table(t0, t1, n) double *t0, *t1; int *n; { H4table *tab; int i, nn; nn = *n; tab = allocH4table(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->obs[i] = 0; tab->nco[i] = 0; tab->cen[i] = 0; tab->ncc[i] = 0; } tab->upperobs = 0; tab->uppercen = 0; return(tab); } /* CONVERSION OF DATA TYPES R -> internal including allocation of internal data types as needed */ Point * RtoPointarray(x,y,z,n) double *x, *y, *z; int *n; { int i, nn; Point *p; nn = *n; p = allocParray(nn); for(i = 0; i < nn; i++) { p[i].x = x[i]; p[i].y = y[i]; p[i].z = z[i]; } return(p); } Box * RtoBox(x0, x1, y0, y1, z0, z1) double *x0, *x1, *y0, *y1, *z0, *z1; { Box *b; b = allocBox(); b->x0 = *x0; b->x1 = *x1; b->y0 = *y0; b->y1 = *y1; b->z0 = *z0; b->z1 = *z1; return(b); } /* CONVERSION OF DATA TYPES internal -> R Note: it can generally be assumed that the R arguments are already allocated vectors of correct length, so we do not allocate them. */ void FtabletoR(tab, t0, t1, n, f, num, denom) /* internal */ Ftable *tab; /* R representation */ double *t0, *t1; int *n; double *f, *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *n = tab->n; for(i = 0; i < tab->n; i++) { f[i] = tab->f[i]; num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeFtable(tab); } void ItabletoR(tab, t0, t1, m, num, denom) /* internal */ Itable *tab; /* R representation */ double *t0, *t1; int *m; int *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; for(i = 0; i < tab->n; i++) { num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeItable(tab); } void H4tabletoR(tab, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) /* internal */ H4table *tab; /* R representation */ double *t0, *t1; int *m; int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; *upperobs = tab->upperobs; *uppercen = tab->uppercen; for(i = 0; i < tab->n; i++) { obs[i] = tab->obs[i]; nco[i] = tab->nco[i]; cen[i] = tab->cen[i]; ncc[i] = tab->ncc[i]; } freeH4table(tab); } /* R CALLING INTERFACE These routines are called from R by > .C("routine-name", ....) 
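For instance, for RcallK3 below (an illustrative sketch matching its declared arguments and its 17-argument registration in init.c, not a call copied from the original sources): > .C("RcallK3", x, y, z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) where each argument is an R vector whose storage mode matches the corresponding C declaration.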
*/ void RcallK3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: k3trans(p, (int) *n, b, tab); break; case 1: k3isot(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); k3trans(p, (int) *n, b, tab); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch(*method) { case 1: g3one(p, (int) *n, b, tab); break; case 3: g3three(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 3\n", *method); g3three(p, (int) *n, b, tab); } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside RcallG3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); g3cen(p, (int) *n, b, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving RcallG3cen\n") } void RcallF3(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* Itable */ int *num, *denom; int *method; { Point *p; Box *b; Itable *count; DEBUGMESSAGE("Inside Rcall_f3\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeItable(t0, t1, m); switch((int) *method) { case 0: phatnaive(p, (int) *n, b, *vside, count); break; case 1: phatminus(p, (int) *n, b, *vside, count); break; default: Rprintf("Method %d not recognised: defaults to 1\n", *method); phatminus(p, (int) *n, b, *vside, count); } ItabletoR(count, t0, t1, m, num, denom); DEBUGMESSAGE("Leaving Rcall_f3\n") } void RcallF3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside Rcallf3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); p3hat4(p, (int) *n, b, *vside, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving Rcallf3cen\n") } void Rcallpcf3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method, delta) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; double *delta; /* Epanechnikov kernel halfwidth */ { Point 
*p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: pcf3trans(p, (int) *n, b, tab, (double) *delta); break; case 1: pcf3isot(p, (int) *n, b, tab, (double) *delta); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); pcf3trans(p, (int) *n, b, tab, (double) *delta); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } spatstat.core/src/KrectV3.h0000644000176200001440000000025314141377573015301 0ustar liggesusers/* KrectV4.h with or without border correction */ if((*doBord) == 1) { #define BORDER #include "KrectV4.h" } else { #undef BORDER #include "KrectV4.h" } spatstat.core/src/Knone.h0000644000176200001440000000567214141377573015104 0ustar liggesusers/* Knone.h Code template for K function estimators in Knone.c Variables: FNAME function name OUTTYPE storage type of the output 'numer' ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.6 $ $Date: 2013/09/18 04:08:26 $ */ void FNAME( nxy, x, y, #ifdef WEIGHTED w, #endif nr, rmax, numer) /* inputs */ int *nxy, *nr; double *x, *y, *rmax; #ifdef WEIGHTED double *w; #endif /* output */ OUTTYPE *numer; { int i, j, l, n, nt, n1, lmin, lmax, maxchunk; double dt, tmax, tmax2, xi, yi; double dratio, dij, dij2, dx, dy, dx2; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; lmax = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; tmax2 = tmax * tmax; /* initialise */ for(l = 0; l < nt; l++) numer[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { #ifdef WEIGHTED wi = w[i]; #endif xi = x[i]; yi = y[i]; /* scan backward from i-1 until x[j] < x[i] -tmax or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* effectively increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } /* scan forward from i+1 until x[j] > x[i] + tmax or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } } } /* Now accumulate the numerator. 
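In other words, the loop below converts the per-interval increments into a cumulative sum, so that on exit numer[l] holds the total weight of ordered pairs (i,j) with d(i,j) <= l * dt; e.g. bin increments (2, 0, 3) accumulate to (2, 2, 5).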
*/ if(nt > 1) for(l=1; l < nt; l++) numer[l] += numer[l-1]; } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat.core/src/corrections.c0000644000176200001440000000256314141452520016335 0ustar liggesusers/* corrections.c Edge corrections $Revision: 1.17 $ $Date: 2021/10/25 10:18:31 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #include #include "chunkloop.h" #include "yesno.h" #include "constants.h" /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define BETWEEN(X,X0,X1) ((double) ( ( (X) - (X0) ) * ( (X) - (X1) ) ) <= 0.0) #define UNDER(X,Y,X0,Y0,X1,Y1) \ ((double) ( ( (Y1) - (Y0) ) * ( (X) - (X0) ) ) >= (double) ( ( (Y) - (Y0) ) * ( (X1) - (X0) ) ) ) #define UNDERNEATH(X,Y,X0,Y0,X1,Y1) \ ((((double) (X0)) < ((double) (X1))) ? UNDER(X,Y,X0,Y0,X1,Y1) : UNDER(X,Y,X1,Y1,X0,Y0)) #define TESTINSIDE(X,Y,X0,Y0,X1,Y1) \ (BETWEEN(X,X0,X1) && UNDERNEATH(X, Y, X0, Y0, X1, Y1)) /* C function ripleybox */ #undef DEBUGBOX #define RIPLEYFUN ripleybox #include "ripleybox.h" #undef RIPLEYFUN /* C function ripboxDebug */ #define DEBUGBOX #define RIPLEYFUN ripboxDebug #include "ripleybox.h" #undef RIPLEYFUN #undef DEBUGBOX /* C function ripleypoly */ #undef DEBUGPOLY #define RIPLEYFUN ripleypoly #include "ripleypoly.h" #undef RIPLEYFUN /* C function rippolDebug */ #define DEBUGPOLY #define RIPLEYFUN rippolDebug #include "ripleypoly.h" #undef RIPLEYFUN #undef DEBUGPOLY spatstat.core/src/PerfectStraussHard.h0000644000176200001440000001303214141377573017573 0ustar liggesusers // ..................... Strauss-Hardcore process .......................... // $Revision: 1.5 $ $Date: 2020/05/12 03:33:08 $ class StraussHardProcess : public PointProcess { public: double beta, gamma, H, R, Hsquared, Rsquared; StraussHardProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri, double Hc); ~StraussHardProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); // void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p); // void Beta(struct Point2 *TempCell); // void CalcBeta(Point2Pattern *p2p); }; StraussHardProcess::StraussHardProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri, double Hc) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; R = Ri; H = Hc; Rsquared = R * R; Hsquared = H * H; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double StraussHardProcess::Interaction(double dsquared) { if(dsquared >= Rsquared) return(1.0); if(dsquared >= Hsquared) return(gamma); return(0.0); } void StraussHardProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void StraussHardProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating StraussHardProcess 
Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating StraussHardProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating StraussHardProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectStraussHard(SEXP beta, SEXP gamma, SEXP r, SEXP hc, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, H, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(hc = AS_NUMERIC(hc)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments // model parameters Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); H = *(NUMERIC_POINTER(hc)); // window Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) { xcells = 9; } else if(xcells < 1) { xcells = 1; } ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) { ycells = 9; } else if(ycells < 1) { ycells = 1; } #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise StraussHard point process StraussHardProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R, H); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat.core/src/loccums.h0000644000176200001440000000415214141377573015467 0ustar liggesusers/* loccums.h C template for loccum.c data-to-data functions $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(n, x, y, v, nr, rmax, ans) /* inputs */ int *n, *nr; double *x, *y, *v; 
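/* x, y: point coordinates (the backward/forward scans below assume the points are sorted by increasing x coordinate); v: the numeric value attached to each point, folded into the running sum or product via INC() */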
double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point */ { int N, Nr, Nans; double Rmax; int i, j, k, kmin, maxchunk, columnstart; double Rmax2, rstep, xi, yi; double dx, dy, dx2, d2, d, contrib; N = *n; Nr = *nr; Rmax = *rmax; if(N == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; Nans = Nr * N; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } OUTERCHUNKLOOP(i, N, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 8196) { xi = x[i]; yi = y[i]; columnstart = Nr * i; /* start position for f_i(.) in 'ans' */ /* process backward until |dx| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } /* process forward until |dx| > Rmax */ if(i < N - 1) { for(j=i+1; j < N; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } } spatstat.core/src/init.c0000644000176200001440000001115414146622476014750 0ustar liggesusers /* Native symbol registration table for spatstat.core package Automatically generated - do not edit this file! */ #include "proto.h" #include <R.h> #include <Rinternals.h> #include <stdlib.h> // for NULL #include <R_ext/Rdynload.h> /* the four system headers above were lost in extraction and are restored here following R's standard registration skeleton */ /* See proto.h for declarations for the native routines registered below. */ static const R_CMethodDef CEntries[] = { {"acrdenspt", (DL_FUNC) &acrdenspt, 10}, {"acrsmoopt", (DL_FUNC) &acrsmoopt, 10}, {"adenspt", (DL_FUNC) &adenspt, 7}, {"asmoopt", (DL_FUNC) &asmoopt, 8}, {"awtcrdenspt", (DL_FUNC) &awtcrdenspt, 11}, {"awtcrsmoopt", (DL_FUNC) &awtcrsmoopt, 11}, {"awtdenspt", (DL_FUNC) &awtdenspt, 8}, {"awtsmoopt", (DL_FUNC) &awtsmoopt, 9}, {"Cclosepaircounts", (DL_FUNC) &Cclosepaircounts, 5}, {"Ccrosspaircounts", (DL_FUNC) &Ccrosspaircounts, 8}, {"Cidw", (DL_FUNC) &Cidw, 14}, {"Cidw2", (DL_FUNC) &Cidw2, 16}, {"crdenspt", (DL_FUNC) &crdenspt, 9}, {"crsmoopt", (DL_FUNC) &crsmoopt, 10}, {"delta2area", (DL_FUNC) &delta2area, 10}, {"denspt", (DL_FUNC) &denspt, 6}, {"digberJ", (DL_FUNC) &digberJ, 6}, {"Ediggatsti", (DL_FUNC) &Ediggatsti, 10}, {"Ediggra", (DL_FUNC) &Ediggra, 11}, {"Efiksel", (DL_FUNC) &Efiksel, 9}, {"Egeyer", (DL_FUNC) &Egeyer, 11}, {"ESdiggra", (DL_FUNC) &ESdiggra, 12}, {"Gdenspt", (DL_FUNC) &Gdenspt, 5}, {"Gsmoopt", (DL_FUNC) &Gsmoopt, 7}, {"Gwtdenspt", (DL_FUNC) &Gwtdenspt, 6}, {"Gwtsmoopt", (DL_FUNC) &Gwtsmoopt, 8}, {"idwloo", (DL_FUNC) &idwloo, 8}, {"idwloo2", (DL_FUNC) &idwloo2, 10}, {"KborderD", (DL_FUNC) &KborderD, 8}, {"KborderI", (DL_FUNC) &KborderI, 8}, {"KnoneD", (DL_FUNC) &KnoneD, 6}, {"KnoneI", (DL_FUNC) &KnoneI, 6}, {"knownCif", (DL_FUNC) &knownCif, 2}, {"KrectDbl", (DL_FUNC) &KrectDbl, 17}, {"KrectInt", (DL_FUNC) &KrectInt, 17}, {"KrectWtd", (DL_FUNC) &KrectWtd, 18}, {"Kwborder", (DL_FUNC) &Kwborder, 9}, {"Kwnone", (DL_FUNC) &Kwnone, 7}, {"locpcfx", (DL_FUNC) &locpcfx, 12}, {"locprod", (DL_FUNC) &locprod, 7}, {"locWpcfx", (DL_FUNC) &locWpcfx, 13}, {"locxprod", (DL_FUNC) &locxprod, 10}, {"RcallF3", (DL_FUNC) &RcallF3, 17}, {"RcallF3cen", (DL_FUNC) &RcallF3cen, 20}, {"RcallG3", (DL_FUNC) &RcallG3, 17}, {"RcallG3cen", (DL_FUNC) &RcallG3cen, 19}, {"RcallK3",
(DL_FUNC) &RcallK3, 17}, {"Rcallpcf3", (DL_FUNC) &Rcallpcf3, 18}, {"ripboxDebug", (DL_FUNC) &ripboxDebug, 11}, {"ripleybox", (DL_FUNC) &ripleybox, 11}, {"ripleypoly", (DL_FUNC) &ripleypoly, 12}, {"rippolDebug", (DL_FUNC) &rippolDebug, 12}, {"scantrans", (DL_FUNC) &scantrans, 11}, {"segdens", (DL_FUNC) &segdens, 10}, {"segwdens", (DL_FUNC) &segwdens, 11}, {"smoopt", (DL_FUNC) &smoopt, 8}, {"wtcrdenspt", (DL_FUNC) &wtcrdenspt, 10}, {"wtcrsmoopt", (DL_FUNC) &wtcrsmoopt, 11}, {"wtdenspt", (DL_FUNC) &wtdenspt, 7}, {"wtsmoopt", (DL_FUNC) &wtsmoopt, 9}, {NULL, NULL, 0} }; static const R_CallMethodDef CallEntries[] = { {"PerfectDGS", (DL_FUNC) &PerfectDGS, 4}, {"PerfectDiggleGratton", (DL_FUNC) &PerfectDiggleGratton, 6}, {"PerfectHardcore", (DL_FUNC) &PerfectHardcore, 4}, {"PerfectPenttinen", (DL_FUNC) &PerfectPenttinen, 5}, {"PerfectStrauss", (DL_FUNC) &PerfectStrauss, 5}, {"PerfectStraussHard", (DL_FUNC) &PerfectStraussHard, 6}, {"thinjumpequal", (DL_FUNC) &thinjumpequal, 3}, {"xmethas", (DL_FUNC) &xmethas, 25}, {NULL, NULL, 0} }; void R_init_spatstat_core(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); } spatstat.core/src/KrectV4.h0000644000176200001440000000027514141377573015306 0ustar liggesusers/* KrectV5.h with or without uncorrected estimator */ if((*doUnco) == 1) { #define UNCORRECTED #include "KrectBody.h" } else { #undef UNCORRECTED #include "KrectBody.h" } spatstat.core/src/sphefrac.c0000644000176200001440000000622614141377573015614 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Routine for calculating surface area of sphere intersected with box # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2013 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif static double pi = 3.141592653589793; /* Factor of 4 * pi * r * r IS ALREADY TAKEN OUT */ double sphesfrac(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double a1(), a2(), a3(); int i, j; p[1] = point->x - box->x0; p[2] = point->y - box->y0; p[3] = point->z - box->z0; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += a1(p[i],r) + a1(q[i],r); #ifdef DEBUG Rprintf("i = %d, a1 = %f, a1 = %f\n", i, a1(p[i],r), a1(q[i],r)); #endif } DBG("Past a1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= a2(p[i], p[j], r) + a2(p[i], q[j], r) + a2(q[i], p[j], r) + a2(q[i], q[j], r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past a2", sum) sum += a3(p[1], p[2], p[3], r) + a3(p[1], p[2], q[3], r); DBG("sum", sum) sum += a3(p[1], q[2], p[3], r) + a3(p[1], q[2], q[3], r); DBG("sum", sum) sum += a3(q[1], p[2], p[3], r) + a3(q[1], p[2], q[3], r); DBG("sum", sum) sum += a3(q[1], q[2], p[3], r) + a3(q[1], q[2], q[3], r); DBG("Past a3", sum) return(1 - sum); } double a1(t, r) double t, r; { /* This is the function A1 divided by 4 pi r^2 */ if(t >= r) return(0.0); return((1 - t/r) * 0.5); } double a2(t1, t2, r) double t1, t2, r; { double c2(); /* This is A2 divided by 4 pi r^2 because c2 is C divided by pi */ return(c2( t1 / r, t2 / r) / 2.0); } double a3(t1, t2, t3, r) double t1, t2, t3, r; { double c3(); /* This is A3 divided by 4 pi r^2 because c3 is C divided by pi */ return(c3(t1 / r, t2 / r, t3 / r) / 4.0); } double c2(a, b) double a, b; { double z, z2; double c2(); /* This is the function C(a, b, 0) divided by pi - assumes a, b > 0 */ if( ( z2 = 1.0 - a * a - b * b) < 0.0 ) return(0.0); z = sqrt(z2); return((atan2(z, a * b) - a * atan2(z, b) - b * atan2(z, a)) / pi); } double c3(a, b, c) double a, b, c; { double za, zb, zc, sum; /* This is C(a,b,c) divided by pi. 
Arguments assumed > 0 */ if(a * a + b * b + c * c >= 1.0) return(0.0); za = sqrt(1 - b * b - c * c); zb = sqrt(1 - a * a - c * c); zc = sqrt(1 - a * a - b * b); sum = atan2(zb, a * c) + atan2(za, b * c) + atan2(zc, a * b) - a * atan2(zb, c) + a * atan2(b, zc) - b * atan2(za, c) + b * atan2(a, zc) - c * atan2(zb, a) + c * atan2(b, za); return(sum / pi - 1); } spatstat.core/src/lennard.c0000644000176200001440000000712114141377573015437 0ustar liggesusers#include #include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Lennard-Jones process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lennard { double sigma; double epsilon; double sigma2; /* sigma^2 */ double foureps; /* 4 * epsilon */ double d2min; /* minimum value of d^2 which yields nonzero intensity */ double d2max; /* maximum value of d^2 which has nontrivial contribution */ double *period; int per; } Lennard; /* MAXEXP is intended to be the largest x such that exp(-x) != 0 although the exact value is not needed */ #define MAXEXP (-log(DOUBLE_XMIN)) #define MINEXP (log(1.001)) /* initialiser function */ Cdata *lennardinit(state, model, algo) State state; Model model; Algor algo; { Lennard *lennard; double sigma2, foureps, minfrac, maxfrac; lennard = (Lennard *) R_alloc(1, sizeof(Lennard)); /* Interpret model parameters*/ lennard->sigma = model.ipar[0]; lennard->epsilon = model.ipar[1]; lennard->period = model.period; /* constants */ lennard->sigma2 = sigma2 = pow(lennard->sigma, 2); lennard->foureps = foureps = 4 * lennard->epsilon; /* thresholds where the interaction becomes trivial */ minfrac = pow(foureps/MAXEXP, (double) 1.0/6.0); if(minfrac > 0.5) minfrac = 0.5; maxfrac = pow(foureps/MINEXP, (double) 1.0/3.0); if(maxfrac < 2.0) maxfrac = 2.0; lennard->d2min = sigma2 * minfrac; lennard->d2max = sigma2 * maxfrac; /* periodic boundary conditions? 
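(Here, as in lookup.c and multihard.c, model.period[0] > 0.0 signals a toroidal window; the periodic branch then measures distances with the CLOSE_PERIODIC_D2 macro from dist2.h.)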
*/ lennard->per = (model.period[0] > 0.0); return((Cdata *) lennard); } /* conditional intensity evaluator */ double lennardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, ratio6, pairsum, cifval; double sigma2, d2max, d2min; double *period; Lennard *lennard; DECLARE_CLOSE_D2_VARS; lennard = (Lennard *) cdata; sigma2 = lennard->sigma2; d2max = lennard->d2max; d2min = lennard->d2min; period = lennard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lennard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) { if(d2 < d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) { if(d2 < d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) { if(d2 < lennard->d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) { if(d2 < lennard->d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } } cifval *= exp(lennard->foureps * pairsum); return cifval; } Cifns LennardCifns = { &lennardinit, &lennardcif, (updafunptr) NULL, NO}; spatstat.core/src/dist2.h0000644000176200001440000000451314141377573015050 0ustar liggesusers/* dist2.h External declarations for the functions defined in dist2.c and In-line cpp macros for similar purposes $Revision: 1.20 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double dist2(double u, double v, double x, double y, double *period); double dist2either(double u, double v, double x, double y, double *period); int dist2thresh(double u, double v, double x, double y, double *period, double r2); int dist2Mthresh(double u, double v, double x, double y, double *period, double r2); /* Efficient macros to test closeness of points */ /* These variables must be declared (note: some files e.g. straush.c use 'RESIDUE' explicitly) */ #define DECLARE_CLOSE_VARS \ register double DX, DY, DXP, DYP, RESIDUE #define DECLARE_CLOSE_D2_VARS \ register double DX, DY, DXP, DYP, DX2 #define CLOSE(U,V,X,Y,R2) \ ((DX = X - U), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && \ ((DY = Y - V), \ (RESIDUE = RESIDUE - DY * DY), \ (RESIDUE > 0.0)))) #define CLOSE_D2(U,V,X,Y,R2,D2) \ ((DX = X - U), \ (DX2 = DX * DX), \ (DX2 < R2) && (((DY = Y - V), \ (D2 = DX2 + DY * DY), \ (D2 < R2)))) /* The following calculates X mod P, but it works only if X \in [-P, P] so that X is the difference between two values that lie in an interval of length P */ #define CLOSE_PERIODIC(U,V,X,Y,PERIOD,R2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? DY : DYP), \ (RESIDUE = RESIDUE - DY * DY), \ (RESIDUE > 0.0) ))) #define CLOSE_PERIODIC_D2(U,V,X,Y,PERIOD,R2,D2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (D2 = DX * DX), \ ((D2 < R2) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ?
DY : DYP), \ (D2 += DY * DY), \ (D2 < R2) ))) spatstat.core/src/multihard.c0000644000176200001440000000735714141377573016010 0ustar liggesusers#include <R.h> #include <math.h> /* system headers reconstructed (assumed); the originals were lost in extraction */ #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiHard { int ntypes; double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc2; /* squared radii */ double range2; /* square of interaction range */ double *period; int per; } MultiHard; /* initialiser function */ Cdata *multihardinit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2; double h, h2, range2; MultiHard *multihard; multihard = (MultiHard *) R_alloc(1, sizeof(MultiHard)); multihard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multihard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multihard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); /* Copy and process model parameters*/ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { h = model.ipar[i + j*ntypes]; h2 = h * h; MAT(multihard->hc, i, j, ntypes) = h; MAT(multihard->hc2, i, j, ntypes) = h2; if(range2 < h2) range2 = h2; } } multihard->range2 = range2; /* periodic boundary conditions? */ multihard->period = model.period; multihard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multihard); } /* conditional intensity evaluator */ double multihardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, ix, ixp1, j, mrk, mrkj; int *marks; double *x, *y; double u, v; double d2, range2, cifval; double *period; MultiHard *multihard; DECLARE_CLOSE_D2_VARS; multihard = (MultiHard *) cdata; range2 = multihard->range2; period = multihard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multihard->ntypes; #ifdef DEBUG Rprintf("scanning data\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multihard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiHardCifns = { &multihardinit, &multihardcif, (updafunptr) NULL, YES}; spatstat.core/src/localpcf.c0000644000176200001440000000064314141377573015601 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> /* system headers reconstructed (assumed); the originals were lost in extraction */ #include "chunkloop.h" /* localpcf.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege
spatstat.core/src/localpcf.c0000644000176200001440000000064314141377573015601 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "chunkloop.h" /* localpcf.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Assumes point patterns are sorted in increasing order of x coordinate */ #undef WEIGHTED #include "localpcf.h" #define WEIGHTED 1 #include "localpcf.h" spatstat.core/src/areapair.c0000644000176200001440000000375414141377573015604 0ustar liggesusers/* areapair.c $Revision: 1.7 $ $Date: 2018/12/18 02:43:11 $ Specialised code for the second order conditional intensity of the area-interaction process Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include <R.h> #include <Rmath.h> #include "yesno.h" /* computes area of b(A, r) \cap b(B, r) \setminus \bigcup_i b(X[i], r) */ void delta2area(xa, ya, xb, yb, nother, xother, yother, radius, epsilon, pixcount) double *xa, *ya, *xb, *yb; int *nother; double *xother, *yother; double *radius, *epsilon; int *pixcount; { int Ni, Nj, Nk, i, j, k, count, covered; double xA, yA, xB, yB, r, eps, r2; double xmin, xmax, ymin, ymax, xi, yj; double dxA, dyA; double dxB, dyB; double dx, dy; Nk = *nother; xA = *xa; yA = *ya; xB = *xb; yB = *yb; r = *radius; eps = *epsilon; r2 = r * r; /* find intersection of squares centred on A and B */ if(xA < xB) { xmin = xB - r; xmax = xA + r; } else { xmin = xA - r; xmax = xB + r; } if(xmin > xmax) return; if(yA < yB) { ymin = yB - r; ymax = yA + r; } else { ymin = yA - r; ymax = yB + r; } if(ymin > ymax) return; /* set up grid */ Ni = (int) ceil((xmax - xmin)/eps) + 1; Nj = (int) ceil((ymax - ymin)/eps) + 1; count = 0; for(i = 0, xi = xmin; i < Ni; i++, xi += eps) { dxA = xi - xA; for(j = 0, yj = ymin; j < Nj; j++, yj += eps) { dyA = yj - yA; if(dxA * dxA + dyA * dyA <= r2) { /* grid point belongs to b(A, r) */ dxB = xi - xB; dyB = yj - yB; if(dxB * dxB + dyB * dyB <= r2) { /* grid point belongs to b(A,r) \cap b(B,r) */ covered = NO; /* test whether it is covered by another b(X[k], r) */ for(k = 0; k < Nk; k++) { dx = xi - xother[k]; dy = yj - yother[k]; if(dx * dx + dy * dy <= r2) { covered = YES; break; } } if(!covered) { ++count; } } } } } *pixcount = count; } spatstat.core/src/g3.c0000644000176200001440000001266514141377573014326 0ustar liggesusers#include <math.h> #include <R.h> #include "geom3.h" #include "functable.h" /* $Revision: 1.3 $ $Date: 2012/05/22 07:17:31 $ G function (nearest neighbour distribution) of 3D point pattern Let b = distance from point p[i] to boundary of box d = distance from p[i] to nearest p[j] method = 1 naive ratio estimator (Ripley 1981) numerator(r) = count(i: b >= r, d <= r) denominator(r) = count(i: b >= r) method = 2 minus sampling estimator numerator(r) = count(i: b >= r, d <= r) denominator(r) = lambda * volume(x: b >= r) where lambda = (no of points)/volume(box) method = 3 Hanisch's G3 numerator(r) = count(i: b >= d, d <= r) denominator(r) = count(i: b >= d) method = 4 Hanisch's G4 numerator(r) = count(i: b >= d, d <= r) denominator(r) = fudge * volume(x: b >= r) fudge = numerator(R)/denominator(R) R = sup{r : denominator(r) > 0 } # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2012. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #define MIN(X,Y) (((X) > (Y)) ?
(Y) : (X)) double * nndist3(p, n, b) /* compute nearest neighbour distance for each p[i] */ Point *p; int n; Box *b; { register int i, j; register double dx, dy, dz, dist2, nearest2, huge2; Point *ip, *jp; double *nnd; nnd = (double *) R_alloc(n, sizeof(double)); dx = b->x1 - b->x0; dy = b->y1 - b->y0; dz = b->z1 - b->z0; huge2 = 2.0 * (dx * dx + dy * dy + dz * dz); /* scan each point and find closest */ for( i = 0; i < n; i++) { ip = p + i; nearest2 = huge2; for(j = 0; j < n; j++) if(j != i) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist2 = dx * dx + dy * dy + dz * dz; if(dist2 < nearest2) nearest2 = dist2; } nnd[i] = sqrt(nearest2); } return(nnd); } double * border3(p, n, b) /* compute distances to border */ Point *p; int n; Box *b; { register int i; register double bord; register Point *ip; double *bored; bored = (double *) R_alloc(n, sizeof(double)); for( i = 0; i < n; i++) { ip = p + i; bord = MIN(ip->x - b->x0, b->x1 - ip->x); bord = MIN(bord, ip->y - b->y0); bord = MIN(bord, b->y1 - ip->y); bord = MIN(bord, ip->z - b->z0); bord = MIN(bord, b->z1 - ip->z); bored[i] = bord; } return(bored); } void g3one(p, n, b, g) Point *p; int n; Box *b; Ftable *g; { register int i, l, lbord, lnnd; double dt; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ for(l = 0; l < g->n; l++) (g->num)[l] = (g->denom)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { lbord = floor( (bord[i] - g->t0) / dt ); if(lbord >= g->n) lbord = g->n - 1; for(l = 0; l <= lbord; l++) (g->denom)[l] += 1.0; lnnd = ceil( (nnd[i] - g->t0) / dt ); if(lnnd < 0) lnnd = 0; for(l = lnnd; l <= lbord; l++) (g->num)[l] += 1.0; } /* compute ratio */ for(l = 0; l < g->n; l++) (g->f)[l] = ((g->denom)[l] > 0)? (g->num)[l] / (g->denom)[l] : 1.0; } void g3three(p, n, b, g) Point *p; int n; Box *b; Ftable *g; { register int i, l, lmin; double dt; int denom; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ denom = 0; for(l = 0; l < g->n; l++) (g->num)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { if(nnd[i] <= bord[i]) { ++denom; lmin = ceil( (nnd[i] - g->t0) / dt ); if(lmin < 0) lmin = 0; for(l = lmin; l < g->n; l++) (g->num)[l] += 1.0; } } /* compute ratio */ for(l = 0; l < g->n; l++) { (g->denom)[l] = denom; (g->f)[l] = (denom > 0)? 
(g->num)[l] / (double) denom : 1.0; } } void g3cen(p, n, b, count) Point *p; int n; Box *b; H4table *count; { register int i, lcen, lobs; register double dt, cens, obsv; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* spacing of histogram cells */ dt = (count->t1 - count->t0)/(count->n - 1); /* 'count' is assumed to have been initialised */ for(i = 0; i < n; i++) { obsv = nnd[i]; cens = bord[i]; lobs = ceil( (obsv - count->t0) / dt ); lcen = floor( (cens - count->t0) / dt ); if(obsv <= cens) { /* observation is uncensored; increment all four histograms */ if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) { (count->obs)[lobs]++; (count->nco)[lobs]++; } if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) { (count->cen)[lcen]++; (count->ncc)[lcen]++; } } else { /* observation is censored; increment only two histograms */ lobs = MIN(lobs, lcen); if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) (count->obs)[lobs]++; if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) (count->cen)[lcen]++; } } }
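/* [Editor's illustrative sketch; not part of the package source.]
   Scalar restatement of the 'method 1' ratio estimator computed by g3one
   above: at distance r, G(r) is estimated by the proportion of points,
   among those lying at least r from the boundary, whose nearest
   neighbour is within r. */
#ifdef G3_RATIO_EXAMPLE
static double G_ratio_at_r(double *nnd, double *bord, int n, double r)
{
  int i, num, denom;
  num = denom = 0;
  for(i = 0; i < n; i++) {
    if(bord[i] >= r) {
      denom++;                 /* point is eligible: border distance >= r */
      if(nnd[i] <= r) num++;   /* and its nearest neighbour is within r  */
    }
  }
  return (denom > 0) ? ((double) num)/((double) denom) : 1.0;
}
#endif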
spatstat.core/src/sftcr.c0000644000176200001440000000436614141377573015135 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Soft Core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Softcore { double sigma; double kappa; double nook; /* -1/kappa */ double stok; /* sigma^(2/kappa) */ double *period; int per; } Softcore; /* initialiser function */ Cdata *sftcrinit(state, model, algo) State state; Model model; Algor algo; { Softcore *softcore; softcore = (Softcore *) R_alloc(1, sizeof(Softcore)); /* Interpret model parameters*/ softcore->sigma = model.ipar[0]; softcore->kappa = model.ipar[1]; softcore->period = model.period; /* constants */ softcore->nook = -1/softcore->kappa; softcore->stok = pow(softcore->sigma, 2/softcore->kappa); /* periodic boundary conditions? */ softcore->per = (model.period[0] > 0.0); return((Cdata *) softcore); } /* conditional intensity evaluator */ double sftcrcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairsum, cifval, nook, stok; Softcore *softcore; softcore = (Softcore *) cdata; nook = softcore->nook; stok = softcore->stok; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(softcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],softcore->period); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { d2 = dist2(u,v,x[j],y[j],softcore->period); pairsum += pow(d2, nook); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = pow(u - x[j],2) + pow(v-y[j],2); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { d2 = pow(u - x[j],2) + pow(v-y[j],2); pairsum += pow(d2, nook); } } } cifval *= exp(-stok * pairsum); return cifval; } Cifns SftcrCifns = { &sftcrinit, &sftcrcif, (updafunptr) NULL, NO}; spatstat.core/src/mhsnoop.c #include <R.h> #include <Rmath.h> #include <Rdefines.h> #include <R_ext/Utils.h> #include "methas.h" #include "mhsnoopdef.h" /* mhsnoop.c $Revision: 1.9 $ $Date: 2018/12/18 02:43:11 $ support for visual debugger in RMH Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ /* To switch on debugging code, insert the line: #define MH_DEBUG YES */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif void initmhsnoop(Snoop *s, SEXP env) { s->active = isEnvironment(env); s->nextstop = 0; /* stop at iteration 0 */ s->nexttype = NO_TYPE; /* deactivated */ if(s->active) { s->env = env; s->expr = findVar(install("callbackexpr"), env); } else { s->env = s->expr = R_NilValue; } } void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype) { SEXP e; int npts, j; /* passed from C to R before debugger */ SEXP Sirep, Sx, Sy, Sm, Sproptype, Sproplocn, Spropmark, Spropindx; SEXP Snumer, Sdenom, Sitype; double *Px, *Py, *Pproplocn; int *Pm; /* passed from R to C after debugger */ SEXP Sinxt, Stnxt, SitypeUser; #if MH_DEBUG Rprintf("mhsnoop called at iteration %d\n", irep); #endif if(!(s->active)) return; #if MH_DEBUG Rprintf("mhsnoop is active\n"); #endif /* execute when the simulation reaches the next stopping time: a specified iteration number 'nextstop' or a specified proposal type 'nexttype' */ if(irep != s->nextstop && prop->itype != s->nexttype) return; #if MH_DEBUG Rprintf("debug triggered\n"); #endif /* environment for communication with R */ e = s->env; /* copy data to R */ /* copy iteration number */ PROTECT(Sirep = NEW_INTEGER(1)); *(INTEGER_POINTER(Sirep)) = irep; setVar(install("irep"), Sirep, e); UNPROTECT(1); /* copy (x,y) coordinates */ npts = state->npts; PROTECT(Sx = NEW_NUMERIC(npts)); PROTECT(Sy = NEW_NUMERIC(npts)); Px = NUMERIC_POINTER(Sx); Py = NUMERIC_POINTER(Sy); for(j = 0; j < npts; j++) { Px[j] = state->x[j]; Py[j] = state->y[j]; } setVar(install("xcoords"), Sx, e); setVar(install("ycoords"), Sy, e); UNPROTECT(2); /* copy marks */ if(state->ismarked) { PROTECT(Sm = NEW_INTEGER(npts)); Pm = INTEGER_POINTER(Sm); for(j = 0; j < npts; j++) { Pm[j] = state->marks[j]; } setVar(install("mcodes"), Sm, e); UNPROTECT(1); } /* proposal type */ PROTECT(Sproptype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sproptype)) = prop->itype; setVar(install("proptype"), Sproptype, e); UNPROTECT(1); /* proposal coordinates */ PROTECT(Sproplocn = NEW_NUMERIC(2)); Pproplocn = NUMERIC_POINTER(Sproplocn); Pproplocn[0] = prop->u; Pproplocn[1] = prop->v; setVar(install("proplocn"), Sproplocn, e); UNPROTECT(1); /* proposal mark
value */ if(state->ismarked) { PROTECT(Spropmark = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropmark)) = prop->mrk; setVar(install("propmark"), Spropmark, e); UNPROTECT(1); } /* proposal point index */ PROTECT(Spropindx = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropindx)) = prop->ix; setVar(install("propindx"), Spropindx, e); UNPROTECT(1); /* Metropolis-Hastings numerator and denominator */ PROTECT(Snumer = NEW_NUMERIC(1)); PROTECT(Sdenom = NEW_NUMERIC(1)); *(NUMERIC_POINTER(Snumer)) = numer; *(NUMERIC_POINTER(Sdenom)) = denom; setVar(install("numerator"), Snumer, e); setVar(install("denominator"), Sdenom, e); UNPROTECT(2); /* tentative outcome of proposal */ PROTECT(Sitype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sitype)) = *itype; setVar(install("itype"), Sitype, e); UNPROTECT(1); /* ..... call visual debugger */ #if MH_DEBUG Rprintf("executing callback\n"); #endif eval(s->expr, s->env); /* update outcome of proposal */ SitypeUser = findVar(install("itype"), e); *itype = *(INTEGER_POINTER(SitypeUser)); #if MH_DEBUG Rprintf("Assigning itype = %d\n", *itype); #endif /* update stopping time */ Sinxt = findVar(install("inxt"), e); s->nextstop = *(INTEGER_POINTER(Sinxt)); Stnxt = findVar(install("tnxt"), e); s->nexttype = *(INTEGER_POINTER(Stnxt)); #if MH_DEBUG if(s->nextstop >= 0) Rprintf("Next stop: iteration %d\n", s->nextstop); if(s->nexttype >= 0) { if(s->nexttype == BIRTH) Rprintf("Next stop: first birth proposal\n"); if(s->nexttype == DEATH) Rprintf("Next stop: first death proposal\n"); if(s->nexttype == SHIFT) Rprintf("Next stop: first shift proposal\n"); } #endif return; } spatstat.core/src/PerfectHardcore.h0000644000176200001440000001150314141377573017060 0ustar liggesusers // ........................... Hardcore process .......................... // $Revision: 1.6 $ $Date: 2020/05/12 03:31:48 $ class HardcoreProcess : public PointProcess { public: double beta, R, Rsquared; HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri); ~HardcoreProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; HardcoreProcess::HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double HardcoreProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = 0; return(rtn); } void HardcoreProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void HardcoreProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating HardcoreProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating HardcoreProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating HardcoreProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; 
headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectHardcore(SEXP beta, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments // model parameters Beta = *(NUMERIC_POINTER(beta)); R = *(NUMERIC_POINTER(r)); // window Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) { xcells = 9; } else if(xcells < 1) { xcells = 1; } ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) { ycells = 9; } else if(ycells < 1) { ycells = 1; } #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Hardcore point process HardcoreProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat.core/src/Knone.c0000644000176200001440000000172014141377573015065 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> /* Knone.c Efficient computation of uncorrected estimates of K for large datasets KnoneI() Estimates K function, returns integer numerator KnoneD() Estimates K function, returns double precision numerator Kwnone() Estimates Kinhom, returns double precision numerator Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef WEIGHTED #define FNAME KnoneI #define OUTTYPE int #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME KnoneD #define OUTTYPE double #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME Kwnone #define WEIGHTED #define OUTTYPE double #include "Knone.h"
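/* [Editor's illustrative sketch; not part of the package source.]
   Knone.c above uses a preprocessor-template idiom: the algorithm is
   written once in a template header (Knone.h, not shown) in terms of
   FNAME, OUTTYPE and the optional WEIGHTED flag, and each #include
   stamps out one concrete function. A minimal self-contained sketch of
   the same idiom, with a hypothetical template header sum_template.h: */
#ifdef TEMPLATE_IDIOM_EXAMPLE
/* hypothetical contents of sum_template.h:
     OUTTYPE FNAME(OUTTYPE *x, int n) {
       int i;
       OUTTYPE s = 0;
       for(i = 0; i < n; i++) s += x[i];
       return s;
     }
*/
#define FNAME sumI
#define OUTTYPE int
#include "sum_template.h"   /* defines sumI() for int data */
#undef FNAME
#undef OUTTYPE
#define FNAME sumD
#define OUTTYPE double
#include "sum_template.h"   /* defines sumD() for double data */
#undef FNAME
#undef OUTTYPE
#endif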
spatstat.core/src/segdens.c0000644000176200001440000000454114141377573015447 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include <R_ext/Utils.h> /* segdens.c Convolution of segments with Gaussian kernel Entry points: segdens unweighted segwdens weighted Copyright (c) Adrian Baddeley, 02 dec 2016 (modified 01 mar 2021) Licence: GPL >= 2.0 $Revision: 1.6 $ $Date: 2021/03/01 00:28:37 $ */ #define DNORM(X, SIG) dnorm((X), (double) 0.0, (SIG), FALSE) #define PNORM(X, SIG) pnorm((X), (double) 0.0, (SIG), TRUE, FALSE) void segdens(sigma, ns, xs, ys, alps, lens, np, xp, yp, z) double *sigma; /* bandwidth */ int *ns; /* number of line segments */ double *xs, *ys, *alps, *lens; /* first endpoint, angle, length */ int *np; /* number of pixels or test locations */ double *xp, *yp; /* pixel coordinates */ double *z; /* result, assumed initially 0 */ { int i, j, Ns, Np; double Sigma; double xsi, ysi, angi, leni, cosi, sini; double dx, dy, u1, u2; Ns = *ns; Np = *np; Sigma = *sigma; for(i = 0; i < Ns; i++) { R_CheckUserInterrupt(); xsi = xs[i]; ysi = ys[i]; angi = alps[i]; leni = lens[i]; cosi = cos(angi); sini = sin(angi); for(j = 0; j < Np; j++) { dx = xp[j] - xsi; dy = yp[j] - ysi; u1 = dx * cosi + dy * sini; u2 = -dx * sini + dy * cosi; z[j] += DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma)); } } } void segwdens(sigma, ns, xs, ys, alps, lens, ws, np, xp, yp, z) double *sigma; /* bandwidth */ int *ns; /* number of line segments */ double *xs, *ys, *alps, *lens; /* first endpoint, angle, length */ double *ws; /* segment weights */ int *np; /* number of pixels or test locations */ double *xp, *yp; /* pixel coordinates */ double *z; /* result, assumed initially 0 */ { int i, j, Ns, Np; double Sigma; double xsi, ysi, angi, leni, cosi, sini, wi; double dx, dy, u1, u2; Ns = *ns; Np = *np; Sigma = *sigma; for(i = 0; i < Ns; i++) { R_CheckUserInterrupt(); xsi = xs[i]; ysi = ys[i]; angi = alps[i]; leni = lens[i]; wi = ws[i]; cosi = cos(angi); sini = sin(angi); for(j = 0; j < Np; j++) { dx = xp[j] - xsi; dy = yp[j] - ysi; u1 = dx * cosi + dy * sini; u2 = -dx * sini + dy * cosi; z[j] += wi * DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma)); } } } spatstat.core/src/f3.c0000644000176200001440000002477314141377573014330 0ustar liggesusers#include <math.h> #include <R.h> #include <R_ext/Utils.h> #include "geom3.h" #include "functable.h" #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif /* $Revision: 1.4 $ $Date: 2016/10/23 04:24:03 $ 3D distance transform # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ /* step lengths in distance transform */ #define STEP1 41 #define STEP2 58 #define STEP3 71 /* (41,58,71)/41 is a good rational approximation to (1, sqrt(2), sqrt(3)) */
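/* [Editor's illustrative note; not part of the package source.]
   Why these integers work: dividing by STEP1 gives 41/41 = 1,
   58/41 = 1.41463... (sqrt(2) = 1.41421...) and 71/41 = 1.73170...
   (sqrt(3) = 1.73205...), so axial, face-diagonal and space-diagonal
   voxel steps are all carried in common integer units with relative
   error below 0.05%; hist3d below converts back to physical units
   via scale = vside/STEP1. A quick sanity check of the approximation: */
#ifdef CHAMFER_STEP_CHECK
#include <assert.h>
static void check_chamfer_steps(void)
{
  assert(fabs(((double) STEP2)/STEP1 - sqrt(2.0)) < 5e-4);
  assert(fabs(((double) STEP3)/STEP1 - sqrt(3.0)) < 5e-4);
}
#endif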
#define MIN(X,Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X,Y) (((X) > (Y)) ? (X) : (Y)) typedef struct IntImage { int *data; int Mx, My, Mz; /* dimensions */ int length; } IntImage; typedef struct BinaryImage { unsigned char *data; int Mx, My, Mz; /* dimensions */ int length; } BinaryImage; #define VALUE(I,X,Y,Z) \ ((I).data)[ (Z) * ((I).Mx) * ((I).My) + (Y) * ((I).Mx) + (X) ] void allocBinImage(b, ok) BinaryImage *b; int *ok; { b->length = b->Mx * b->My * b->Mz; b->data = (unsigned char *) R_alloc(b->length, sizeof(unsigned char)); if(b->data == 0) { Rprintf("Can't allocate memory for %d binary voxels\n", b->length); *ok = 0; } *ok = 1; } void allocIntImage(v, ok) IntImage *v; int *ok; { v->length = v->Mx * v->My * v->Mz; v->data = (int *) R_alloc(v->length, sizeof(int)); if(v->data == 0) { Rprintf("Can't allocate memory for %d integer voxels\n", v->length); *ok = 0; } *ok = 1; } void freeBinImage(b) BinaryImage *b; { } void freeIntImage(v) IntImage *v; { } void cts2bin(p, n, box, vside, b, ok) /* convert a list of points inside a box into a 3D binary image */ Point *p; int n; Box *box; double vside; /* side of a (cubic) voxel */ BinaryImage *b; int *ok; { int i, lx, ly, lz; unsigned char *cp; b->Mx = (int) ceil((box->x1 - box->x0)/vside) + 1; b->My = (int) ceil((box->y1 - box->y0)/vside) + 1; b->Mz = (int) ceil((box->z1 - box->z0)/vside) + 1; allocBinImage(b, ok); if(! (*ok)) return; for(i = b->length, cp = b->data; i ; i--, cp++) *cp = 1; for(i=0;i<n;i++) { lx = (int) ceil((p[i].x - box->x0)/vside)-1; ly = (int) ceil((p[i].y - box->y0)/vside)-1; lz = (int) ceil((p[i].z - box->z0)/vside)-1; if( lx >= 0 && lx < b->Mx && ly >= 0 && ly < b->My && lz >= 0 && lz < b->Mz ) VALUE((*b),lx,ly,lz) = 0; } } void distrans3(b, v, ok) /* Distance transform in 3D */ BinaryImage *b; /* input */ IntImage *v; /* output */ int *ok; { register int x, y, z; int infinity, q; /* allocate v same size as b */ v->Mx = b->Mx; v->My = b->My; v->Mz = b->Mz; allocIntImage(v, ok); if(! (*ok)) return; /* compute largest possible distance */ infinity = (int) ceil( ((double) STEP3) * sqrt( ((double) b->Mx) * b->Mx + ((double) b->My) * b->My + ((double) b->Mz) * b->Mz)); /* Forward pass: Top to Bottom; Back to Front; Left to Right. */ for(z=0;z<b->Mz;z++) { R_CheckUserInterrupt(); for(y=0;y<b->My;y++) { for(x=0;x<b->Mx;x++) { if(VALUE((*b),x,y,z) == 0) VALUE((*v),x,y,z) = 0; else { q = infinity; #define INTERVAL(W, DW, MW) \ ((DW == 0) || (DW == -1 && W > 0) || (DW == 1 && W < MW - 1)) #define BOX(X,Y,Z,DX,DY,DZ) \ (INTERVAL(X,DX,v->Mx) && INTERVAL(Y,DY,v->My) && INTERVAL(Z,DZ,v->Mz)) #define TEST(DX,DY,DZ,DV) \ if(BOX(x,y,z,DX,DY,DZ) && q > VALUE((*v),x+DX,y+DY,z+DZ) + DV) \ q = VALUE((*v),x+DX,y+DY,z+DZ) + DV /* same row */ TEST(-1, 0, 0, STEP1); /* same plane */ TEST(-1,-1, 0, STEP2); TEST( 0,-1, 0, STEP1); TEST( 1,-1, 0, STEP2); /* previous plane */ TEST( 1, 1,-1, STEP3); TEST( 0, 1,-1, STEP2); TEST(-1, 1,-1, STEP3); TEST( 1, 0,-1, STEP2); TEST( 0, 0,-1, STEP1); TEST(-1, 0,-1, STEP2); TEST( 1,-1,-1, STEP3); TEST( 0,-1,-1, STEP2); TEST(-1,-1,-1, STEP3); VALUE((*v),x,y,z) = q; } } } } /* Backward pass: Bottom to Top; Front to Back; Right to Left.
*/ for(z = b->Mz - 1; z >= 0; z--) { R_CheckUserInterrupt(); for(y = b->My - 1; y >= 0; y--) { for(x = b->Mx - 1; x >= 0; x--) { if((q = VALUE((*v),x,y,z)) != 0) { /* same row */ TEST(1, 0, 0, STEP1); /* same plane */ TEST(-1, 1, 0, STEP2); TEST( 0, 1, 0, STEP1); TEST( 1, 1, 0, STEP2); /* plane below */ TEST( 1, 1, 1, STEP3); TEST( 0, 1, 1, STEP2); TEST(-1, 1, 1, STEP3); TEST( 1, 0, 1, STEP2); TEST( 0, 0, 1, STEP1); TEST(-1, 0, 1, STEP2); TEST( 1,-1, 1, STEP3); TEST( 0,-1, 1, STEP2); TEST(-1,-1, 1, STEP3); VALUE((*v),x,y,z) = q; } } } } } void hist3d(v, vside, count) /* compute histogram of all values in *v using count->n histogram cells ranging from count->t0 to count->t1 and put results in count->num */ IntImage *v; double vside; Itable *count; { register int i, j, k; register int *ip; register double scale, width; /* relationship between distance transform units and physical units */ scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); for(i = 0; i < count->n ; i++) { (count->num)[i] = 0; (count->denom)[i] = v->length; } for(i = v->length, ip = v->data; i; i--, ip++) { k = (int) ceil((*ip * scale - count->t0)/width); k = MAX(k, 0); for(j = k; j < count->n; j++) (count->num)[j]++; } } void hist3dminus(v, vside, count) /* minus sampling */ IntImage *v; double vside; Itable *count; { register int x, y, z, val, border, bx, by, bz, byz, j, kbord, kval; register double scale, width; DEBUGMESSAGE("inside hist3dminus\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeItable */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->Mx - x); border = MIN(bx, byz); kbord = (int) floor((vside * border - count->t0)/width); kbord = MIN(kbord, count->n - 1); /* denominator counts all voxels with distance to boundary >= r */ if(kbord >= 0) for(j = 0; j <= kbord; j++) (count->denom)[j]++; val = VALUE((*v), x, y, z); kval = (int) ceil((val * scale - count->t0)/width); kval = MAX(kval, 0); #ifdef DEBUG /* Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", vside * border, kbord, scale * val, kval); */ #endif /* numerator counts all voxels with distance to boundary >= r and distance to nearest point <= r */ if(kval <= kbord) for(j = kval; j <= kbord; j++) (count->num)[j]++; } } } DEBUGMESSAGE("leaving hist3dminus\n") } void hist3dCen(v, vside, count) /* four censoring-related histograms */ IntImage *v; double vside; H4table *count; { register int x, y, z, val, border, bx, by, bz, byz, kbord, kval; register double scale, width, realborder, realval; DEBUGMESSAGE("inside hist3dCen\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeH4table */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->Mx - x); border = MIN(bx, byz); realborder = vside * border; kbord = (int) floor((realborder - count->t0)/width); val = VALUE((*v), x, y, z); realval = scale * val; kval = (int) ceil((realval - count->t0)/width); /* this could exceed array limits; that will be detected below */ #ifdef DEBUG Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", realborder, kbord, realval, kval); #endif if(realval <= realborder) { /* observation is uncensored; increment all four histograms */ if(kval >= count->n)
++(count->upperobs); else if(kval >= 0) { (count->obs)[kval]++; (count->nco)[kval]++; } if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) { (count->cen)[kbord]++; (count->ncc)[kbord]++; } } else { /* observation is censored; increment only two histograms */ kval = MIN(kval, kbord); if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) (count->obs)[kval]++; if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) (count->cen)[kbord]++; } } } } DEBUGMESSAGE("leaving hist3dCen\n") } /* CALLING ROUTINES */ void phatminus(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dminus(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } void phatnaive(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatnaive\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\n into distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3d..."); hist3d(&v, vside, count); DEBUGMESSAGE("out of hist3d\n") freeIntImage(&v); } } void p3hat4(p, n, box, vside, count) Point *p; int n; Box *box; double vside; H4table *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dCen(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } }
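/* [Editor's illustrative note; not part of the package source.]
   In hist3dCen above (and g3cen in g3.c) each sample point contributes a
   distance observation o that is right-censored at its boundary distance c:
   when o <= c the value is observed exactly and all four histograms
   (obs, nco, cen, ncc) are updated; otherwise only min(o, c) is known and
   just obs and cen are updated. A scalar sketch of that censoring rule: */
#ifdef CENSOR_RULE_EXAMPLE
typedef struct { double value; int uncensored; } CensoredValue;
static CensoredValue censor_at_boundary(double obsv, double cens)
{
  CensoredValue out;
  out.uncensored = (obsv <= cens);           /* observed exactly? */
  out.value = out.uncensored ? obsv : cens;  /* otherwise only a lower bound */
  return out;
}
#endif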
spatstat.core/src/diggra.c0000644000176200001440000000637014141377573015256 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Diggle-Gratton process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = 0 for t < delta = ((t-delta)/(rho-delta))^kappa for delta <= t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Diggra { double kappa; double delta; double rho; double delta2; /* delta^2 */ double rho2; /* rho^2 */ double fac; /* 1/(rho-delta) */ double *period; int per; } Diggra; /* initialiser function */ Cdata *diggrainit(state, model, algo) State state; Model model; Algor algo; { Diggra *diggra; diggra = (Diggra *) R_alloc(1, sizeof(Diggra)); /* Interpret model parameters*/ diggra->kappa = model.ipar[0]; diggra->delta = model.ipar[1]; diggra->rho = model.ipar[2]; diggra->period = model.period; /* constants */ diggra->delta2 = pow(diggra->delta, 2); diggra->rho2 = pow(diggra->rho, 2); diggra->fac = 1/(diggra->rho - diggra->delta); /* periodic boundary conditions? */ diggra->per = (model.period[0] > 0.0); return((Cdata *) diggra); } /* conditional intensity evaluator */ double diggracif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairprod, cifval; double rho2, delta, delta2, fac; double *period; DECLARE_CLOSE_D2_VARS; Diggra *diggra; diggra = (Diggra *) cdata; period = diggra->period; rho2 = diggra->rho2; delta = diggra->delta; delta2 = diggra->delta2; fac = diggra->fac; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(diggra->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) { if(d2 < delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) { if(d2 < delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) { if(d2 <= delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) { if(d2 <= delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } } cifval = pow(pairprod, diggra->kappa); return cifval; } Cifns DiggraCifns = { &diggrainit, &diggracif, (updafunptr) NULL, NO};
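/* [Editor's illustrative sketch; not part of the package source.]
   Direct scalar form of the Diggle-Gratton pair interaction used above:
   e(t) = 0 for t < delta, ((t-delta)/(rho-delta))^kappa for
   delta <= t < rho, and 1 for t >= rho. diggracif avoids calling pow()
   once per pair: it multiplies the unexponentiated factors into pairprod
   and raises the product to the power kappa once at the end, which is
   equivalent because (prod_i f_i)^kappa = prod_i f_i^kappa. */
#ifdef DIGGRA_INTERACTION_EXAMPLE
#include <math.h>
static double diggra_interaction(double t, double delta, double rho,
                                 double kappa)
{
  if(t < delta) return 0.0;   /* hard core */
  if(t >= rho)  return 1.0;   /* beyond interaction range */
  return pow((t - delta)/(rho - delta), kappa);
}
#endif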
spatstat.core/src/loccum.c0000644000176200001440000000304214141377573015274 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "chunkloop.h" /* loccum.c $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Compute local cumulative sums or products of weights locsum: f_i(t) = \sum_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxsum: f_u(t) = \sum_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) locprod: f_i(t) = \prod_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxprod: f_u(t) = \prod_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) Assumes point patterns are sorted in increasing order of x coordinate Uses C code template files : loccums.h, loccumx.h */ /* data-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccums.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccums.h" /* test-grid-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locxsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccumx.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locxprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccumx.h" spatstat.core/src/geyer.c0000644000176200001440000002363214141377573015134 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "methas.h" #include "dist2.h" void fexitc(const char *msg); #undef MH_DEBUG /* Conditional intensity function for a Geyer saturation process. */ typedef struct Geyer { /* model parameters */ double gamma; double r; double s; /* transformations of the parameters */ double r2; double loggamma; int hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; #ifdef MH_DEBUG int *freshaux; int prevtype; #endif } Geyer; Cdata *geyerinit(state, model, algo) State state; Model model; Algor algo; { int i, j, n1; Geyer *geyer; double r2; double *period; DECLARE_CLOSE_VARS; geyer = (Geyer *) R_alloc(1, sizeof(Geyer)); /* Interpret model parameters*/ geyer->gamma = model.ipar[0]; geyer->r = model.ipar[1]; /* not squared any more */ geyer->s = model.ipar[2]; geyer->r2 = geyer->r * geyer->r; #ifdef MHDEBUG Rprintf("Initialising Geyer gamma=%lf, r=%lf, sat=%lf\n", geyer->gamma, geyer->r, geyer->s); #endif /* is the model numerically equivalent to hard core ? */ geyer->hard = (geyer->gamma < DOUBLE_EPS); geyer->loggamma = (geyer->hard) ? 0 : log(geyer->gamma); /* periodic boundary conditions? */ geyer->period = model.period; geyer->per = (model.period[0] > 0.0); /* allocate storage for auxiliary counts */ geyer->aux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); #ifdef MH_DEBUG geyer->freshaux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); geyer->prevtype = -42; #endif r2 = geyer->r2; /* Initialise auxiliary counts */ for(i = 0; i < state.npmax; i++) geyer->aux[i] = 0; if(geyer->per) { /* periodic */ period = geyer->period; if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } else { /* Euclidean distance */ if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } return((Cdata *) geyer); } double geyercif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, npts, tee; double u, v, r2, s; double w, a, b, f, cifval; double *x, *y; int *aux; double *period; Geyer *geyer; DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; npts = state.npts; if(npts==0) return ((double) 1.0); x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; r2 = geyer->r2; s = geyer->s; period = geyer->period; aux = geyer->aux; /* tee = neighbour count at the point in question; w = sum of changes in (saturated) neighbour counts at other points */ tee = w = 0.0; if(prop.itype == BIRTH) { if(geyer->per) { /* periodic distance */ for(j=0; j < npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { tee++; f = s - aux[j]; if(f > 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { tee++; f = s - aux[j]; if(f > 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } } else if(prop.itype == DEATH) { tee = aux[ix]; if(geyer->per) { /* Periodic distance */ for(j=0; j < npts; j++) { if(j == ix) continue; if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { f = s - aux[j]; if(f > 0) /* j is not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(j == ix) continue; if(CLOSE(u,v,x[j],y[j],r2)) { f = s - aux[j]; if(f > 0) /* j was not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } }
} } } } else if(prop.itype == SHIFT) { /* Compute the cif at the new point, not the ratio of new/old */ if(geyer->per) { /* Periodic distance */ for(j=0; j < npts; j++) { if(j == ix) continue; if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { tee++; a = aux[j]; /* subtract 1 if j is also close to the old location of point ix */ if(CLOSE_PERIODIC(x[ix],y[ix],x[j],y[j],period,r2)) a = a - 1; b = a + 1; if(s >= b) w = w + 1; } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(j == ix) continue; if(CLOSE(u,v,x[j],y[j],r2)) { tee++; a = aux[j]; /* subtract 1 if j is also close to the old location of point ix */ if(CLOSE(x[ix],y[ix],x[j],y[j],r2)) a = a - 1; b = a + 1; if(s >= b) w = w + 1; } } } } w = w + ((tee < s) ? tee : s); if(geyer->hard) { if(tee > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp(geyer->loggamma*w); return cifval; } void geyerupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, j; int oldclose, newclose; double u, v, xix, yix, r2; double *x, *y; int *aux; double *period; Geyer *geyer; #ifdef MH_DEBUG int *freshaux; int i; int oc, nc; #endif DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; period = geyer->period; aux = geyer->aux; r2 = geyer->r2; x = state.x; y = state.y; npts = state.npts; #ifdef MH_DEBUG /* ........................ debugging cross-check ................ */ /* recompute 'aux' values afresh */ freshaux = geyer->freshaux; for(i = 0; i < state.npts; i++) freshaux[i] = 0; if(geyer->per) { /* periodic */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) freshaux[i] += 1; } } } else { /* Euclidean distance */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) freshaux[i] += 1; } } } /* Check agreement with 'aux' */ for(j = 0; j < state.npts; j++) { if(aux[j] != freshaux[j]) { Rprintf("\n\taux[%d] = %d, freshaux[%d] = %d\n", j, aux[j], j, freshaux[j]); Rprintf("\tnpts = %d\n", state.npts); Rprintf("\tperiod = (%lf, %lf)\n", period[0], period[1]); if(geyer->prevtype == BIRTH) error("updaux failed after BIRTH"); if(geyer->prevtype == DEATH) error("updaux failed after DEATH"); if(geyer->prevtype == SHIFT) error("updaux failed after SHIFT"); error("updaux failed at start"); } } /* OK. Record type of this transition */ geyer->prevtype = prop.itype; /* ................ end debug cross-check ................ */ #endif if(prop.itype == BIRTH) { /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counter for new point */ aux[npts] = 0; /* update all auxiliary counters */ if(geyer->per) { /* periodic distance */ for(j=0; j < npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { aux[j] += 1; aux[npts] += 1; } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { aux[j] += 1; aux[npts] += 1; } } } } else if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; /* decrement auxiliary counter for each point close to 'ix', and shift the entries above 'ix' down one slot */ if(geyer->per) { /* periodic distance */ for(j=0; j < npts; j++) { if(j == ix) continue; if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) aux[j] -= 1; if(j >= ix) aux[j-1] = aux[j]; } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(j == ix) continue; if(CLOSE(u,v,x[j],y[j],r2)) aux[j] -= 1; if(j >= ix) aux[j-1] = aux[j]; } } } else if(prop.itype == SHIFT) { /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute auxiliary counter for point 'ix' */ aux[ix] = 0; /* update auxiliary counters for other points */ if(geyer->per) { for(j=0; j < npts; j++) { if(j == ix) continue; oldclose = CLOSE_PERIODIC(xix,yix,x[j],y[j],period,r2); newclose = CLOSE_PERIODIC(u,v,x[j],y[j],period,r2); if(newclose) { aux[ix] += 1; if(!oldclose) aux[j] += 1; } else if(oldclose) aux[j] -= 1; } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(j == ix) continue; oldclose = CLOSE(xix,yix,x[j],y[j],r2); newclose = CLOSE(u,v,x[j],y[j],r2); if(newclose) { aux[ix] += 1; if(!oldclose) aux[j] += 1; } else if(oldclose) aux[j] -= 1; } } } } Cifns GeyerCifns = { &geyerinit, &geyercif, (updafunptr) &geyerupd, NO};
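/* [Editor's illustrative sketch; not part of the package source.]
   The exponent w accumulated in geyercif is min(s, t(u)) for the proposed
   point u plus the change in min(s, t(x_j)) at each existing r-close
   neighbour x_j, where t() is the neighbour count. The per-neighbour
   contribution of a birth, matching the f = s - aux[j] logic above: */
#ifdef GEYER_INCREMENT_EXAMPLE
static double geyer_birth_increment(double s, int auxj)
{
  double f = s - auxj;
  if(f > 1.0) return 1.0;  /* j stays below saturation: count rises by 1 */
  if(f > 0.0) return f;    /* j just reaches saturation (fractional s)   */
  return 0.0;              /* j already saturated: no change             */
}
#endif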
ntypes-1 */ double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... ntypes-1 */ double *rad2; /* squared radii */ double *hc2; /* squared radii */ double *rad2hc2; /* r^2 - h^2 */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStraussHard; /* initialiser function */ Cdata *straushminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, h, r2, h2, logg, range2; MultiStraussHard *multistrausshard; multistrausshard = (MultiStraussHard *) R_alloc(1, sizeof(MultiStraussHard)); multistrausshard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrausshard->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrausshard->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad2hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrausshard->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 values of gamma, then n^2 values of r, then n^2 values of h */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[ i + j*ntypes]; r = model.ipar[ n2 + i + j*ntypes]; h = model.ipar[2*n2 + i + j*ntypes]; r2 = r * r; h2 = h * h; hard = (g < DOUBLE_EPS); logg = (hard) ? 0 : log(g); MAT(multistrausshard->gamma, i, j, ntypes) = g; MAT(multistrausshard->rad, i, j, ntypes) = r; MAT(multistrausshard->hc, i, j, ntypes) = h; MAT(multistrausshard->rad2, i, j, ntypes) = r2; MAT(multistrausshard->hc2, i, j, ntypes) = h2; MAT(multistrausshard->rad2hc2, i, j, ntypes) = r2-h2; MAT(multistrausshard->hard, i, j, ntypes) = hard; MAT(multistrausshard->loggamma, i, j, ntypes) = logg; if(r2 > range2) range2 = r2; } } multistrausshard->range2 = range2; /* periodic boundary conditions? 
*/ multistrausshard->period = model.period; multistrausshard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrausshard); } /* conditional intensity evaluator */ double straushmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStraussHard *multistrausshard; DECLARE_CLOSE_D2_VARS; multistrausshard = (MultiStraussHard *) cdata; range2 = multistrausshard->range2; period = multistrausshard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrausshard->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrausshard->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrausshard->kount, m1, m2, ntypes); if(MAT(multistrausshard->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrausshard->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussHardCifns = { &straushminit, &straushmcif, (updafunptr) NULL, YES};
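/* [Editor's illustrative sketch; not part of the package source.]
   For a proposed point of mark m, straushmcif returns the interaction
   weight prod_{m2} gamma[m,m2]^kount[m,m2], or 0 as soon as any pair
   violates the hard core. The per-pair factor of that final product,
   written layout-free on values already extracted from the tables: */
#ifdef STRAUSHM_FACTOR_EXAMPLE
#include <math.h>
static double straushm_pair_factor(double loggamma_mm2, int hard_mm2,
                                   int kount_mm2)
{
  if(hard_mm2)
    return (kount_mm2 > 0) ? 0.0 : 1.0;  /* numerically-hard pair */
  return exp(loggamma_mm2 * kount_mm2);  /* gamma^count */
}
#endif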
spatstat.core/src/pairloop.h0000644000176200001440000000344714141377573015645 0ustar liggesusers/* pairloop.h Generic code template for loop collecting contributions to point x_i from all points x_j such that ||x_i - x_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n, maxchunk; double xi, yi, dx, dy, dx2, d2, r2max; double *x, *y; $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n, maxchunk, 65536) { \ \ xi = x[i]; \ yi = y[i]; \ \ INITIAL_I; \ \ if(i > 0) { \ for(j=i-1; j >= 0; j--) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ \ if(i+1 < n) { \ for(j=i+1; j < n; j++) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ COMMIT_I; \ } \ } spatstat.core/src/proto.h0000644000176200001440000002052214146622477015165 0ustar liggesusers#include <R.h> #include <Rinternals.h> /* Prototype declarations for all native routines in spatstat.core package Automatically generated - do not edit! */ /* Functions invoked by .C */ void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *); void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *); void digberJ(double *, double *, int *, int *, int *, double *); void Gdenspt(int *, double *, double *, double *, double *); void Gwtdenspt(int *, double *, double *, double *, double *, double *); void Gwtdenspt(int *, double *, double *, double *, double *, double *); void denspt(int *, double *, double *, double *, double *, double *); void wtdenspt(int *, double *, double *, double *, double *, double *, double *); void wtdenspt(int *, double *, double *, double *, double *, double *, double *); void adenspt(int *, double *, double *, double *, double *, double *, double *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *); void crdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void acrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void segdens(double *, int *, double *, double *, double *, double *, int *, double *, double *, double *); void segwdens(double *, int *, double *, double *, double *, double *, double *, int *, double *, double *, double *); void Ediggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *); void ESdiggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *, int *); void Ediggatsti(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *); void ripleybox(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void ripboxDebug(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void ripleypoly(int *, double *, double *, double *, int *, double *, int *, double *, double *,
double *, double *, double *); void rippolDebug(int *, double *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *); void RcallK3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *); void RcallG3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *); void RcallF3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *); void RcallG3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *); void Rcallpcf3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, double *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *); void Efiksel(int *, double *, double *, int *, double *, double *, double *, double *, double *); void Egeyer(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *); void Cidw(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *); void Cidw2(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void idwloo(double *, double *, double *, int *, double *, double *, double *, double *); void idwloo2(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void locprod(int *, double *, double *, double *, int *, double *, double *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *); void KborderI(int *, double *, double *, double *, int *, double *, int *, int *); void KborderD(int *, double *, double *, double *, int *, double *, double *, double *); void Kwborder(int *, double *, double *, double *, double *, int *, double *, double *, double *); void KnoneI(int *, double *, double *, int *, double *, int *); void KnoneD(int *, double *, double *, int *, double *, double *); void Kwnone(int *, double *, double *, double *, int *, double *, double *); void KrectWtd(double *, double *, int *, double *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *); void KrectInt(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, int *, int *, int *); void KrectDbl(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *); void locpcfx(int *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, double *); void locWpcfx(int *, double *, double 
*, int *, int *, double *, double *, int *, double *, int *, double *, double *, double *); void knownCif(char *, int *); void scantrans(double *, double *, int *, double *, double *, double *, double *, int *, int *, double *, int *); void Gsmoopt(int *, double *, double *, double *, int *, double *, double *); void Gwtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *); void smoopt(int *, double *, double *, double *, int *, double *, double *, double *); void wtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *); void asmoopt(int *, double *, double *, double *, int *, double *, double *, double *); void awtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *); void crsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void wtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void acrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void awtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void Cclosepaircounts(int *, double *, double *, double *, int *); void Ccrosspaircounts(int *, double *, double *, int *, double *, double *, double *, int *); /* Functions invoked by .Call */ SEXP thinjumpequal(SEXP, SEXP, SEXP); SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectStrauss(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectHardcore(SEXP, SEXP, SEXP, SEXP); SEXP PerfectStraussHard(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectDiggleGratton(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectDGS(SEXP, SEXP, SEXP, SEXP); SEXP PerfectPenttinen(SEXP, SEXP, SEXP, SEXP, SEXP); spatstat.core/src/yesno.h0000644000176200001440000000011614141377573015153 0ustar liggesusers/* yesno.h */ #ifndef YES #define YES (0 == 0) #define NO (!YES) #endif spatstat.core/src/KrectV2.h0000644000176200001440000000027314141377573015302 0ustar liggesusers/* KrectV3.h with or without translation correction */ if((*doTrans) == 1) { #define TRANSLATION #include "KrectV3.h" } else { #undef TRANSLATION #include "KrectV3.h" } spatstat.core/src/Perfect.cc0000755000176200001440000005765114141377573015567 0ustar liggesusers// Debug switch // #define DBGS #include #include #include #include #include #include #include #include #include // #include // FILE *out; // File i/o is deprecated in R implementation #ifdef DBGS #define CHECK(PTR,MESSAGE) if(((void *) PTR) == ((void *) NULL)) error(MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) { \ Rprintf("Value of %s exceeds upper limit %d\n", XNAME, HIGH); \ X = HIGH; \ } else if((X) < (LOW)) { \ Rprintf("Value of %s is below %d\n", XNAME, LOW); \ X = LOW; \ } #else #define CHECK(PTR,MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) X = HIGH; else if((X) < (LOW)) X = LOW; #endif // ......................................... 
// memory allocation // using R_alloc #define ALLOCATE(TYPE) (TYPE *) R_alloc(1, sizeof(TYPE)) #define FREE(PTR) // Alternative using Calloc and Free // #define ALLOCATE(TYPE) (TYPE *) Calloc(1, sizeof(TYPE)) // #define FREE(PTR) Free(PTR) void R_CheckUserInterrupt(void); struct Point{ long int No; float X; float Y; float R; struct Point *next; }; struct Point2{ long int No; float X; float Y; char InLower[2]; double Beta; double TempBeta; struct Point2 *next; }; struct Point3{ char Case; char XCell; char YCell; struct Point3 *next; }; // const float Pi=3.141593; double slumptal(void){ return(runif((double) 0.0, (double) 1.0)); } long int poisson(double lambda){ return((long int)rpois(lambda)); } // ........................... Point patterns .......................... class Point2Pattern { public: long int UpperLiving[2]; long int MaxXCell, MaxYCell, NoP; double XCellDim, YCellDim, Xmin, Xmax, Ymin, Ymax; struct Point2 *headCell[10][10],*dummyCell; char DirX[10], DirY[10]; Point2Pattern(double xmin, double xmax, double ymin, double ymax, long int mxc, long int myc){ long int i,j; UpperLiving[0] = 0; UpperLiving[1] = 0; Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax; DirX[1] = 1; DirY[1] = 0; DirX[2] = 1; DirY[2] = -1; DirX[3] = 0; DirY[3] = -1; DirX[4] = -1; DirY[4] = -1; DirX[5] = -1; DirY[5] = 0; DirX[6] = -1; DirY[6] = 1; DirX[7] = 0; DirY[7] = 1; DirX[8] = 1; DirY[8] = 1; NoP = 0; // dummyCell = ALLOCATE(struct Point2); // dummyCell->next = dummyCell; dummyCell->No = 0; MaxXCell = mxc; MaxYCell = myc; if(MaxXCell>9) MaxXCell = 9; if(MaxYCell>9) MaxYCell = 9; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ // headCell[i][j] = ALLOCATE(struct Point2); // headCell[i][j]->next=dummyCell; } } XCellDim = (Xmax-Xmin)/((double)(MaxXCell+1)); YCellDim = (Ymax-Ymin)/((double)(MaxYCell+1)); }; ~Point2Pattern(){} // void Print(); void Return(double *X, double *Y, int *num, int maxnum); long int Count(); long int UpperCount(); void Empty(); void Clean(); // void DumpToFile(char FileName[100]); // void ReadFromFile(char FileName[100]); }; // void Point2Pattern::Print(){ // long int i,j,k; // k = 0; // struct Point2 *TempCell; // for(i=0;i<=MaxXCell;i++){ // for(j=0;j<=MaxYCell;j++){ // //Rprintf("%d %d:\n",i,j); // TempCell = headCell[i][j]->next; // CHECK(TempCell, "internal error: TempCell is null in Print()"); // while(TempCell->next != TempCell){ // k++; // Rprintf("%f %f %ld %ld %ld=%d %ld=%d UL0 %d UL1 %d %f\n", // TempCell->X,TempCell->Y,k, // TempCell->No, // i,int(TempCell->X/XCellDim), // j,int(TempCell->Y/YCellDim), // TempCell->InLower[0],TempCell->InLower[1], // TempCell->Beta); // TempCell = TempCell->next; // CHECK(TempCell, "internal error: TempCell is null in Print() loop"); // } // } // } // Rprintf("Printed %ld points.\n",k); // } void Point2Pattern::Return(double *X, double *Y, int *num, int maxnum){ long int i,j,k; k =0; *num = 0; #ifdef DBGS Rprintf("executing Return()\n"); #endif if(UpperLiving[0]<=maxnum){ struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ #ifdef DBGS // Rprintf("%d %d:\n",i,j); #endif TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Return()"); while(TempCell->next != TempCell){ X[k] = TempCell->X; Y[k] = TempCell->Y; k++; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Return() loop"); } } } *num = k; } else { *num = -1; } } long int Point2Pattern::Count(){ long int i,j,k; k = 0; struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ 
// Rprintf("%d %d:\n",i,j); TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Count()"); while(TempCell->next != TempCell){ k++; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Count() loop"); } } } //Rprintf("Printed %d points.\n",k); return(k); } // a quick (over)estimate of the number of points in the pattern, // for storage allocation long int Point2Pattern::UpperCount(){ return(UpperLiving[0]); } void Point2Pattern::Empty(){ struct Point2 *TempCell, *TempCell2; long int i,j; #ifdef DBGS long int k; k=0; Rprintf("executing Empty()\n"); #endif for(i=0; i<=this->MaxXCell; i++){ for(j=0; j<=this->MaxYCell; j++){ TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Empty()"); while(TempCell!=TempCell->next){ #ifdef DBGS // k++; Rprintf("%d %d %d\n",i,j,k); #endif TempCell2 = TempCell->next; FREE(TempCell); TempCell = TempCell2; CHECK(TempCell, "internal error: TempCell is null in Empty() loop"); } headCell[i][j]->next = dummyCell; } } } void Point2Pattern::Clean(){ struct Point2 *TempCell, *TempCell2; long int i,j; #ifdef DBGS Rprintf("executing Clean()\n"); #endif for(i=0; i<=MaxXCell; i++){ for(j=0; j<=MaxYCell; j++){ TempCell = headCell[i][j]; CHECK(TempCell, "internal error: TempCell is null in Clean()"); TempCell2 = headCell[i][j]->next; CHECK(TempCell2, "internal error: TempCell2 is null in Clean()"); while(TempCell2!=TempCell2->next){ TempCell2->No = 0; if(TempCell2->InLower[0]==0){ TempCell->next = TempCell2->next; FREE(TempCell2); TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop A"); } else{ TempCell2 = TempCell2->next; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Clean() loop B"); CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop B"); } } } } } //void Point2Pattern::DumpToFile(char FileName[100]){ // FILE *out; // long int i,j; // out = fopen(FileName,"w"); // struct Point2 *TempCell; // for(i=0;i<=MaxXCell;i++){ // for(j=0;j<=MaxYCell;j++){ // //Rprintf("%d %d:\n",i,j); // TempCell = headCell[i][j]->next; // while(TempCell->next != TempCell){ // fprintf(out,"%f\t%f\t%ld\n", // TempCell->X,TempCell->Y,TempCell->No); // TempCell = TempCell->next; // } // } //} //fclose(out); //} //void Point2Pattern::ReadFromFile(char FileName[100]){ // FILE *out; //long int k,XCell,YCell; //float f1,xs,ys; //out = fopen(FileName,"r"); //struct Point2 *TempCell; //k=0; //while(feof(out)==0){ // k++; // fscanf(out,"%f%f\n",&xs,&ys); // //Rprintf("%f %f\n",xs,ys); // // // TempCell = ALLOCATE(struct Point2); // // // TempCell->No = k; // TempCell->X = xs; // TempCell->Y = ys; // TempCell->InLower[0] = 1; // TempCell->InLower[1] = 1; // // f1 = (xs-Xmin)/XCellDim; XCell = int(f1); // if(XCell>MaxXCell) XCell = MaxXCell; // f1 = (ys-Ymin)/YCellDim; YCell = int(f1); // if(YCell>MaxYCell) YCell = MaxYCell; // // TempCell->next = headCell[XCell][YCell]->next; // headCell[XCell][YCell]->next = TempCell; // //} //fclose(out); //Rprintf("%ld points loaded.\n",k); // //} // ........................... Point processes .......................... // ...................... (stationary, pairwise interaction) ............ 
class PointProcess { public: double Xmin, Xmax, Ymin, Ymax, TotalBirthRate, InteractionRange; PointProcess(double xmin, double xmax, double ymin, double ymax){ Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax; } virtual ~PointProcess(){} virtual void NewEvent(double *x, double *y, char *InWindow)=0; virtual void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP)=0; virtual double Interaction(double dsquared)=0; // virtual void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // Rprintf("Define CalcBeta...\n"); // } // virtual void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ //Rprintf("Define CheckBeta...\n"); //} // virtual double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p) //{ return(0.0);}; // virtual double lnDens(Point2Pattern *p2p); // virtual void Beta(struct Point2 *TempCell){ // TempCell->Beta = 0; // Rprintf("Define Beta...\n");}; }; //double PointProcess::lnDens(Point2Pattern *p2p){ //// double f1; //long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx; //double dy,dx, lnDens,dst2; //struct Point2 *TempCell, *TempCell2; // //dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); //dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); //rx = int(InteractionRange/dx+1.0); //ry = int(InteractionRange/dy+1.0); // // //Rprintf("1:%f 2:%f 3:%d 4:%d 5:%f 6:%f\n",dx,dy,rx,ry, // // this->InteractionRange,InteractionRange); // //Rprintf("mx:%d my:%d\n",p2p->MaxXCell,p2p->MaxYCell); // // lnDens = 0; // // //Rprintf("lnDens: %f (0)\n",lnDens); // // for(xc = 0; xc <= p2p->MaxXCell; xc++){ // for(yc = 0; yc <= p2p->MaxYCell; yc++){ // //if(xc==1) Rprintf("%d %d\n",xc,yc); // CHECK(p2p->headCell[xc][yc], // "internal error: p2p->headCell[xc][yc] is null in lnDens()"); // TempCell = p2p->headCell[xc][yc]->next; // CHECK(TempCell, "internal error: TempCell is null in lnDens()"); // while(TempCell != TempCell->next){ // lnDens += log(TempCell->Beta); // //Rprintf("lnDens: %f (1) %d %d %d %d Beta %f\n",lnDens,xc,yc, // // p2p->MaxXCell,p2p->MaxYCell,TempCell->Beta); // //if(lnDens<(-100000)){Rprintf("%f",lnDens); scanf("%f",&f1);} // if(InteractionRange>0){ // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // //if(xc==1) Rprintf("%d %d %d %d %d %d\n",xco,yco,fx,tx,fy,ty); // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnDens() loop"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnDens() loop A"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnDens += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnDens() loop B"); // } // } // } // //Rprintf("lnDens: %f\n",lnDens); // } // TempCell = TempCell->next; // CHECK(TempCell, // "internal error: TempCell is null in lnDens() at end"); // } // } // } // return(lnDens); // //} // ........................... Sampler .......................... 
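// The Sampler below implements dominated coupling-from-the-past for
// the pairwise-interaction processes above: BirthDeath() extends a
// dominating spatial birth-and-death process backwards in time, and
// Forward() replays the recorded transitions to evolve an upper and a
// lower chain (tracked through the InLower[] flags and UpperLiving[]
// counts of Point2Pattern) until the two chains coalesce.  A hedged
// summary of the control flow in Sim():
//
//   /* generate the initial dominating Poisson state          */
//   /* do {                                                   */
//   /*   extend the birth-death history back to EndTime;      */
//   /*   replay it forwards for the upper and lower chains;   */
//   /*   StartTime = EndTime+1; EndTime *= 2;    (doubling)   */
//   /* } while(UpperLiving[0] != UpperLiving[1]);             */
//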
class Sampler{ public: PointProcess *PP; Point2Pattern *P2P; long int GeneratedPoints, LivingPoints, NoP; //long int UpperLiving[2]; Sampler(PointProcess *p){ PP = p;} ~Sampler(){} void Sim(Point2Pattern *p2p, long int *ST, long int *ET); long int BirthDeath(long int TimeStep, struct Point *headLiving, struct Point *headDeleted, struct Point3 *headTransition); // WAS: Sampler::Forward void Forward(long int TS, long int TT, char TX, char TY, struct Point *Proposal, long int *DDD); }; void Sampler::Forward(long int TS, long int TT, char TX, char TY, struct Point *Proposal, long int *DDD){ long int XCell, YCell, DirectionN; double dtmp2,dtmpx,dtmpy, tmpR, TempGamma[2], TempI; struct Point2 *TempCell, *TempCell2; float f1; /* Birth */ if(TT==1){ f1 = (Proposal->X-P2P->Xmin)/P2P->XCellDim; XCell = int(f1); CLAMP(XCell, 0, P2P->MaxXCell, "XCell"); f1 = (Proposal->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(f1); CLAMP(YCell, 0, P2P->MaxYCell, "YCell"); // TempCell = ALLOCATE(struct Point2); // TempCell->No = Proposal->No; TempCell->X = Proposal->X; TempCell->Y = Proposal->Y; tmpR = Proposal->R; TempCell->next = P2P->headCell[XCell][YCell]->next; P2P->headCell[XCell][YCell]->next = TempCell; TempCell->InLower[0]=0; TempCell->InLower[1]=0; TempGamma[0] = 1.0; TempGamma[1] = 1.0; /*same cell*/ TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case"); while(TempCell2 != TempCell2->next){ dtmpx = TempCell->X - TempCell2->X; dtmpy = TempCell->Y - TempCell2->Y; dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy; TempI = PP->Interaction(dtmp2); if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI; if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI; TempCell2=TempCell2->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop"); } /*eight other cells*/ for(DirectionN=1;DirectionN<=8;DirectionN++){ if(((XCell+P2P->DirX[DirectionN])>=0) && ((XCell+P2P->DirX[DirectionN])<=P2P->MaxXCell) && ((YCell+P2P->DirY[DirectionN])>=0) && ((YCell+P2P->DirY[DirectionN])<=P2P->MaxYCell)){ CHECK(P2P->headCell[XCell+P2P->DirX[DirectionN]][YCell+P2P->DirY[DirectionN]], "internal error: HUGE P2P EXPRESSION is null in Forward() birth case loop A"); TempCell2 = P2P->headCell[XCell+P2P->DirX[DirectionN]] [YCell+P2P->DirY[DirectionN]]->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop B"); while(TempCell2!=TempCell2->next){ dtmpx = TempCell->X - TempCell2->X; dtmpy = TempCell->Y - TempCell2->Y; dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy; TempI = PP->Interaction(dtmp2); if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI; if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI; TempCell2=TempCell2->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop C"); } } } if(tmpR <= TempGamma[1] ){ TempCell->InLower[0]=1; P2P->UpperLiving[0] = P2P->UpperLiving[0] +1; } if(tmpR <= TempGamma[0] ){ TempCell->InLower[1]=1; P2P->UpperLiving[1] = P2P->UpperLiving[1] +1; } } /* Death */ if(TT==0){ TempCell=P2P->headCell[(int)TX][(int)TY]; CHECK(TempCell, "internal error: TempCell is null in Forward() death case"); while(TempCell->next->No != *DDD){ TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Forward() death case loop"); if(TempCell->next == TempCell) { // Rprintf("internal error: unexpected self-reference. 
Dumping...\n");
	// P2P->Print();
	error("internal error: unexpected self-reference");
	break;
      }
    };
    CHECK(TempCell->next,
	  "internal error: TempCell->next is null in Forward() death case");
    if(*DDD!=TempCell->next->No)
      Rprintf("diagnostic message: multi cell: !!DDD:%ld TempUpper->No:%ld ",
	      *DDD,TempCell->No);
    if(TempCell->next->InLower[0]==1) P2P->UpperLiving[0] = P2P->UpperLiving[0] -1;
    if(TempCell->next->InLower[1]==1) P2P->UpperLiving[1] = P2P->UpperLiving[1] -1;
    TempCell2 = TempCell->next;
    CHECK(TempCell2, "internal error: TempCell2 is null in Forward() death case B");
    TempCell->next = TempCell2->next;
    FREE(TempCell2);
    /* Common stuff */
    //KillCounter ++;
    *DDD = *DDD - 1;
  }
}

long int Sampler::BirthDeath(long int TimeStep,
			     struct Point *headLiving,
			     struct Point *headDeleted,
			     struct Point3 *headTransition){
  long int i,n;
  float f1,f2,f3,f4;
  double xtemp,ytemp;
  char InWindow, Success;
  struct Point *TempPoint, *TempPoint2;
  struct Point3 *TempTransition;

  R_CheckUserInterrupt();

  f1 = LivingPoints;
  f2 = PP->TotalBirthRate;
  f3 = f2/(f1+f2);
  f4 = slumptal();
  n = 0;
  Success = 0;
  //Rprintf("LivingPoints: %d TotalBirthRate %f GeneratedPoints %d\n",
  //        LivingPoints,PP->TotalBirthRate,GeneratedPoints);
  /* Birth */
  while(Success==0){
    if(f4<f3){  /* reconstructed test: f3 is the birth probability */
      PP->NewEvent(&xtemp, &ytemp, &InWindow);
      //Rprintf("Ping 2 (BD)\n");
      if(InWindow==1){
	Success = 1;
	//
	TempTransition = ALLOCATE(struct Point3);
	//
	//Rprintf("Ping 3 (BD)\n");
	TempTransition->Case = 0;
	LivingPoints ++;
	GeneratedPoints ++;
	//
	TempPoint = ALLOCATE(struct Point);
	//
	TempPoint->X = xtemp;
	TempPoint->Y = ytemp;
	TempPoint->No = GeneratedPoints;
	TempPoint->R = slumptal();
	TempPoint->next = headLiving->next;
	headLiving->next = TempPoint;
	NoP ++;
	f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim;
	TempTransition->XCell = int(f1);
	f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim;
	TempTransition->YCell = int(f1);
	//Rprintf("X %f XCell %d\n",TempPoint->X,TempTransition->XCell);
	//
	CLAMP(TempTransition->XCell, 0, P2P->MaxXCell, "TempTransition->XCell");
	CLAMP(TempTransition->YCell, 0, P2P->MaxYCell, "TempTransition->YCell");
	TempTransition->next = headTransition->next;
	headTransition->next = TempTransition;
      }
    }
    /* Death */
    else{
      Success = 1;
      //
      TempTransition = ALLOCATE(struct Point3);
      //
      TempTransition->Case = 1;
      f1 = LivingPoints;
      f2 = f1*slumptal()+1.0;
      n = int(f2);
      if(n < 1) n = 1;
      if(n>LivingPoints){
	// Rprintf("diagnostic message: random integer n=%ld > %ld = number of living points\n", n,LivingPoints);
	n=LivingPoints;
      }
      TempPoint2 = TempPoint = headLiving;
      for(i=1; i<=n; i++){
	TempPoint2 = TempPoint;
	TempPoint = TempPoint->next;
      }
      TempPoint2->next = TempPoint->next;
      TempPoint->next = headDeleted->next;
      headDeleted->next = TempPoint;
      LivingPoints --;
      NoP --;
      TempTransition->next = headTransition->next;
      headTransition->next = TempTransition;
    }
  }
  return(n);
}

void Sampler::Sim(Point2Pattern *p2p, long int *ST, long int *ET) {
  P2P = p2p;
  long int StartTime, EndTime, TimeStep, D0Time, D0Living;
  long int XCell, YCell, DDD, i;
  float f1;
  /* Initialising linked lists for backward simulation */
  struct Point *headDeleted, *headLiving, *dummyDeleted, *dummyLiving;
  struct Point *TempPoint;
  //
  headLiving = ALLOCATE(struct Point);
  dummyLiving = ALLOCATE(struct Point);
  //
  headLiving->next = dummyLiving;
  dummyLiving->next = dummyLiving;
  //
  headDeleted = ALLOCATE(struct Point);
  dummyDeleted = ALLOCATE(struct Point);
  //
  headDeleted->next = dummyDeleted;
  dummyDeleted->next = dummyDeleted;
  struct Point2 *TempCell2;
  struct Point3 *headTransition, *dummyTransition;
  //
  headTransition = 
ALLOCATE(struct Point3); dummyTransition = ALLOCATE(struct Point3); // headTransition->next = dummyTransition; dummyTransition->next = dummyTransition; PP->GeneratePoisson(headLiving, &GeneratedPoints, &LivingPoints, &NoP); StartTime=1; EndTime=1; TimeStep = 0; D0Time = 0; D0Living = GeneratedPoints; long int tmp, D0; do{ tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); if(tmp>0){ if(tmp>(LivingPoints+1-D0Living)){ D0Living --; } } D0Time++; }while(D0Living>0); tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); StartTime=1; EndTime=D0Time+1; D0 = 0; do{ if(D0==1){ for(TimeStep=StartTime;TimeStep<=EndTime;TimeStep ++){ tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); } } D0 = 1; P2P->Empty(); /* headUpper->next = dummyUpper; dummyUpper->next = dummyUpper; for(XCell=0;XCell<=P2P->MaxXCell;XCell++){ for(YCell=0;YCell<=P2P->MaxYCell;YCell++){ headUpperCell[XCell][YCell]->next=dummyUpper; } } */ P2P->UpperLiving[0] = LivingPoints; P2P->UpperLiving[1] = 0; P2P->NoP = 0; i=0; TempPoint = headLiving->next; CHECK(TempPoint, "internal error: TempPoint is null in Sim()"); while(TempPoint!=TempPoint->next){ i++; // TempCell2 = ALLOCATE(struct Point2); // TempCell2->No = TempPoint->No; TempCell2->X = TempPoint->X; TempCell2->Y = TempPoint->Y; TempCell2->InLower[0] = 1; TempCell2->InLower[1] = 0; f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim; XCell = int(floor(f1)); CLAMP(XCell, 0, P2P->MaxXCell, "XCell"); f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(floor(f1)); CLAMP(YCell, 0, P2P->MaxYCell, "YCell"); TempCell2->next = P2P->headCell[XCell][YCell]->next; P2P->headCell[XCell][YCell]->next = TempCell2; TempPoint = TempPoint->next; CHECK(TempPoint, "internal error: TempPoint is null in Sim() loop"); } //P2P->DumpToFile("temp0.dat"); struct Point3 *TempTransition; struct Point *Proposal; TempTransition = headTransition->next; CHECK(TempTransition, "internal error: TempTransition is null in Sim()"); Proposal = headDeleted->next; DDD = GeneratedPoints; for(TimeStep=EndTime;TimeStep>=1;TimeStep--){ R_CheckUserInterrupt(); Forward(TimeStep,TempTransition->Case, TempTransition->XCell,TempTransition->YCell, Proposal,&DDD); if(TempTransition->Case == 1) Proposal = Proposal ->next; TempTransition = TempTransition->next; CHECK(TempTransition, "internal error: TempTransition is null in Sim() loop"); } /* Doubling strategy used!*/ StartTime = EndTime+1; EndTime=EndTime*2; //P2P->DumpToFile("temp.dat"); }while(P2P->UpperLiving[0]!=P2P->UpperLiving[1]); P2P->Clean(); i=0; struct Point *TempPoint2; TempPoint = headLiving; TempPoint2 = headLiving->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position B"); while(TempPoint!=TempPoint->next){ i++; FREE(TempPoint); TempPoint = TempPoint2; TempPoint2 = TempPoint2->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop C"); } FREE(TempPoint); i = 0; TempPoint = headDeleted; TempPoint2 = headDeleted->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position D"); while(TempPoint!=TempPoint->next){ i++; FREE(TempPoint); TempPoint = TempPoint2; TempPoint2 = TempPoint2->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop D"); } FREE(TempPoint); //Rprintf("%d ",i); struct Point3 *TempTransition,*TempTransition2; i = 0; TempTransition = headTransition; TempTransition2 = headTransition->next; CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() position E"); while(TempTransition!=TempTransition->next){ i++; 
FREE(TempTransition); TempTransition = TempTransition2; TempTransition2 = TempTransition2->next; CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() loop F"); } FREE(TempTransition); //Rprintf("%d ST: %d ET: %d\n",i,StartTime,EndTime); //scanf("%f",&f1); *ST = StartTime; *ET = EndTime; } #include "PerfectStrauss.h" #include "PerfectStraussHard.h" #include "PerfectHardcore.h" #include "PerfectDiggleGratton.h" #include "PerfectDGS.h" #include "PerfectPenttinen.h" spatstat.core/src/KrectV1.h0000644000176200001440000000026314141377573015300 0ustar liggesusers/* KrectV2.h with or without isotropic correction */ if((*doIso) == 1) { #define ISOTROPIC #include "KrectV2.h" } else { #undef ISOTROPIC #include "KrectV2.h" } spatstat.core/src/PerfectDGS.h0000644000176200001440000001166114141377573015753 0ustar liggesusers // ........................... Diggle-Gates-Stibbard process ................ // $Revision: 1.5 $ $Date: 2020/05/12 03:31:12 $ #ifndef PI #define PI 3.14159265358979 #endif class DgsProcess : public PointProcess { public: double beta, rho, rhosquared; DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r); ~DgsProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DgsProcess::DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; rho = r; rhosquared = rho * rho; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DgsProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { dist = sqrt(dsquared); t = sin((PI/2) * dist/rho); rtn = t * t; } return(rtn); } void DgsProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DgsProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DgsProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DgsProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DgsProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... 
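// The entry point below follows the calling convention shared by the
// Perfect* samplers in this file: scalar model parameters and the
// window ranges arrive as numeric SEXPs, and a 3-element list
// (x coordinates, y coordinates, number of points) is returned;
// Point2Pattern::Return() stores -1 in the count as a safety check if
// the output buffers would overflow.  A hedged sketch of the R-side
// call, assuming the "SC_" native-symbol registration used by this
// package:
//
//   out <- .Call(SC_PerfectDGS, beta, rho, c(xmin, xmax), c(ymin, ymax))
//   n <- out[[3]]; x <- out[[1]][seq_len(n)]; y <- out[[2]][seq_len(n)]
//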
extern "C" { SEXP PerfectDGS(SEXP beta, SEXP rho, SEXP xrange, SEXP yrange) { // input parameters double Beta, Rho, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments // model parameters Beta = *(NUMERIC_POINTER(beta)); Rho = *(NUMERIC_POINTER(rho)); // window Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) { xcells = 9; } else if(xcells < 1) { xcells = 1; } ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) { ycells = 9; } else if(ycells < 1) { ycells = 1; } #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Diggle-Gates-Stibbard point process DgsProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Rho); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat.core/src/badgey.c0000644000176200001440000003136514141377573015256 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* To get debug output, insert the line: #define DEBUG 1 */ void fexitc(const char *msg); /* Conditional intensity function for a multiscale saturation process. parameter vector: ipar[0] = ndisc ipar[1] = gamma[0] ipar[2] = r[0] ipar[3] = s[0] ... 
*/ typedef struct BadGey { /* model parameters */ int ndisc; double *gamma; double *r; double *s; /* transformations of the parameters */ double *r2; double *loggamma; int *hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; /* matrix[ndisc, npmax]: neighbour counts in current state */ int *tee; /* vector[ndisc] : neighbour count at point in question */ double *w; /* vector[ndisc] : sum of changes in counts at other points */ } BadGey; Cdata *badgeyinit(state, model, algo) State state; Model model; Algor algo; { int i, j, k, i0, ndisc, nmatrix; double r, g, d2; BadGey *badgey; /* create storage */ badgey = (BadGey *) R_alloc(1, sizeof(BadGey)); badgey->ndisc = ndisc = model.ipar[0]; /* Allocate space for parameter vectors */ badgey->gamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->r = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->s = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Derived values */ badgey->r2 = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->loggamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->hard = (int *) R_alloc((size_t) ndisc, sizeof(int)); /* copy and transform parameters */ for(i=0; i < ndisc; i++) { i0 = 3*i + 1; g = badgey->gamma[i] = model.ipar[i0]; r = badgey->r[i] = model.ipar[i0 + 1]; badgey->s[i] = model.ipar[i0 + 2]; badgey->r2[i] = r * r; badgey->hard[i] = (g < DOUBLE_EPS); badgey->loggamma[i] = (g < DOUBLE_EPS) ? 0 : log(g); } /* periodic boundary conditions? */ badgey->period = model.period; badgey->per = (model.period[0] > 0.0); /* Allocate scratch space */ badgey->tee = (int *) R_alloc((size_t) ndisc, sizeof(int)); badgey->w = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Allocate space for auxiliary counts */ nmatrix = ndisc * state.npmax; badgey->aux = (int *) R_alloc((size_t) nmatrix, sizeof(int)); /* Initialise auxiliary counts */ for(i = 0; i < nmatrix; i++) badgey->aux[i] = 0; for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(j == i) continue; d2 = dist2either(state.x[i], state.y[i], state.x[j], state.y[j], badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) MAT(badgey->aux, k, i, ndisc) += 1; } } } #ifdef DEBUG Rprintf("Finished initialiser; ndisc=%d\n", ndisc); #endif return((Cdata *) badgey); } #define AUX(I,J) MAT(aux, I, J, ndisc) double badgeycif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, k, npts, ndisc, tk; double u, v, d2; double a, dd2, b, f, r2, s, cifval; double *x, *y; int *tee, *aux; double *w; BadGey *badgey; badgey = (BadGey *) cdata; #ifdef DEBUG Rprintf("Entering badgeycif\n"); #endif npts = state.npts; cifval = 1.0; if(npts==0) return cifval; x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; ndisc = badgey->ndisc; tee = badgey->tee; aux = badgey->aux; w = badgey->w; /* For disc k, tee[k] = neighbour count at the point in question; w[k] = sum of changes in (saturated) neighbour counts at other points */ if(prop.itype == BIRTH) { /* compute tee[k] and w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { tee[k]++; f = 
badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } } else if(prop.itype == DEATH) { /* extract current auxiliary counts for point ix */ /* compute w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = AUX(k,ix); w[k] = 0.0; } /* compute change in counts for other points */ if(badgey->per) { /* Periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } } else if(prop.itype == SHIFT) { /* compute auxiliary counts from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } /* Compute the cif at the new point, not the ratio of new/old */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = dist2(x[ix],y[ix], x[j],y[j],badgey->period); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = pow(x[ix] - x[j], 2) + pow(y[ix] - y[j], 2); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } } #ifdef DEBUG Rprintf("ndisc=%d\n", ndisc); #endif /* compute total change in saturated count */ for(k = 0; k < ndisc; k++) { s = badgey->s[k]; tk = tee[k]; w[k] += ((tk < s) ? 
tk : s); #ifdef DEBUG Rprintf("s[%d]=%lf, t[%d]=%d, w[%d]=%lf\n", k, s, k, tk, k, w[k]); #endif } /* evaluate cif */ for(k = 0; k < ndisc; k++) { if(badgey->hard[k]) { if(tee[k] > 0) return(0.0); /* else cifval multiplied by 0^0 = 1 */ } else cifval *= exp(badgey->loggamma[k] * w[k]); } return cifval; } void badgeyupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, ndisc, j, k; double u, v, xix, yix, r2, d2, d2old, d2new; double *x, *y; int *aux; BadGey *badgey; badgey = (BadGey *) cdata; aux = badgey->aux; /* 'state' is current state before transition */ x = state.x; y = state.y; npts = state.npts; ndisc = badgey->ndisc; #ifdef DEBUG Rprintf("start update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif if(prop.itype == BIRTH) { #ifdef DEBUG Rprintf("Update for birth ---- \n"); #endif /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counters for new point x[npts], y[npts] */ for(k = 0; k < ndisc; k++) AUX(k, npts) = 0; /* update all auxiliary counters */ if(badgey->per) { /* periodic distance */ for(j=0; j < npts; j++) { d2 = dist2(u,v,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX(k, j) += 1; AUX(k, npts) += 1; } } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { d2 = pow(u - x[j], 2) + pow(v - y[j], 2); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX( k, j) += 1; AUX( k, npts) += 1; } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j <= npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; #ifdef DEBUG Rprintf("--- Update for death of point %d = (%lf,%lf) ---- \n", ix, u, v); #endif /* Decrement auxiliary counter for each neighbour of deleted point, and remove entry corresponding to deleted point */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { #ifdef DEBUG Rprintf("hit for point %d with radius r[%d]\n", j, k); #endif if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts-1; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == SHIFT) { #ifdef DEBUG Rprintf("Update for shift ---- \n"); #endif /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute all auxiliary counters for point ix */ for(k = 0; k < ndisc; k++) AUX(k,ix) = 0; if(badgey->per) { for(j=0; jperiod); d2old = dist2(xix,yix,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) AUX(k,j) += 1; /* point j gains a new neighbour */ } else if(d2old < r2) AUX(k,j) -= 1; /* point j loses a neighbour */ } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { #ifdef DEBUG Rprintf("shifted point is close to j=%d\n", j); #endif /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) { #ifdef DEBUG Rprintf("\t(previous 
position was not)\n"); #endif AUX(k,j) += 1; /* point j gains a new neighbour */ } } else if(d2old < r2) { #ifdef DEBUG Rprintf("previous position was close to j=%d, shifted point is not\n", j); #endif AUX(k,j) -= 1; /* point j loses a neighbour */ } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } fexitc("Unrecognised transition type; bailing out.\n"); } Cifns BadGeyCifns = { &badgeyinit, &badgeycif, &badgeyupd, NO}; spatstat.core/src/PerfectPenttinen.h0000644000176200001440000001236014141377573017277 0ustar liggesusers // ........................... Penttinen process ................ // $Revision: 1.4 $ $Date: 2020/05/12 03:32:19 $ class PenttProcess : public PointProcess { public: double beta, gamma, radius, reachsquared, loggamma2pi; int ishard; PenttProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double r); ~PenttProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; PenttProcess::PenttProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; radius = r; ishard = (gamma <= DOUBLE_EPS); loggamma2pi = M_2PI * (ishard? 0.0 : log(gamma)); reachsquared = 4.0 * radius * radius; InteractionRange = 2.0 * radius; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double PenttProcess::Interaction(double dsquared) { double rtn, z, z2; rtn = 1.0; if(dsquared < reachsquared) { if(ishard) return(0.0); z2 = dsquared/reachsquared; z = sqrt(z2); if(z < 1.0) { rtn = exp(loggamma2pi * (acos(z) - z * sqrt(1.0 - z2))); } } return(rtn); } void PenttProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void PenttProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating PenttProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating PenttProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating PenttProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... 
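// Recap, before the R interface, of the pair interaction implemented
// by PenttProcess::Interaction() above, writing z = d/(2*radius)
// (a hedged restatement of the code, not an independent definition):
//
//   d^2 >= 4*radius^2  ->  1                                 (no interaction)
//   d^2 <  4*radius^2  ->  exp(2*pi*log(gamma)
//                              * (acos(z) - z*sqrt(1-z^2))),
//                          which lies in (0,1) for 0 < gamma < 1
//   gamma ~ 0 (ishard) ->  0 whenever d < 2*radius, i.e. the model
//                          degenerates to a hard core at distance 2r
//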
extern "C" { SEXP PerfectPenttinen(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments // model parameters Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); // window Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) { xcells = 9; } else if(xcells < 1) { xcells = 1; } ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) { ycells = 9; } else if(ycells < 1) { ycells = 1; } #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Penttinen point process PenttProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Gamma,R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(9); // 5 arguments plus xout, yout, nout, out return(out); } } spatstat.core/src/pcf3.c0000644000176200001440000001216214141377573014650 0ustar liggesusers#include #include #include #include #include "geom3.h" #include "functable.h" #include "chunkloop.h" #include "constants.h" /* $Revision: 1.8 $ $Date: 2018/12/18 02:43:11 $ pair correlation function of 3D point pattern (Epanechnikov kernel) pcf3trans translation correction pcf3isot isotropic correction Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define FOURPI (2.0 * M_2PI) void pcf3trans(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; register double vx, vy, vz, tval; Point *ip, *jp; double dt, vol, lambda, denom; double coef, twocoef, frac, invweight, kernel; double sphesfrac(), sphevol(); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing 
of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { /* compute pairwise distance */ jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute (inverse) edge correction weight */ vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); invweight = vx * vy * vz * FOURPI * dist * dist; if(invweight > 0.0) { for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel / invweight; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* multiplied by 2 because we only visited i < j pairs */ twocoef = 2.0 * coef; /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= twocoef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0) ? (pcf->num)[l] / (pcf->denom)[l] : 0.0; } } void pcf3isot(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, mass, tval; double coef, frac, kernel; double sphesfrac(), sphevol(); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute edge correction weight */ mass = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); mass *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; if(mass > 0.0) { mass /= FOURPI * dist * dist; for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel * mass; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= coef; (pcf->f)[l] = 
((pcf->denom)[l] > 0.0)? (pcf->num)[l] / (pcf->denom)[l] : 0.0; } } spatstat.core/src/looptest.h0000644000176200001440000000030214141377573015664 0ustar liggesusers/* looptest.h Utilities for looping $Revision: 1.1 $ $Date: 2014/09/19 00:47:34 $ */ /* a small value relative to threshold X, for loop exit test */ #define EPSILON(X) ((X)/64) spatstat.core/src/straussm.c0000644000176200001440000001300314141377573015671 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiStrauss { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... ntypes-1 */ double *rad2; /* squared radii */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStrauss; /* initialiser function */ Cdata *straussminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, r2, logg, range2; MultiStrauss *multistrauss; multistrauss = (MultiStrauss *) R_alloc(1, sizeof(MultiStrauss)); multistrauss->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrauss->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->rad = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrauss->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrauss->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 gamma values followed by n^2 values of r */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[i + j*ntypes]; r = model.ipar[n2 + i + j*ntypes]; r2 = r * r; hard = (g < DOUBLE_EPS); logg = (hard) ? 0 : log(g); MAT(multistrauss->gamma, i, j, ntypes) = g; MAT(multistrauss->rad, i, j, ntypes) = r; MAT(multistrauss->hard, i, j, ntypes) = hard; MAT(multistrauss->loggamma, i, j, ntypes) = logg; MAT(multistrauss->rad2, i, j, ntypes) = r2; if(r2 > range2) range2 = r2; } } multistrauss->range2 = range2; /* periodic boundary conditions? 
*/ multistrauss->period = model.period; multistrauss->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrauss); } /* conditional intensity evaluator */ double straussmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStrauss *multistrauss; DECLARE_CLOSE_D2_VARS; multistrauss = (MultiStrauss *) cdata; range2 = multistrauss->range2; period = multistrauss->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrauss->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrauss->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrauss->kount, m1, m2, ntypes); if(MAT(multistrauss->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrauss->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussCifns = { &straussminit, &straussmcif, (updafunptr) NULL, YES}; spatstat.core/src/fiksel.c0000644000176200001440000000574414141377573015302 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Fiksel process */ /* Conditional intensity function for a pairwise interaction point process with interaction function e(t) = 0 for t < h = exp(a * exp(- kappa * t)) for h <= t < r = 1 for t >= r */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Fiksel { double r; double h; double kappa; double a; double h2; /* h^2 */ double r2; /* r^2 */ double *period; int per; } Fiksel; /* initialiser function */ Cdata *fikselinit(state, model, algo) State state; Model model; Algor algo; { Fiksel *fiksel; fiksel = (Fiksel *) R_alloc(1, sizeof(Fiksel)); /* Interpret model parameters*/ fiksel->r = model.ipar[0]; fiksel->h = model.ipar[1]; fiksel->kappa = model.ipar[2]; fiksel->a = model.ipar[3]; fiksel->period = model.period; /* constants */ fiksel->h2 = pow(fiksel->h, 2); fiksel->r2 = pow(fiksel->r, 2); /* periodic boundary conditions? 
*/ fiksel->per = (model.period[0] > 0.0); return((Cdata *) fiksel); } /* conditional intensity evaluator */ double fikselcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairpotsum, cifval; double kappa, r2, h2; double *period; Fiksel *fiksel; DECLARE_CLOSE_D2_VARS; fiksel = (Fiksel *) cdata; period = fiksel->period; kappa = fiksel->kappa; r2 = fiksel->r2; h2 = fiksel->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairpotsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(fiksel->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u,v,x[j],y[j],r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; ja * pairpotsum); return cifval; } Cifns FikselCifns = { &fikselinit, &fikselcif, (updafunptr) NULL, NO}; spatstat.core/src/mhv5.h0000644000176200001440000000054114141377573014677 0ustar liggesusers/* mhv5.h tempered or not Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_TEMPER if(tempered) { /* tempering */ #define MH_TEMPER YES #include "mhloop.h" #undef MH_TEMPER } else { /* usual, no tempering */ #define MH_TEMPER NO #include "mhloop.h" #undef MH_TEMPER } spatstat.core/src/methas.c0000644000176200001440000002750514141377573015305 0ustar liggesusers#include #include #include #include "methas.h" #include "chunkloop.h" #include "mhsnoop.h" void fexitc(const char *msg); /* To switch on debugging code, insert the line: #define MH_DEBUG YES */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif /* This is the value of 'ix' when we are proposing a birth. It must be equal to -1 so that NONE+1 = 0. 
*/ #define NONE -1 extern Cifns getcif(char *); SEXP xmethas( SEXP ncif, SEXP cifname, SEXP beta, SEXP ipar, SEXP iparlen, SEXP period, SEXP xprop, SEXP yprop, SEXP mprop, SEXP ntypes, SEXP nrep, SEXP p, SEXP q, SEXP nverb, SEXP nrep0, SEXP x, SEXP y, SEXP marks, SEXP ncond, SEXP fixall, SEXP track, SEXP thin, SEXP snoopenv, SEXP temper, SEXP invertemp) { char *cifstring; double cvd, cvn, qnodds, anumer, adenom, betavalue; double *iparvector; int verb, marked, tempered, mustupdate, itype; int nfree, nsuspect; int irep, ix, j, maxchunk, iverb; int Ncif; int *plength; long Nmore; int permitted; double invtemp; double *xx, *yy, *xpropose, *ypropose; int *mm, *mpropose, *pp, *aa; SEXP out, xout, yout, mout, pout, aout; int tracking, thinstart; #ifdef HISTORY_INCLUDES_RATIO SEXP numout, denout; double *nn, *dd; #endif State state; Model model; Algor algo; Propo birthprop, deathprop, shiftprop; History history; Snoop snooper; /* The following variables are used only for a non-hybrid interaction */ Cifns thecif; /* cif structure */ Cdata *thecdata; /* pointer to initialised cif data block */ /* The following variables are used only for a hybrid interaction */ Cifns *cif; /* vector of cif structures */ Cdata **cdata; /* vector of pointers to initialised cif data blocks */ int *needupd; /* vector of logical values */ int k; /* loop index for cif's */ /* =================== Protect R objects from garbage collector ======= */ PROTECT(ncif = AS_INTEGER(ncif)); PROTECT(cifname = AS_CHARACTER(cifname)); PROTECT(beta = AS_NUMERIC(beta)); PROTECT(ipar = AS_NUMERIC(ipar)); PROTECT(iparlen = AS_INTEGER(iparlen)); PROTECT(period = AS_NUMERIC(period)); PROTECT(xprop = AS_NUMERIC(xprop)); PROTECT(yprop = AS_NUMERIC(yprop)); PROTECT(mprop = AS_INTEGER(mprop)); PROTECT(ntypes = AS_INTEGER(ntypes)); PROTECT(nrep = AS_INTEGER(nrep)); PROTECT( p = AS_NUMERIC(p)); PROTECT( q = AS_NUMERIC(q)); PROTECT(nverb = AS_INTEGER(nverb)); PROTECT(nrep0 = AS_INTEGER(nrep0)); PROTECT( x = AS_NUMERIC(x)); PROTECT( y = AS_NUMERIC(y)); PROTECT( marks = AS_INTEGER(marks)); PROTECT(fixall = AS_INTEGER(fixall)); PROTECT(ncond = AS_INTEGER(ncond)); PROTECT(track = AS_INTEGER(track)); PROTECT(thin = AS_INTEGER(thin)); PROTECT(temper = AS_INTEGER(temper)); PROTECT(invertemp = AS_NUMERIC(invertemp)); /* that's 24 protected objects */ /* =================== Translate arguments from R to C ================ */ /* Ncif is the number of cif's plength[i] is the number of interaction parameters in the i-th cif */ Ncif = *(INTEGER_POINTER(ncif)); plength = INTEGER_POINTER(iparlen); /* copy RMH algorithm parameters */ algo.nrep = *(INTEGER_POINTER(nrep)); algo.nverb = *(INTEGER_POINTER(nverb)); algo.nrep0 = *(INTEGER_POINTER(nrep0)); algo.p = *(NUMERIC_POINTER(p)); algo.q = *(NUMERIC_POINTER(q)); algo.fixall = ((*(INTEGER_POINTER(fixall))) == 1); algo.ncond = *(INTEGER_POINTER(ncond)); algo.tempered = tempered = (*(INTEGER_POINTER(temper)) != 0); algo.invtemp = invtemp = *(NUMERIC_POINTER(invertemp)); /* copy model parameters without interpreting them */ model.beta = NUMERIC_POINTER(beta); model.ipar = iparvector = NUMERIC_POINTER(ipar); model.period = NUMERIC_POINTER(period); model.ntypes = *(INTEGER_POINTER(ntypes)); state.ismarked = marked = (model.ntypes > 1); /* copy initial state */ state.npts = LENGTH(x); state.npmax = 4 * ((state.npts > 256) ? 
state.npts : 256); state.x = (double *) R_alloc(state.npmax, sizeof(double)); state.y = (double *) R_alloc(state.npmax, sizeof(double)); xx = NUMERIC_POINTER(x); yy = NUMERIC_POINTER(y); if(marked) { state.marks =(int *) R_alloc(state.npmax, sizeof(int)); mm = INTEGER_POINTER(marks); } if(!marked) { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; } } else { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; state.marks[j] = mm[j]; } } #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif /* access proposal data */ xpropose = NUMERIC_POINTER(xprop); ypropose = NUMERIC_POINTER(yprop); mpropose = INTEGER_POINTER(mprop); /* we need to initialise 'mpropose' to keep compilers happy. mpropose is only used for marked patterns. Note 'mprop' is always a valid pointer */ /* ================= Allocate space for cifs etc ========== */ if(Ncif > 1) { cif = (Cifns *) R_alloc(Ncif, sizeof(Cifns)); cdata = (Cdata **) R_alloc(Ncif, sizeof(Cdata *)); needupd = (int *) R_alloc(Ncif, sizeof(int)); } else { /* Keep the compiler happy */ cif = (Cifns *) R_alloc(1, sizeof(Cifns)); cdata = (Cdata **) R_alloc(1, sizeof(Cdata *)); needupd = (int *) R_alloc(1, sizeof(int)); } /* ================= Determine process to be simulated ========== */ /* Get the cif's */ if(Ncif == 1) { cifstring = (char *) STRING_VALUE(cifname); thecif = getcif(cifstring); mustupdate = NEED_UPDATE(thecif); if(thecif.marked && !marked) fexitc("cif is for a marked point process, but proposal data are not marked points; bailing out."); /* Keep compiler happy*/ cif[0] = thecif; needupd[0] = mustupdate; } else { mustupdate = NO; for(k = 0; k < Ncif; k++) { cifstring = (char *) CHAR(STRING_ELT(cifname, k)); cif[k] = getcif(cifstring); needupd[k] = NEED_UPDATE(cif[k]); if(needupd[k]) mustupdate = YES; if(cif[k].marked && !marked) fexitc("component cif is for a marked point process, but proposal data are not marked points; bailing out."); } } /* ============= Initialise transition history ========== */ tracking = (*(INTEGER_POINTER(track)) != 0); /* Initialise even if not needed, to placate the compiler */ if(tracking) { history.nmax = algo.nrep; } else { history.nmax = 1; } history.n = 0; history.proptype = (int *) R_alloc(history.nmax, sizeof(int)); history.accepted = (int *) R_alloc(history.nmax, sizeof(int)); #ifdef HISTORY_INCLUDES_RATIO history.numerator = (double *) R_alloc(history.nmax, sizeof(double)); history.denominator = (double *) R_alloc(history.nmax, sizeof(double)); #endif /* ============= Visual debugging ========== */ /* Active if 'snoopenv' is an environment */ #if MH_DEBUG Rprintf("Initialising mhsnoop\n"); #endif initmhsnoop(&snooper, snoopenv); #if MH_DEBUG Rprintf("Initialised\n"); if(snooper.active) Rprintf("Debugger is active.\n"); #endif /* ================= Thinning of initial state ==================== */ thinstart = (*(INTEGER_POINTER(thin)) != 0); /* ================= Initialise algorithm ==================== */ /* Interpret the model parameters and initialise auxiliary data */ if(Ncif == 1) { thecdata = (*(thecif.init))(state, model, algo); /* keep compiler happy */ cdata[0] = thecdata; } else { for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } /* keep compiler happy */ thecdata = cdata[0]; } /* Set the fixed elements of the proposal objects */ birthprop.itype = BIRTH; deathprop.itype = DEATH; shiftprop.itype = SHIFT; birthprop.ix = NONE; if(!marked) birthprop.mrk = deathprop.mrk = shiftprop.mrk = 
NONE; /* Set up some constants */ verb = (algo.nverb !=0); qnodds = (1.0 - algo.q)/algo.q; /* Set value of beta for unmarked process */ /* (Overwritten for marked process, but keeps compiler happy) */ betavalue = model.beta[0]; /* ============= Run Metropolis-Hastings ================== */ /* Initialise random number generator */ GetRNGstate(); /* Here comes the code for the M-H loop. The basic code (in mhloop.h) is #included many times using different options The C preprocessor descends through a chain of files mhv1.h, mhv2.h, ... to enumerate all possible combinations of flags. */ #include "mhv1.h" /* relinquish random number generator */ PutRNGstate(); /* ============= Done ================== */ /* Create space for output, and copy final state */ /* Point coordinates */ PROTECT(xout = NEW_NUMERIC(state.npts)); PROTECT(yout = NEW_NUMERIC(state.npts)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); for(j = 0; j < state.npts; j++) { xx[j] = state.x[j]; yy[j] = state.y[j]; } /* Marks */ if(marked) { PROTECT(mout = NEW_INTEGER(state.npts)); mm = INTEGER_POINTER(mout); for(j = 0; j < state.npts; j++) mm[j] = state.marks[j]; } else { /* Keep the compiler happy */ PROTECT(mout = NEW_INTEGER(1)); mm = INTEGER_POINTER(mout); mm[0] = 0; } /* Transition history */ if(tracking) { PROTECT(pout = NEW_INTEGER(algo.nrep)); PROTECT(aout = NEW_INTEGER(algo.nrep)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); for(j = 0; j < algo.nrep; j++) { pp[j] = history.proptype[j]; aa[j] = history.accepted[j]; } #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(algo.nrep)); PROTECT(denout = NEW_NUMERIC(algo.nrep)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); for(j = 0; j < algo.nrep; j++) { nn[j] = history.numerator[j]; dd[j] = history.denominator[j]; } #endif } else { /* Keep the compiler happy */ PROTECT(pout = NEW_INTEGER(1)); PROTECT(aout = NEW_INTEGER(1)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); pp[0] = aa[0] = 0; #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(1)); PROTECT(denout = NEW_NUMERIC(1)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); nn[0] = dd[0] = 0; #endif } /* Pack up into list object for return */ if(!tracking) { /* no transition history */ if(!marked) { PROTECT(out = NEW_LIST(2)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); } else { PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); } } else { /* transition history */ if(!marked) { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(6)); #else PROTECT(out = NEW_LIST(4)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, pout); SET_VECTOR_ELT(out, 3, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 4, numout); SET_VECTOR_ELT(out, 5, denout); #endif } else { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(7)); #else PROTECT(out = NEW_LIST(5)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); SET_VECTOR_ELT(out, 3, pout); SET_VECTOR_ELT(out, 4, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 5, numout); SET_VECTOR_ELT(out, 6, denout); #endif } } #ifdef HISTORY_INCLUDES_RATIO UNPROTECT(32); /* 24 arguments plus xout, yout, mout, pout, aout, out, numout, denout */ #else UNPROTECT(30); /* 24 arguments plus xout, yout, mout, pout, aout, out */ #endif return(out); } spatstat.core/src/localpcf.h0000644000176200001440000000470614141377573015612 0ustar liggesusers/* localpcf.h 
Source template for versions of local pair correlation Requires variable: WEIGHTED Assumes point patterns are sorted in increasing order of x coordinate $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifdef WEIGHTED #define FNAME locWpcfx #else #define FNAME locpcfx #endif void FNAME(nn1, x1, y1, id1, nn2, x2, y2, id2, #ifdef WEIGHTED w2, #endif nnr, rmaxi, del, pcf) /* inputs */ int *nn1, *nn2, *nnr; double *x1, *y1, *x2, *y2; int *id1, *id2; double *rmaxi, *del; #ifdef WEIGHTED double *w2; #endif /* output */ double *pcf; /* matrix of column vectors of pcf's for each point of first pattern */ { int n1, n2, nr, i, j, k, jleft, kmin, kmax, id1i, maxchunk; double x1i, y1i, rmax, delta, xleft, dx, dy, dx2; double d2, d2max, dmax, d; double rstep, rvalue, frac, contrib, weight, coef; n1 = *nn1; n2 = *nn2; nr = *nnr; rmax = *rmaxi; delta = *del; dmax = rmax + delta; /* maximum relevant value of interpoint distance */ d2max = dmax * dmax; rstep = rmax/(nr-1); coef = 3.0 /(4.0 * delta); if(n1 == 0 || n2 == 0) return; jleft = 0; OUTERCHUNKLOOP(i, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n1, maxchunk, 8196) { x1i = x1[i]; y1i = y1[i]; id1i = id1[i]; /* adjust starting point */ xleft = x1i - dmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from jleft until |dx| > dmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > d2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= d2max && id2[j] != id1i) { d = sqrt(d2); kmin = (int) floor((d-delta)/rstep); kmax = (int) ceil((d+delta)/rstep); if(kmin <= nr-1 && kmax >= 0) { /* nonempty intersection with range of r values */ /* compute intersection */ if(kmin < 0) kmin = 0; if(kmax >= nr) kmax = nr-1; /* */ weight = coef/d; #ifdef WEIGHTED weight = weight * w2[j]; #endif for(k = kmin; k <= kmax; k++) { rvalue = k * rstep; frac = (d - rvalue)/delta; /* Epanechnikov kernel with halfwidth delta */ contrib = (1 - frac * frac); if(contrib > 0) pcf[k + nr * i] += contrib * weight; } } } } } } } #undef FNAME spatstat.core/src/KrectBody.h0000644000176200001440000001042514141377573015710 0ustar liggesusers /* KrectBody.h +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Main function body for 'Krect' Included multiple times with different values of the macros: (#define or #undef) WEIGHTED ISOTROPIC TRANSLATION BORDER UNCORRECTED **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Variables are declared in 'KrectFunDec.c' This algorithm is optimal (amongst the choices in spatstat) when the window is a rectangle *and* at least one of the ISOTROPIC, TRANSLATION corrections is needed. There are faster algorithms for the border correction on its own. $Revision: 1.3 $ $Date: 2014/02/09 03:01:27 $ */ /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; /* ............. LOOP OVER i ................. 
*/
for(; i < maxchunk; i++) {
xi = x[i]; yi = y[i];
#ifdef WEIGHTED
wi = w[i];
#endif
#ifdef BORDER
/* For border correction */
/* compute distance to border */
bx = MIN(xi, (wide - xi)); by = MIN(yi, (high - yi)); bdisti = MIN(bx, by);
/* denominator will ultimately be incremented for all r < b[i] */
bratio = bdisti/rstep;
/* lbord is the largest integer STRICTLY less than bratio */
lbord = (int) ceil(bratio) - 1;
lbord = (lbord <= Nr1) ? lbord : Nr1;
/* increment entry corresponding to r = b[i] */
#ifdef WEIGHTED
if(lbord >= 0) denomAccum[lbord] += wi;
#else
if(lbord >= 0) (denomAccum[lbord])++;
#endif
#endif
#ifdef ISOTROPIC
/* For isotropic correction */
/* perpendicular distance from point i to each edge of rectangle L = left, R = right, D = down, U = up */
dL = xi; dR = wide - xi; dD = yi; dU = high - yi;
/* test for corner of the rectangle */
ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU);
corner = (ncor >= 2);
/* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */
bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR);
bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD);
#endif
/* ............. LOOP OVER j ................. */
/* scan through points (x[j],y[j]) */
/* scan backward from i-1 until |x[j]-x[i]| > Rmax */
if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = xi - x[j]; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) {
#include "KrectIncrem.h"
} } }
/* scan forward from i+1 until x[j]-x[i] > Rmax */
if(i < N1) { for(j=i+1; j < N; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) {
#include "KrectIncrem.h"
} } }
} }
/* .................. END OF LOOPS ................................ */
/* ............. compute cumulative functions ..................... */
#ifdef UNCORRECTED
naccum = ZERO;
for(l = 0; l < Nr; l++) { unco[l] += naccum; naccum = unco[l]; }
#endif
#ifdef ISOTROPIC
accum = 0.0;
for(l = 0; l < Nr; l++) { iso[l] += accum; accum = iso[l]; }
#endif
#ifdef TRANSLATION
accum = 0.0;
for(l = 0; l < Nr; l++) { trans[l] += accum; accum = trans[l]; }
#endif
#ifdef BORDER
/* Now use the accumulated values to compute the numerator and denominator. The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <= l. numerLowAccum[l] should then be subtracted from numer[k] for k <= l. */
for(l=Nr1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; bdenom[l] = daccum; naccum += numerHighAccum[l]; bnumer[l] = naccum; naccum -= numerLowAccum[l]; }
#endif
spatstat.core/src/scan.c0000644000176200001440000000402514141377573014740 0ustar liggesusers/* scan.c Scan transform $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */
#include <math.h>
#include <R.h>
#include "raster.h"
void shape_raster();
void Cscantrans(x, y, npt, R, out) double *x, *y; /* data points */ int npt; double R; /* radius */ Raster *out; /* scan image */ {
int i,j,k,l,m;
double d2, R2;
int rmin, rmax, cmin, cmax, Rrow, Rcol, lmin, lmax, mmin, mmax;
/* initialise raster */
Clear(*out,int,0);
/* If the list of data points is empty, ...
exit now */
if(npt == 0) return;
R2 = R * R;
cmin = out->cmin; cmax = out->cmax; rmin = out->rmin; rmax = out->rmax;
/* disc size in rows/columns */
Rrow = (int) ceil(R/(out->ystep));
Rcol = (int) ceil(R/(out->xstep));
if(Rrow < 1) Rrow = 1;
if(Rcol < 1) Rcol = 1;
/* run through points */
for(i = 0; i < npt; i++) {
j = RowIndex(*out,y[i]);
k = ColIndex(*out,x[i]);
lmin = j - Rrow; if(lmin < rmin) lmin = rmin;
lmax = j + Rrow; if(lmax > rmax) lmax = rmax;
mmin = k - Rcol; if(mmin < cmin) mmin = cmin;
mmax = k + Rcol; if(mmax > cmax) mmax = cmax;
for(l = lmin; l <= lmax; l++) {
for(m = mmin; m <= mmax; m++) {
d2 = DistanceToSquared(x[i],y[i],*out,l,m);
if(d2 <= R2) Entry(*out,l,m,int) += 1;
} } } }
/* R interface */
void scantrans(x, y, n, xmin, ymin, xmax, ymax, nr, nc, R, counts) double *x, *y; /* input data points */ int *n; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions */ double *R; /* radius */ /* output array */ int *counts; /* number of R-close points */ {
Raster out;
int nrow, ncol, npoints;
double r;
nrow = *nr; ncol = *nc; npoints = *n; r = *R;
shape_raster( &out, (void *) counts, *xmin,*ymin,*xmax,*ymax, nrow, ncol, 0, 0);
Cscantrans(x, y, npoints, r, &out);
}
spatstat.core/src/straush.c0000644000176200001440000000602514141377573015507 0ustar liggesusers#include <R.h>
#include <math.h>
#include "methas.h"
#include "dist2.h"
/* Conditional intensity computation for Hard core Strauss process */
/* Storage of parameters and precomputed/auxiliary data */
typedef struct StraussHard { double gamma; double r; /* interaction distance */ double h; /* hard core distance */ double loggamma; double r2; double h2; double r2h2; /* r^2 - h^2 */ double *period; int hard; int per; } StraussHard;
/* initialiser function */
Cdata *straushinit(state, model, algo) State state; Model model; Algor algo; {
StraussHard *strausshard;
strausshard = (StraussHard *) R_alloc(1, sizeof(StraussHard));
/* Interpret model parameters*/
strausshard->gamma = model.ipar[0];
strausshard->r = model.ipar[1]; /* No longer passed as r^2 */
strausshard->h = model.ipar[2]; /* No longer passed as h^2 */
strausshard->r2 = pow(strausshard->r, 2);
strausshard->h2 = pow(strausshard->h, 2);
strausshard->r2h2 = strausshard->r2 - strausshard->h2;
strausshard->period = model.period;
/* is the interaction numerically equivalent to hard core ? */
strausshard->hard = (strausshard->gamma < DOUBLE_EPS);
strausshard->loggamma = (strausshard->hard) ? 0.0 : log(strausshard->gamma);
/* periodic boundary conditions?
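(model.period holds the width and height of the simulation rectangle when distances are measured on a torus, and a nonpositive period[0] switches wrapping off; see methas.h. A note on the evaluator below: the CLOSE/CLOSE_PERIODIC macros from dist2.h appear to leave RESIDUE equal to r2 minus the squared interpoint distance, as the inline comment "RESIDUE = r2 - distance^2" indicates, so since r2h2 = r^2 - h^2 the test RESIDUE > r2h2 is equivalent to distance^2 < h^2, i.e. a hard-core violation.)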
*/
strausshard->per = (model.period[0] > 0.0);
return((Cdata *) strausshard);
}
/* conditional intensity evaluator */
double straushcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; {
int npts, kount, ix, ixp1, j;
double *x, *y;
double u, v;
double r2, r2h2, cifval;
StraussHard *strausshard;
double *period;
DECLARE_CLOSE_VARS;
strausshard = (StraussHard *) cdata;
r2 = strausshard->r2;
r2h2 = strausshard->r2h2;
period = strausshard->period;
u = prop.u; v = prop.v; ix = prop.ix;
x = state.x; y = state.y;
npts = state.npts;
if(npts == 0) return((double) 1.0);
kount = 0;
ixp1 = ix+1;
/* If ix = NONE = -1, then ixp1 = 0 is correct */
if(strausshard->per) { /* periodic distance */
if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { /* RESIDUE = r2 - distance^2 */ if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } }
if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } }
} else { /* Euclidean distance */
if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } }
if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } }
}
if(strausshard->hard) { if(kount > 0) cifval = (double) 0.0; else cifval = (double) 1.0; }
else cifval = exp(strausshard->loggamma*kount);
return cifval;
}
Cifns StraussHardCifns = { &straushinit, &straushcif, (updafunptr) NULL, NO};
spatstat.core/src/chunkloop.h0000644000176200001440000000161514141377573016025 0ustar liggesusers/* chunkloop.h Divide a loop into chunks Convenient for divide-and-recombine, and reducing calls to R_CheckUserInterrupt, etc. $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */
#define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
IVAR = 0; \
ICHUNK = 0; \
while(IVAR < LOOPLENGTH)
#define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
ICHUNK += CHUNKSIZE; \
if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \
for(; IVAR < ICHUNK; IVAR++)
#define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
IVAR = ISTART; \
ICHUNK = 0; \
while(IVAR <= IEND)
#define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
ICHUNK += CHUNKSIZE; \
if(ICHUNK > IEND) ICHUNK = IEND; \
for(; IVAR <= ICHUNK; IVAR++) /* bound by ICHUNK, not IEND, so control returns to the outer loop between chunks */
#define CHUNKLOOP_H
spatstat.core/src/methas.h0000644000176200001440000000712314141377573015304 0ustar liggesusers/* Definitions of types and data structures for Metropolis-Hastings State Current state of point pattern Model Model parameters passed from R Cdata (pointer to) model parameters and precomputed data in C Algor Algorithm parameters (p, q, nrep etc) Propo Proposal in Metropolis-Hastings algorithm History Transition history of MH algorithm Cifns Set of functions for computing the conditional intensity for a point process model. This consists of three functions init(State, Model, Algor) .... initialises auxiliary data eval(State, Propo) ........... evaluates cif update(State,Propo) ..........
updates auxiliary data */
/* Current state of point pattern */
typedef struct State { double *x; /* vectors of Cartesian coordinates */ double *y; int *marks; /* vector of mark values */ int npts; /* current number of points */ int npmax; /* storage limit */ int ismarked; /* whether the pattern is marked */ } State;
/* Parameters of model passed from R */
typedef struct Model { double *beta; /* vector of activity parameters */ double *ipar; /* vector of interaction parameters */ double *period; /* width & height of rectangle, if torus */ int ntypes; /* number of possible marks */ } Model;
/* A pointer to Cdata is a pointer to C storage for parameters of model */
typedef void Cdata;
/* RMH Algorithm parameters */
typedef struct Algor { double p; /* probability of proposing shift */ double q; /* conditional probability of proposing death */ int fixall; /* if TRUE, only shifts of location are feasible */ int ncond; /* For conditional simulation, the first 'ncond' points are fixed */ int nrep; /* number of iterations */ int nverb; /* print report every 'nverb' iterations */ int nrep0; /* number of iterations already performed in previous blocks - for reporting purposes */ int tempered; /* TRUE if tempering is applied */ double invtemp; /* inverse temperature if tempering is applied */ } Algor;
/* Metropolis-Hastings proposal */
typedef struct Propo { double u; /* location of point of interest */ double v; int mrk; /* mark of point of interest */ int ix; /* index of point of interest, if already in pattern */ int itype; /* transition type */ } Propo;
/* transition codes 'itype' */
#define REJECT 0
#define BIRTH 1
#define DEATH 2
#define SHIFT 3
#define HISTORY_INCLUDES_RATIO
/* Record of transition history */
typedef struct History { int nmax; /* length of vectors */ int n; /* number of events recorded */ int *proptype; /* vector: proposal type */ int *accepted; /* vector: 0 for reject, 1 for accept */
#ifdef HISTORY_INCLUDES_RATIO
double *numerator; /* vectors: Hastings ratio numerator & denominator */ double *denominator;
#endif
} History;
/* conditional intensity functions */
typedef Cdata * (*initfunptr)(State state, Model model, Algor algo);
typedef double (*evalfunptr)(Propo prop, State state, Cdata *cdata);
typedef void (*updafunptr)(State state, Propo prop, Cdata *cdata);
typedef struct Cifns { initfunptr init; evalfunptr eval; updafunptr update; int marked; } Cifns;
#define NEED_UPDATE(X) ((X).update != (updafunptr) NULL)
#define NULL_CIFNS { (initfunptr) NULL, (evalfunptr) NULL, (updafunptr) NULL, NO}
/* miscellaneous macros */
#include "yesno.h"
# define MAT(X,I,J,M) (X[(I)+(J)*(M)])
spatstat.core/src/triplets.c0000644000176200001440000000615514141377573015664 0ustar liggesusers#include <R.h>
#include <math.h>
#include "methas.h"
#include "dist2.h"
/* Conditional intensity computation for Triplets process */
/* Format for storage of parameters and precomputed/auxiliary data */
typedef struct Triplets { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; int *neighbour; /* scratch list of neighbours of current point */ int Nmax; /* length of scratch space allocated */ } Triplets;
/* initialiser function */
Cdata *tripletsinit(state, model, algo) State state; Model model; Algor algo; {
/* create storage for model parameters */
Triplets *triplets;
triplets = (Triplets *) R_alloc(1, sizeof(Triplets));
/* create scratch space */
triplets->Nmax = 1024;
triplets->neighbour = (int *) R_alloc(1024, sizeof(int));
/* Interpret model parameters*/
triplets->gamma =
model.ipar[0];
triplets->r = model.ipar[1]; /* No longer passed as r^2 */
triplets->r2 = triplets->r * triplets->r;
triplets->period = model.period;
#ifdef MHDEBUG
Rprintf("Initialising Triplets gamma=%lf, r=%lf\n", triplets->gamma, triplets->r);
#endif
/* is the model numerically equivalent to hard core ? */
triplets->hard = (triplets->gamma < DOUBLE_EPS);
triplets->loggamma = (triplets->hard) ? 0 : log(triplets->gamma);
/* periodic boundary conditions? */
triplets->per = (model.period[0] > 0.0);
return((Cdata *) triplets);
}
/* conditional intensity evaluator */
double tripletscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; {
int npts, kount, ix, j, k, nj, nk, N, Nmax, Nmore, N1;
int *neighbour;
double *x, *y;
double u, v;
double r2, d2, cifval;
Triplets *triplets;
triplets = (Triplets *) cdata;
r2 = triplets->r2;
u = prop.u; v = prop.v; ix = prop.ix;
x = state.x; y = state.y;
npts = state.npts;
if(npts == 0) return((double) 1.0);
neighbour = triplets->neighbour;
Nmax = triplets->Nmax;
N = 0;
/* compile list of neighbours */
for(j=0; j < npts; j++) {
if(j != ix) {
d2 = dist2either(u,v,x[j],y[j],triplets->period);
if(d2 < r2) {
/* add j to list of neighbours of current point */
if(N >= Nmax) {
/* storage space overflow: reallocate */
Nmore = 2 * Nmax;
triplets->neighbour = neighbour = (int *) S_realloc((char *) triplets->neighbour, Nmore, Nmax, sizeof(int));
triplets->Nmax = Nmax = Nmore;
}
neighbour[N] = j;
N++;
} } }
/* count r-close (ordered) pairs of neighbours */
kount = 0;
if(N > 1) {
N1 = N - 1;
for(j = 0; j < N1; j++) {
nj = neighbour[j];
for(k = j+1; k < N; k++) {
nk = neighbour[k];
if(nj != nk) {
d2 = dist2either(x[nj],y[nj],x[nk],y[nk],triplets->period);
if(d2 < r2) kount++;
} } } }
if(triplets->hard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; }
else cifval = exp((triplets->loggamma) * kount);
#ifdef MHDEBUG
Rprintf("triplet count=%d cif=%lf\n", kount, cifval);
#endif
return cifval;
}
Cifns TripletsCifns = { &tripletsinit, &tripletscif, (updafunptr) NULL, NO};
spatstat.core/src/dgs.c0000644000176200001440000000505614141377573014576 0ustar liggesusers#include <R.h>
#include <Rmath.h>
#include <math.h>
#include "methas.h"
#include "dist2.h"
#include "constants.h"
/* Conditional intensity computation for Diggle-Gates-Stibbard process */
/* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = sin^2(pi*t/(2*rho)) for t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) */
#define PION2 M_PI_2 /* pi/2 defined in Rmath.h */
/* Storage of parameters and precomputed/auxiliary data */
typedef struct Dgs { double rho; double rho2; double pion2rho; double *period; int per; } Dgs;
/* initialiser function */
Cdata *dgsinit(state, model, algo) State state; Model model; Algor algo; {
Dgs *dgs;
/* allocate storage */
dgs = (Dgs *) R_alloc(1, sizeof(Dgs));
/* Interpret model parameters*/
dgs->rho = model.ipar[0];
dgs->period = model.period;
/* constants */
dgs->rho2 = pow(dgs->rho, 2);
dgs->pion2rho = PION2/dgs->rho;
/* periodic boundary conditions?
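(as in the other interactions, model.period gives the width and height of the rectangle for toroidal distances - see methas.h - and period[0] <= 0 means ordinary Euclidean distance is used. Note also that the evaluator below accumulates a product of sin terms and squares it once at the end: the product of the sin^2 factors equals the square of the product of the sin factors, so one squaring suffices.)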
*/
dgs->per = (model.period[0] > 0.0);
return((Cdata *) dgs);
}
/* conditional intensity evaluator */
double dgscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; {
int npts, ix, ixp1, j;
double *x, *y;
double u, v;
double d2, r2, pairprod, cifval;
Dgs *dgs;
DECLARE_CLOSE_D2_VARS;
dgs = (Dgs *) cdata;
r2 = dgs->rho2;
u = prop.u; v = prop.v; ix = prop.ix;
x = state.x; y = state.y;
npts = state.npts;
cifval = pairprod = 1.0;
if(npts == 0) return(cifval);
ixp1 = ix+1;
/* If ix = NONE = -1, then ixp1 = 0 is correct */
if(dgs->per) { /* periodic distance */
if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } }
if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } }
} else { /* Euclidean distance */
if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], r2, d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } }
if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(CLOSE_D2(u, v, x[j], y[j], r2, d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } }
}
/* sin to sin^2 */
cifval = pairprod * pairprod;
return cifval;
}
Cifns DgsCifns = { &dgsinit, &dgscif, (updafunptr) NULL, NO};
spatstat.core/src/mhloop.h0000644000176200001440000003036614141377573015322 0ustar liggesusers /* mhloop.h This file contains the iteration loop for the Metropolis-Hastings algorithm methas.c It is #included several times in methas.c with different #defines for the following variables MH_MARKED whether the simulation is marked (= the variable 'marked' is TRUE) MH_SINGLE whether there is a single interaction (as opposed to a hybrid of several interactions) MH_TEMPER whether tempering is applied MH_TRACKING whether to save transition history MH_DEBUG whether to print debug information MH_SNOOP whether to run visual debugger $Revision: 1.23 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */
#ifndef MH_DEBUG
#define MH_DEBUG NO
#endif
/* ..... Pre-processing: recursively delete illegal/improbable points .....
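(Each point of the initial state beyond the first ncond conditioned-on points is retained independently with probability min(1, c), where c is its conditional intensity computed without the activity/trend factor, raised to the inverse temperature when tempering is in force; in particular a point that violates a hard core has c = 0 and is always deleted. This makes the initial state feasible before the main loop begins.)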
*/ nfree = state.npts - algo.ncond; /* number of 'free' points */ if(thinstart && nfree > 0) { nsuspect = nfree; while(nsuspect > 0) { /* scan for illegal points */ ix = state.npts - nsuspect; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("check legality of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("check legality of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity without trend terms */ #if MH_SINGLE adenom = (*(thecif.eval))(deathprop, state, thecdata); #else adenom = 1.0; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif #if MH_TEMPER adenom = pow(adenom, invtemp); #endif #if MH_DEBUG Rprintf("cif = %lf\n", adenom); #endif /* accept/reject */ if(unif_rand() >= adenom) { #if MH_DEBUG Rprintf("deleting illegal/improbable point\n"); #endif /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } state.npts--; nfree--; #if MH_DEBUG Rprintf("deleting point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } nsuspect--; } } /* ............... MAIN ITERATION LOOP ............................. */ OUTERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { #if MH_DEBUG Rprintf("iteration %d\n", irep); #endif if(verb) { /* print progress message every nverb iterations */ iverb = irep + 1 + algo.nrep0; if((iverb % algo.nverb) == 0) Rprintf("iteration %d\n", iverb); } itype = REJECT; nfree = state.npts - algo.ncond; /* number of 'free' points */ /* ................ generate proposal ..................... */ /* Shift or birth/death: */ if(unif_rand() > algo.p) { #if MH_DEBUG Rprintf("propose birth or death\n"); #endif /* Birth/death: */ if(unif_rand() > algo.q) { /* Propose birth: */ birthprop.u = xpropose[irep]; birthprop.v = ypropose[irep]; #if MH_MARKED birthprop.mrk = mpropose[irep]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("propose birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[birthprop.mrk]; #endif #if MH_SINGLE anumer = betavalue * (*(thecif.eval))(birthprop, state, thecdata); #else anumer = betavalue; for(k = 0; k < Ncif; k++) anumer *= (*(cif[k].eval))(birthprop, state, cdata[k]); #endif #if MH_TEMPER anumer = pow(anumer, invtemp); #endif adenom = qnodds*(nfree+1); #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", anumer, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted birth\n"); #endif itype = BIRTH; /* Birth proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &birthprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = BIRTH; history.accepted[irep] = (itype == REJECT) ? 
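/* records 1 if the birth proposal was accepted above (itype == BIRTH), 0 if itype is still REJECT */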
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } else if(nfree > 0) { /* Propose death: */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose death of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("propose death of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[deathprop.mrk]; #endif #if MH_SINGLE adenom = betavalue * (*(thecif.eval))(deathprop, state, thecdata); #else adenom = betavalue; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif #if MH_TEMPER adenom = pow(adenom, invtemp); #endif anumer = qnodds * nfree; #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", adenom, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted death\n"); #endif itype = DEATH; /* Death proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &deathprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = DEATH; history.accepted[irep] = (itype == REJECT) ? 0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } } else if(nfree > 0) { /* Propose shift: */ /* point to be shifted */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif /* where to shift */ permitted = YES; shiftprop.ix = ix; shiftprop.u = xpropose[irep]; shiftprop.v = ypropose[irep]; #if MH_MARKED shiftprop.mrk = mpropose[irep]; if(algo.fixall) permitted = (shiftprop.mrk == deathprop.mrk); #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose shift of point %d = (%lf, %lf)[mark %d] to (%lf, %lf)[mark %d]\n", ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("propose shift of point %d = (%lf, %lf) to (%lf, %lf)\n", ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); #endif #endif /* evaluate cif in two stages */ cvn = cvd = 1.0; if(permitted) { #if MH_SINGLE cvn = (*(thecif.eval))(shiftprop, state, thecdata); if(cvn > 0.0) { cvd = (*(thecif.eval))(deathprop, state, thecdata); } else { permitted = NO; } #else for(k = 0; k < Ncif; k++) { cvn *= (*(cif[k].eval))(shiftprop, state, cdata[k]); if(cvn > 0.0) { cvd *= (*(cif[k].eval))(deathprop, state, cdata[k]); } else { permitted = NO; break; } } #endif } if(permitted) { #if MH_MARKED cvn *= model.beta[shiftprop.mrk]; cvd *= model.beta[deathprop.mrk]; #endif #if MH_TEMPER cvn = pow(cvn, invtemp); cvd = pow(cvd, invtemp); #endif #if MH_DEBUG Rprintf("cif[old] = %lf, cif[new] = %lf, Hastings ratio = %lf\n", cvd, cvn, cvn/cvd); #endif /* accept/reject */ if(unif_rand() * cvd < cvn) { #if MH_DEBUG Rprintf("accepted shift\n"); #endif itype = SHIFT; /* Shift proposal accepted . 
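(The shift was accepted with probability min(1, cvn/cvd), the Metropolis-Hastings ratio of the conditional intensities at the new and old locations; the coordinates are actually moved in the 'implement the transition' section further down.)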
*/ } } else { cvn = 0.0; cvd = 1.0; #if MH_DEBUG Rprintf("Forbidden shift"); #endif } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &shiftprop, cvn, cvd, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = SHIFT; history.accepted[irep] = (itype == REJECT) ? 0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = cvn; history.denominator[irep] = cvd; #endif } #endif } if(itype != REJECT) { /* ....... implement the transition ............ */ if(itype == BIRTH) { /* Birth transition */ /* add point at (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("implementing birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif if(state.npts + 1 > state.npmax) { #if MH_DEBUG Rprintf("!!!!!!!!!!! storage overflow !!!!!!!!!!!!!!!!!\n"); #endif /* storage overflow; allocate more storage */ Nmore = 2 * state.npmax; state.x = (double *) S_realloc((char *) state.x, Nmore, state.npmax, sizeof(double)); state.y = (double *) S_realloc((char *) state.y, Nmore, state.npmax, sizeof(double)); #if MH_MARKED state.marks = (int *) S_realloc((char *) state.marks, Nmore, state.npmax, sizeof(int)); #endif state.npmax = Nmore; /* call the initialiser again, to allocate additional space */ #if MH_SINGLE thecdata = (*(thecif.init))(state, model, algo); #else model.ipar = iparvector; for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } #endif #if MH_DEBUG Rprintf("........... storage extended .................\n"); #endif } if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, birthprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, birthprop, cdata[k]); } #endif } /* Now add point */ state.x[state.npts] = birthprop.u; state.y[state.npts] = birthprop.v; #if MH_MARKED state.marks[state.npts] = birthprop.mrk; #endif state.npts = state.npts + 1; #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif } else if(itype==DEATH) { /* Death transition */ /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } ix = deathprop.ix; state.npts = state.npts - 1; #if MH_DEBUG Rprintf("implementing death of point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } else { /* Shift transition */ /* Shift (x[ix], y[ix]) to (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing shift from %d = (%lf, %lf)[%d] to (%lf, %lf)[%d]\n", deathprop.ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("implementing shift from %d = (%lf, %lf) to (%lf, %lf)\n", deathprop.ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); Rprintf("\tnpts=%d\n", state.npts); #endif #endif if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, shiftprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, shiftprop, cdata[k]); } #endif } ix = shiftprop.ix; state.x[ix] = shiftprop.u; state.y[ix] = shiftprop.v; #if 
MH_MARKED
state.marks[ix] = shiftprop.mrk;
#endif
}
#if MH_DEBUG
} else {
Rprintf("rejected\n");
#endif
}
} }
spatstat.core/src/hardcore.c0000644000176200001440000000410714141377573015604 0ustar liggesusers#include <R.h>
#include <math.h>
#include "methas.h"
#include "dist2.h"
/* Conditional intensity computation for Hard core process */
/* Storage of parameters and precomputed/auxiliary data */
typedef struct Hardcore { double h; /* hard core distance */ double h2; double *period; int per; } Hardcore;
/* initialiser function */
Cdata *hardcoreinit(state, model, algo) State state; Model model; Algor algo; {
Hardcore *hardcore;
double h;
hardcore = (Hardcore *) R_alloc(1, sizeof(Hardcore));
/* Interpret model parameters*/
hardcore->h = h = model.ipar[0];
hardcore->h2 = h * h;
hardcore->period = model.period;
/* periodic boundary conditions? */
hardcore->per = (model.period[0] > 0.0);
return((Cdata *) hardcore);
}
/* conditional intensity evaluator */
double hardcorecif(prop, state, cdata) Propo prop; State state; Cdata *cdata; {
int npts, ix, ixp1, j;
double *x, *y;
double u, v;
double h2, a;
Hardcore *hardcore;
hardcore = (Hardcore *) cdata;
h2 = hardcore->h2;
u = prop.u; v = prop.v; ix = prop.ix;
x = state.x; y = state.y;
npts = state.npts;
if(npts == 0) return((double) 1.0);
ixp1 = ix+1;
/* If ix = NONE = -1, then ixp1 = 0 is correct */
if(hardcore->per) { /* periodic distance */
if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2)) return((double) 0.0); } }
if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2)) return((double) 0.0); } }
} else { /* Euclidean distance */
if(ix > 0) { for(j=0; j < ix; j++) { a = h2 - pow(u - x[j], 2); if(a > 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } }
if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { a = h2 - pow(u - x[j], 2); if(a > 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } }
}
return ((double) 1.0);
}
Cifns HardcoreCifns = { &hardcoreinit, &hardcorecif, (updafunptr) NULL, NO};
spatstat.core/src/PerfectStrauss.h0000644000176200001440000002156414141377573016775 0ustar liggesusers
// ........................... Strauss process ..........................
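// Note (a hedged sketch, inferred from the class interface rather than stated
// in this file): this header supplies the target process for the perfect
// (exact) simulation performed by Sampler::Sim() in the R interface below.
// The design appears to follow dominated coupling-from-the-past in the style
// of Kendall and Moller (2000): a dominating Poisson process of rate beta
// (TotalBirthRate = beta * area) supplies births, Interaction(d^2) returns
// gamma for squared distances below Rsquared and 1 otherwise, and the
// reported StartTime/EndTime record how far back in time the coupling had to
// run before the upper and lower processes coalesced.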
// $Revision: 1.6 $ $Date: 2020/05/12 03:32:45 $
class StraussProcess : public PointProcess {
public:
double beta, gamma, R, Rsquared;
StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri);
~StraussProcess(){}
void NewEvent(double *x, double *y, char *InWindow);
void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP);
double Interaction(double dsquared);
// void CalcBeta(long int xsidepomm, long int ysidepomm,
// double *betapomm);
// void CheckBeta(long int xsidepomm, long int ysidepomm,
// double *betapomm);
// double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p);
// void Beta(struct Point2 *TempCell);
// void CalcBeta(Point2Pattern *p2p);
};
StraussProcess::StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri) : PointProcess(xmin, xmax, ymin, ymax){
beta = b; gamma = g; R = Ri;
Rsquared = R * R;
InteractionRange = R;
TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
}
double StraussProcess::Interaction(double dsquared) {
double rtn;
rtn = 1;
if(dsquared < Rsquared) rtn = gamma;
return(rtn);
}
void StraussProcess::NewEvent(double *x, double *y, char *InWindow) {
double Xdim, Ydim;
Xdim = Xmax-Xmin;
Ydim = Ymax-Ymin;
*x = slumptal()*Xdim+Xmin;
*y = slumptal()*Ydim+Ymin;
*InWindow = 1;
}
void StraussProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) {
int i;
double xtemp, ytemp, L, Xdim, Ydim;
struct Point *TempPoint;
Xdim = Xmax-Xmin;
Ydim = Ymax-Ymin;
L = beta*Xdim*Ydim;
*GeneratedPoints = poisson(L);
*LivingPoints = *GeneratedPoints;
for (i=1; i<=*GeneratedPoints ; i++){
//Rprintf("Generating StraussProcess Poisson 3\n");
//scanf("%f",&f1);
xtemp = slumptal()*Xdim+Xmin;
ytemp = slumptal()*Ydim+Ymin;
//
//Rprintf("Generating StraussProcess Poisson 3.2\n");
TempPoint = ALLOCATE(struct Point);
//
TempPoint->X = xtemp;
TempPoint->Y = ytemp;
TempPoint->No = i;
TempPoint->R = slumptal();
//Rprintf("Generating StraussProcess Poisson 3.6\n");
TempPoint->next = headPoint->next;
headPoint->next = TempPoint;
*NoP = *NoP + 1;
}
}
//void StraussProcess::CalcBeta(long int xsidepomm, long int ysidepomm,
// double *betapomm){
// long int i,j,k;
// k=0;
// // Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm);
// for(i=0; i<xsidepomm; i++){
// for(j=0; j<ysidepomm; j++){
// *(betapomm + i*ysidepomm + j) = this->beta;
// k++;
// }
// }
//}
//void StraussProcess::CheckBeta(long int xsidepomm, long int ysidepomm,
// double *betapomm){
// long int i,j,k;
// // double d1;
// k=0;
// // Rprintf("\ndiagnostic message: Strauss CalcBeta...
%ld %ld\n",xsidepomm,ysidepomm); // for(i=0; i0.001) && (k==0)){ // Rprintf("%f %f %f %ld %ld\n",fabs(*(betapomm + i*ysidepomm + j)- beta), // *(betapomm + i*ysidepomm + j),beta,i,j); // k++; // // scanf("%lf",&d1); // } // } // } //} //double StraussProcess::lnCondInt(struct Point2 *TempCell, // Point2Pattern *p2p){ // double f1; // long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx,k; // double dy,dx, lnCI,dst2; // struct Point2 *TempCell2; // // f1 = (TempCell->X-p2p->Xmin)/p2p->XCellDim; xc = int(f1); // CLAMP(xc, 0, p2p->MaxXCell, "xc"); // f1 = (TempCell->Y-p2p->Ymin)/p2p->YCellDim; yc = int(f1); // CLAMP(yc, 0, p2p->MaxYCell, "yc"); // // dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); // dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); // rx = int(this->InteractionRange/dx+1.0); // ry = int(this->InteractionRange/dy+1.0); // // lnCI = log(TempCell->Beta); // // k = 0; // // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // // //Rprintf("MCI! %d %d %d %d\n",fx,tx,fy,ty); // // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnCondInt()"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, "internal error: TempCell2 is null in lnCondInt()"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // k++; // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnCI += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnCondInt() loop"); // } // } // } // return(lnCI); //} //void StraussProcess::Beta(struct Point2 *TempCell){ // TempCell->Beta = beta; //} //void StraussProcess::CalcBeta(Point2Pattern *p2p){ // long int xco,yco; // // double dy,dx; // struct Point2 *TempMother; // // for(xco = 0; xco <= p2p->MaxXCell; xco++){ // for(yco = 0; yco <= p2p->MaxYCell; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in CalcBeta()"); // TempMother = p2p->headCell[xco][yco]->next; // CHECK(TempMother, "internal error: TempMother is null in CalcBeta()"); // while(TempMother!=TempMother->next){ // TempMother->Beta = this->beta; // TempMother = TempMother->next; // CHECK(TempMother, // "internal error: TempMother is null in CalcBeta() loop"); // } // } // } //} // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectStrauss(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int EndTime, StartTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; SEXP stout, etout; int *ss, *ee; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments // model parameters Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); // window Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) { xcells = 9; } else if(xcells < 1) { xcells = 1; } ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) { ycells = 9; } else if(ycells < 1) { ycells = 1; } #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Strauss point process StraussProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); PROTECT(stout = NEW_INTEGER(1)); PROTECT(etout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); ss = INTEGER_POINTER(stout); ee = INTEGER_POINTER(etout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); *ss = StartTime; *ee = EndTime; // pack up into output list PROTECT(out = NEW_LIST(5)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); SET_VECTOR_ELT(out, 3, stout); SET_VECTOR_ELT(out, 4, etout); // return UNPROTECT(11); // 5 arguments plus xout, yout, nout, stout, etout, out return(out); } } spatstat.core/src/densptcross.c0000644000176200001440000002243414141377573016367 0ustar liggesusers#include #include #include "chunkloop.h" #include "crossloop.h" #include "constants.h" /* densptcross.c $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Assumes point patterns are sorted in increasing order of x coordinate *crdenspt Density estimate at points *crsmoopt Smoothed mark values at points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int i, j, n1, n2, maxchunk, jleft; \ double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; \ double *x1, *y1, *x2, *y2; #define STD_INITIALISE \ n1 = *nquery; \ x1 = xq; y1 = yq; \ n2 = *ndata; \ x2 = xd; y2 = yd; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- 
density estimation -------------------- */ void crdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += wd[j] * exp(-d2/twosig2); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void acrdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, detsigma, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } void awtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, detsigma, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += wd[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { 
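/* resulti has accumulated the (weighted) anisotropic kernel values exp(-(1/2) d' Sinv d) over the data points, where Sinv = [s11 s12; s21 s22] is the inverse covariance matrix and d = (dx, dy) is the displacement; multiplying by coef = 1/(2*pi*sqrt(det Sigma)) turns each term into a bivariate Gaussian density */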
result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void crsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } void wtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = wd[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } /* ------------- anisotropic versions -------------------- */ void acrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } void awtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = wd[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + 
dy * s22))/2.0); \
denom += wij; \
numer += wij * vd[j]; \
}, { \
result[i] = numer/denom; \
})
}
spatstat.core/src/k3.c0000644000176200001440000000716314141377573014337 0ustar liggesusers#include <R.h>
#include <math.h>
#include "geom3.h"
#include "functable.h"
/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ K function of 3D point pattern k3trans translation correction k3isot isotropic correction # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */
void k3trans(p, n, b, k) Point *p; int n; Box *b; Ftable *k; {
register int i, j, l, lmin;
register double dx, dy, dz, dist;
register double vx, vy, vz;
Point *ip, *jp;
double dt, vol, lambda, denom, term;
double sphesfrac(), sphevol();
/* compute denominator & initialise numerator*/
vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
lambda = ((double) n )/ vol;
denom = lambda * lambda;
for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; }
/* spacing of argument in result vector k */
dt = (k->t1 - k->t0)/(k->n - 1);
/* compute numerator */
for( i = 0; i < n; i++) {
ip = p + i;
for(j = i + 1; j < n; j++) {
jp = p + j;
dx = jp->x - ip->x;
dy = jp->y - ip->y;
dz = jp->z - ip->z;
dist = sqrt(dx * dx + dy * dy + dz * dz);
lmin = ceil( (dist - k->t0) / dt );
if(lmin < 0) lmin = 0;
vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx);
vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy);
vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz);
if(vx >= 0.0 && vy >= 0.0 && vz >= 0.0) {
term = 2.0 /(vx * vy * vz); /* 2 because they're ordered pairs */
for(l = lmin; l < k->n; l++) (k->num)[l] += term;
} } }
/* compute ratio */
for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? (k->num)[l] / (k->denom)[l] : 0.0;
}
void k3isot(p, n, b, k) Point *p; int n; Box *b; Ftable *k; {
register int i, j, l, lmin;
register double dx, dy, dz, dist;
Point *ip, *jp;
double dt, vol, denom, term;
double sphesfrac(), sphevol();
Point vertex;
Box half;
/* compute denominator & initialise numerator*/
vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
denom = ((double) (n * n))/vol;
for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; }
/* spacing of argument in result vector k */
dt = (k->t1 - k->t0)/(k->n - 1);
/* set up for volume correction */
vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0;
half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1;
half.x0 = (b->x0 + b->x1)/2.0;
half.y0 = (b->y0 + b->y1)/2.0;
half.z0 = (b->z0 + b->z1)/2.0;
/* compute numerator */
for( i = 0; i < n; i++) {
ip = p + i;
for(j = i + 1; j < n; j++) {
jp = p + j;
dx = jp->x - ip->x;
dy = jp->y - ip->y;
dz = jp->z - ip->z;
dist = sqrt(dx * dx + dy * dy + dz * dz);
lmin = ceil( (dist - k->t0) / dt );
if(lmin < 0) lmin = 0;
term = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist));
term *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol;
for(l = lmin; l < k->n; l++) (k->num)[l] += term;
} }
/* compute ratio */
for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)?
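/* ratio of sums; left at zero where the denominator is empty */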
(k->num)[l] / (k->denom)[l] : 0.0; } spatstat.core/src/Krect.c0000644000176200001440000000373714141377573015065 0ustar liggesusers/* Krect.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Fast code for K function in rectangular case. **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Krect.c defines three interface functions, for weighted, unweighted double, and unweighted integer cases KrectFunDec.h (#included thrice) Function declaration, arguments, storage allocation KrectV1.h split according to whether Isotropic Correction is wanted Macro ISOTROPIC is #defined KrectV2.h split according to whether Translation Correction is wanted Macro TRANSLATION is #defined KrectV3.h split according to whether Border Correction is wanted Macro BORDER is #defined KrectV4.h split according to whether Uncorrected estimate is wanted Macro UNCORRECTED is #defined KrectBody.h Function body, including loops over i and j KrectIncrem.h (#included twice) Code performed when a close pair of points has been found: calculate edge corrections, increment results. */ #include <R.h> #include <Rmath.h> #include <math.h> /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < 1.0e-12) ? 1 : 0) #define MIN(X,Y) (((X) < (Y)) ? (X) : (Y)) #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectInt #define COUNTTYPE int #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectDbl #define COUNTTYPE double #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectWtd #define COUNTTYPE double #define WEIGHTED #include "KrectFunDec.h" spatstat.core/src/mhsnoop.h0000644000176200001440000000065114141377573015505 0ustar liggesusers/* Function declarations from mhsnoop.c $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include "mhsnoopdef.h" void initmhsnoop(Snoop *s, SEXP env); void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype); spatstat.core/src/mhv4.h0000644000176200001440000000055214141377573014700 0ustar liggesusers/* mhv4.h visual debugger or not Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_SNOOP if(snooper.active) { /* visual debugger */ #define MH_SNOOP YES #include "mhv5.h" #undef MH_SNOOP } else { /* no visual debugger */ #define MH_SNOOP NO #include "mhv5.h" #undef MH_SNOOP } spatstat.core/src/denspt.c0000644000176200001440000003142114141377573015311 0ustar liggesusers#include <R.h> #include <Rmath.h> #include "chunkloop.h" #include "pairloop.h" #include "constants.h" /* denspt.c Calculation of density estimate at data points $Revision: 1.19 $ $Date: 2018/12/18 02:43:11 $ Assumes point pattern is sorted in increasing order of x coordinate *denspt* Density estimate at points *smoopt* Smoothed mark values at points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int n, i, j, maxchunk; \ double xi, yi, rmax, r2max, dx, dy, dx2, d2 #define STD_INITIALISE \ n = *nxy; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void denspt(nxy,
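/* denspt() evaluates the Gaussian kernel estimate of intensity at the
   data points themselves, leaving each point out of its own estimate.
   Ignoring the rmax truncation and the sorted-x optimisation supplied
   by PAIRLOOP, a naive equivalent would be the following (hypothetical
   helper, for exposition only):

   static void denspt_naive(int n, double *x, double *y,
                            double sigma, double *result) {
     int i, j;
     double d2, accum, twosig2 = 2.0 * sigma * sigma;
     double coef = 1.0/(TWOPI * sigma * sigma);
     for(i = 0; i < n; i++) {
       accum = 0.0;
       for(j = 0; j < n; j++) {
         if(j != i) {
           d2 = (x[j]-x[i])*(x[j]-x[i]) + (y[j]-y[i])*(y[j]-y[i]);
           accum += exp(-d2/twosig2);
         }
       }
       result[i] = coef * accum;
     }
   }
*/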
x, y, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtdenspt(nxy, x, y, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2/twosig2); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void adenspt(nxy, x, y, rmaxi, detsigma, sinv, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } void awtdenspt(nxy, x, y, rmaxi, detsigma, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void smoopt(nxy, x, y, v, self, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ 
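/* smoopt() computes the Nadaraya-Watson kernel smoother of the marks:
   vhat(x_i) = sum_j w_ij v_j / sum_j w_ij, with w_ij = exp(-d_ij^2/(2 sigma^2)).
   PAIRLOOP visits only j != i, so the two branches differ in the self
   term: when *self != 0 the point's own mark is added with weight
   w_ii = exp(0) = 1, i.e. (sketch)

   result_self[i] = (numer + v[i]) / (denom + 1.0);

   while *self = 0 gives the leave-one-out estimate, as used for example
   in cross-validation of the bandwidth sigma. */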
denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void wtsmoopt(nxy, x, y, v, self, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ------------- anisotropic versions -------------------- */ void asmoopt(nxy, x, y, v, self, rmaxi, sinv, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void awtsmoopt(nxy, x, y, v, self, rmaxi, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = 
numer/denom; \ }) } } /* ----------------- transformed coordinates -------------------- */ /* The following functions assume that x, y have been transformed by the inverse of the variance matrix, and subsequently scaled by 1/sqrt(2) so that the Gaussian density is proportional to exp(-(x^2+y^2)). Constant factor in density is omitted. */ void Gdenspt(nxy, x, y, rmaxi, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2); } , { result[i] = resulti; }) } void Gwtdenspt(nxy, x, y, rmaxi, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2); }, { result[i] = resulti; } ) } void Gsmoopt(nxy, x, y, v, self, rmaxi, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void Gwtsmoopt(nxy, x, y, v, self, rmaxi, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } spatstat.core/src/penttinen.c0000644000176200001440000000602614141377573016023 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "methas.h" #include "dist2.h" #include "constants.h" /* Conditional intensity computation for Penttinen process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Penttinen { double gamma; double r; double loggamma; double reach2; double *period; int hard; int per; } Penttinen; /* initialiser function */ Cdata *penttineninit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Penttinen *penttinen; penttinen = (Penttinen *)
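/* R_alloc() allocates from R's transient memory pool: the storage is
   reclaimed automatically by R when the enclosing .C/.Call invocation
   returns, so initialisers like this one need no matching free().
   Sketch of the idiom (illustration only):

   double *work = (double *) R_alloc((long) n, sizeof(double));
   ... use work; no explicit deallocation ...
*/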
R_alloc(1, sizeof(Penttinen)); /* Interpret model parameters*/ penttinen->gamma = model.ipar[0]; penttinen->r = model.ipar[1]; penttinen->reach2 = 4.0 * penttinen->r * penttinen->r; penttinen->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Penttinen gamma=%lf, r=%lf\n", penttinen->gamma, penttinen->r); #endif /* is the model numerically equivalent to hard core ? */ penttinen->hard = (penttinen->gamma < DOUBLE_EPS); penttinen->loggamma = (penttinen->hard) ? 0 : log(penttinen->gamma); /* periodic boundary conditions? */ penttinen->per = (model.period[0] > 0.0); return((Cdata *) penttinen); } /* conditional intensity evaluator */ double penttinencif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, reach2, z, z2, logpot, cifval; Penttinen *penttinen; DECLARE_CLOSE_D2_VARS; penttinen = (Penttinen *) cdata; reach2 = penttinen->reach2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); logpot = 0.0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(penttinen->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],penttinen->period,reach2,d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],penttinen->period,reach2,d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], reach2, d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_D2(u, v, x[j], y[j], reach2, d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } } if(penttinen->hard) { if(logpot > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((penttinen->loggamma) * M_2_PI * logpot); return cifval; } Cifns PenttinenCifns = { &penttineninit, &penttinencif, (updafunptr) NULL, NO}; spatstat.core/src/Estrauss.c0000644000176200001440000000577114141377573015632 0ustar liggesusers#include <R.h> #include <math.h> #include "chunkloop.h" #include "looptest.h" /* Estrauss.c $Revision: 1.6 $ $Date: 2020/11/30 10:58:22 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 C implementation of 'eval' for Strauss interaction Calculates number of data points within distance r of each quadrature point (when 'source' = quadrature points, 'target' = data points) Assumes point patterns are sorted in increasing order of x coordinate Additionally: 'Cclosepaircounts' for a single point pattern */ double sqrt(); void Ccrosspaircounts(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, counts) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax; /* output */ int *counts; { int nsource, ntarget, maxchunk, j, i, ileft, counted; double xsourcej, ysourcej, rmax, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); if(nsource == 0 || ntarget == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { counted = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft to iright */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2maxpluseps)
break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) ++counted; } counts[j] = counted; } } } /* count for each i the number of j closer than distance r */ void Cclosepaircounts(nxy, x, y, rmaxi, counts) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ /* output VECTOR, assumed initialised to 0 */ int *counts; { int n, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } if(i + 1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } /* end loop over i */ } } } spatstat.core/src/Egeyer.c0000644000176200001440000000464214141377573015241 0ustar liggesusers#include <R.h> #include <math.h> #include "chunkloop.h" #include "looptest.h" /* Egeyer.c $Revision: 1.7 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Part of C implementation of 'eval' for Geyer interaction Calculates change in saturated count (xquad, yquad): quadscheme (xdata, ydata): data tdata: unsaturated pair counts for data pattern quadtodata[j] = i if quad[j] == data[i] (indices start from ZERO) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(); void Egeyer(nnquad, xquad, yquad, quadtodata, nndata, xdata, ydata, tdata, rrmax, ssat, result) /* inputs */ int *nnquad, *nndata, *quadtodata, *tdata; double *xquad, *yquad, *xdata, *ydata, *rrmax, *ssat; /* output */ double *result; { int nquad, ndata, maxchunk, j, i, ileft, dataindex, isdata; double xquadj, yquadj, rmax, sat, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2; double tbefore, tafter, satbefore, satafter, delta, totalchange; nquad = *nnquad; ndata = *nndata; rmax = *rrmax; sat = *ssat; if(nquad == 0 || ndata == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nquad, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nquad, maxchunk, 65536) { totalchange = 0.0; xquadj = xquad[j]; yquadj = yquad[j]; dataindex = quadtodata[j]; isdata = (dataindex >= 0); /* adjust starting point */ xleft = xquadj - rmax; while((xdata[ileft] < xleft) && (ileft+1 < ndata)) ++ileft; /* process until dx > rmax */ for(i=ileft; i < ndata; i++) { dx = xdata[i] - xquadj; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; if(i != dataindex) { dy = ydata[i] - yquadj; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* effect of adding dummy point j or negative effect of removing data point */ tbefore = tdata[i]; tafter = tbefore + ((isdata) ? -1 : 1); /* effect on saturated values */ satbefore = (double) ((tbefore < sat)? tbefore : sat); satafter = (double) ((tafter < sat)? tafter : sat); /* sum changes over all i */ delta = satafter - satbefore; totalchange += ((isdata) ?
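/* Sketch of the saturation rule applied above: only the change in the
   capped count min(t, sat) matters. For instance, with sat = 2, a data
   point whose unsaturated count t rises from 2 to 3 contributes
   delta = min(3,2) - min(2,2) = 0, whereas a rise from 1 to 2
   contributes delta = 1:

   satbefore = (tbefore < sat) ? tbefore : sat;
   satafter  = (tafter  < sat) ? tafter  : sat;
   delta     = satafter - satbefore;
*/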
-delta : delta); } } } result[j] = totalchange; } } } spatstat.core/src/fexitc.c0000644000176200001440000000045514141377573015301 0ustar liggesusers# include <string.h> # include <stddef.h> # include <R.h> void fexitc(const char *msg) { size_t nc = strlen(msg); char buf[256]; if(nc > 255) { warning("invalid character length in fexitc"); nc = 255; } strncpy(buf, msg, nc); buf[nc] = '\0'; error(buf); } spatstat.core/src/dist2.c0000644000176200001440000000415114141377573015041 0ustar liggesusers# include <math.h> #include <R.h> #include "yesno.h" /* dist2: squared distance in torus dist2thresh: faster code for testing whether dist2 < r2 dist2Mthresh: same as dist2thresh, but does not assume the points are within one period of each other. Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double dist2(u,v,x,y,period) double u, v, x, y; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, d2; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp)? dx : dxp; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp)? dy : dyp; d2 = a * a + b * b; return d2; } double dist2either(u,v,x,y,period) double u, v, x, y; double *period; { if(period[0] < 0.0) return pow(u-x,2) + pow(v-y,2); return(dist2(u,v,x,y,period)); } int dist2thresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp) ? dx : dxp; residue = r2 - a * a; if(residue <= 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue > b * b) return YES; return NO; } int dist2Mthresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are NOT assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; while(dx > wide) dx -= wide; dxp = wide - dx; a = (dx < dxp) ? dx : dxp; residue = r2 - a * a; if(residue < 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; while(dy > high) dy -= high; dyp = high - dy; b = (dy < dyp) ?
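/* dist2() above measures squared distance on a torus: each coordinate
   separation is the smaller of the direct gap and the wrapped gap, so
   d2 = min(|dx|, w-|dx|)^2 + min(|dy|, h-|dy|)^2 for period (w,h).
   A compact standalone sketch (hypothetical helper; like dist2, it
   assumes the points lie within one period of each other):

   static double torus_dist2(double dx, double dy, double w, double h) {
     double ax = fabs(dx), ay = fabs(dy);
     double bx = (ax < w - ax) ? ax : w - ax;
     double by = (ay < h - ay) ? ay : h - ay;
     return bx * bx + by * by;
   }
*/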
dy : dyp; if (residue >= b * b) return YES; return NO; } spatstat.core/src/raster.h0000644000176200001440000000512014141377573015316 0ustar liggesusers/* raster.h Definition of raster structures & operations requires <math.h> (for floor()) $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ typedef struct Raster{ /* array of data */ char *data; /* coerced to appropriate type */ int nrow; /* dimensions of entire array */ int ncol; int length; int rmin; /* position of valid subrectangle */ int rmax; int cmin; int cmax; /* definition of mapping into continuous space */ double x0; /* position of entry (rmin,cmin) */ double y0; double x1; /* position of entry (rmax,cmax) */ double y1; double xstep; /* x increment for each column step */ double ystep; /* y increment for each row step */ /* xstep = (x1 - x0)/(cmax - cmin) = (x1 - x0)/(number of valid columns - 1) CAN BE POSITIVE OR NEGATIVE */ /* image of valid subrectangle */ double xmin; /* = min{x0,x1} */ double xmax; double ymin; double ymax; } Raster; /* how to clear the data */ #define Clear(ARRAY,TYPE,VALUE) \ { unsigned int i; TYPE *p; \ for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \ *p = VALUE; } /* how to index a rectangular array stored sequentially in row-major order */ #define Entry(ARRAY,ROW,COL,TYPE) \ ((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)] /* test for indices inside subrectangle */ #define Inside(ARRAY,ROW,COL) \ ( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \ (COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax)) /* how to compute the position in R^2 corresponding to a raster entry */ #define Xpos(ARRAY,COL) \ ((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin)) #define Ypos(ARRAY,ROW) \ ((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin)) #define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceTo(X,Y,ARRAY,ROW,COL)\ Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) #define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceToSquared(X,Y,ARRAY,ROW,COL)\ DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) /* how to map a point (x,y) in R^2 to a raster entry */ /* (x,y) is guaranteed to lie in the rectangle bounded by the images of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1) where r = RowIndex(..) and c = ColIndex(..). */ #define RowIndex(ARRAY,Y) \ ((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep)) #define ColIndex(ARRAY,X) \ ((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep)) spatstat.core/src/digber.c0000644000176200001440000000237414141377573015255 0ustar liggesusers/* digber.c Diggle-Berman function J used in bandwidth selection J(r) = \int_0^(2r) phi(t, r) dK(t) where K is the K-function and phi(t, r) = 2 r^2 * (acos(y) - y sqrt(1 - y^2)) where y = t/(2r). $Revision: 1.8 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include <R.h> double sqrt(), acos(); /* r is the vector of distance values, starting from 0, with length nr, equally spaced. dK = diff(K) is the vector of increments of the K-function, with length ndK = nr-1. values of J are computed only up to max(r)/2 nrmax = floor(nr/2).
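   The loop below therefore forms the Riemann-Stieltjes sum
   J(r[i]) ~= 2 * r[i]^2 * sum_j phi(t_j / (2 r[i])) * dK[j],
   with phi(y) = acos(y) - y * sqrt(1 - y^2) for 0 <= y < 1 and
   phi(y) = 0 for y >= 1, which is why the inner loop breaks as soon
   as y = tj/twori reaches 1.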
*/ void digberJ(r, dK, nr, nrmax, ndK, J) /* inputs */ int *nr, *nrmax, *ndK; double *r, *dK; /* output */ double *J; { int i, j, Ni, NdK; double ri, twori, tj, y, phiy, integral; Ni = *nrmax; NdK = *ndK; J[0] = 0.0; for(i = 1; i < Ni; i++) { ri = r[i]; twori = 2 * ri; integral = 0.0; for(j = 0; j < NdK; j++) { tj = r[j]; y = tj/twori; if(y >= 1.0) break; phiy = acos(y) - y * sqrt(1 - y * y); integral += phiy * dK[j]; } J[i] = 2 * ri * ri * integral; } } spatstat.core/src/Kborder.c0000644000176200001440000000172214141377573015405 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> /* Kborder.c Efficient computation of border-corrected estimates of K for large datasets KborderI() Estimates K function, returns integer numerator & denominator KborderD() Estimates K function, returns double precision numerator & denominator Kwborder() Estimates Kinhom. Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef WEIGHTED #define FNAME KborderI #define OUTTYPE int #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME KborderD #define OUTTYPE double #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME Kwborder #define WEIGHTED #define OUTTYPE double #include "Kborder.h" spatstat.core/src/Ediggra.c0000644000176200001440000000732014141377573015357 0ustar liggesusers#include <R.h> #include <math.h> #include "chunkloop.h" #include "looptest.h" /* Ediggra.c $Revision: 1.8 $ $Date: 2018/12/18 02:43:11 $ C implementation of 'eval' for DiggleGratton interaction (exponentiated) Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double sqrt(); void Ediggra(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, ddelta, rrho, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *ddelta, *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta; double product; nsource = *nnsource; ntarget = *nntarget; delta = *ddelta; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; delta2 = delta * delta; rhominusdelta = rho - delta; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting point */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process until dx > rho (or until product is zero) */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) { if(d2 <= delta2) { product = 0; break; } else product *= (sqrt(d2) - delta)/rhominusdelta; } } } values[j] = product; } } } /* 'split' version separating hard core terms from others */ void ESdiggra(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, ddelta, rrho, positive, hardcore) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget,
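/* Both routines in this file use the sorted-x sliding window idiom:
   since sources and targets are sorted by x, the left index ileft only
   ever advances, and the inner scan can break at the first target with
   dx^2 beyond rho^2, because targets to the left of the window were
   already skipped. Skeleton of the idiom (sketch only):

   ileft = 0;
   for(j = 0; j < nsource; j++) {
     xleft = xsource[j] - rho;
     while(xtarget[ileft] < xleft && ileft+1 < ntarget) ++ileft;
     for(i = ileft; i < ntarget; i++) {
       dx = xtarget[i] - xsource[j];
       if(dx * dx > rho2pluseps) break;
       ... test dy and accumulate ...
     }
   }
*/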
*ytarget; int *idsource, *idtarget; double *ddelta, *rrho; /* output */ double *positive; int *hardcore; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta; double product; nsource = *nnsource; ntarget = *nntarget; delta = *ddelta; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; delta2 = delta * delta; rhominusdelta = rho - delta; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting point */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process until dx > rho */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) { if(d2 <= delta2) { hardcore[j] = 1; } else { product *= (sqrt(d2) - delta)/rhominusdelta; } } } } positive[j] = product; } } } spatstat.core/src/areaint.c0000644000176200001440000001641514141377573015443 0ustar liggesusers#include <R.h> #include <Rmath.h> #include <math.h> #include "methas.h" #include "dist2.h" /* Conditional intensity function for an area-interaction process: cif = eta^(1-B) where B = (uncovered area)/(pi r^2) Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define NGRID 16 /* To explore serious bug, #define BADBUG */ #undef BADBUG /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct AreaInt { /* model parameters */ double eta; double r; /* transformations of the parameters */ double r2; double range2; double logeta; int hard; /* periodic distance */ double *period; int per; /* grid counting */ double dx; double xgrid0; int *my; int kdisc; /* scratch space for saving list of neighbours */ int *neighbour; } AreaInt; /* initialiser function */ Cdata *areaintInit(state, model, algo) State state; Model model; Algor algo; { double r, dx, dy, x0; int i, my, kdisc; AreaInt *areaint; /* create storage */ areaint = (AreaInt *) R_alloc(1, sizeof(AreaInt)); /* Interpret model parameters*/ areaint->eta = model.ipar[0]; areaint->r = r = model.ipar[1]; #ifdef BADBUG Rprintf("r = %lf\n", r); #endif areaint->r2 = r * r; areaint->range2 = 4 * r * r; /* square of interaction distance */ /* is the model numerically equivalent to hard core ? */ areaint->hard = (areaint->eta == 0.0); areaint->logeta = (areaint->hard) ? log(DOUBLE_XMIN) : log(areaint->eta); #ifdef BADBUG if(areaint->hard) Rprintf("Hard core recognised\n"); #endif /* periodic boundary conditions? */ areaint->period = model.period; areaint->per = (model.period[0] > 0.0); #ifdef BADBUG if(areaint->per) { Rprintf("*** periodic boundary conditions ***\n"); Rprintf("period = %lf, %lf\n", model.period[0], model.period[1]); } #endif /* grid counting */ dx = dy = areaint->dx = (2 * r)/NGRID; #ifdef BADBUG Rprintf("areaint->dx = %lf\n", areaint->dx); #endif areaint->xgrid0 = -r + dx/2; areaint->my = (int *) R_alloc((long) NGRID, sizeof(int)); kdisc = 0; for(i = 0; i < NGRID; i++) { x0 = areaint->xgrid0 + i * dx; my = floor(sqrt(r * r - x0 * x0)/dy); my = (my < 0) ?
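/* areaintInit() discretises the disc of radius r: for each of the NGRID
   column positions x0 it stores the half-height
   my[i] = floor(sqrt(r^2 - x0^2)/dy), so the disc holds
   kdisc = sum_i (2*my[i] + 1) grid points in total. The conditional
   intensity is later approximated from the covered fraction of these
   grid points, cifval = exp(logeta * kount/kdisc), i.e. eta raised to
   the covered fraction, consistent with cif = eta^(1-B) where B is the
   uncovered fraction stated in the header comment. */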
0 : my; areaint->my[i] = my; #ifdef BADBUG Rprintf("\tmy[%ld] = %ld\n", i, my); #endif kdisc += 2 * my + 1; } areaint->kdisc = kdisc; #ifdef BADBUG Rprintf("areaint->kdisc = %ld\n", areaint->kdisc); #endif /* allocate space for neighbour indices */ areaint->neighbour = (int *) R_alloc((long) state.npmax, sizeof(int)); return((Cdata *) areaint); } #ifdef BADBUG void fexitc(); #endif /* conditional intensity evaluator */ double areaintCif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *period, *x, *y; double u, v; double r2, dx, dy, a, range2; double xgrid, ygrid, xgrid0, covfrac, cifval; int kount, kdisc, kx, my, ky; int *neighbour; int nn, k; AreaInt *areaint; areaint = (AreaInt *) cdata; r2 = areaint->r2; range2 = areaint->range2; /* square of interaction distance */ dy = dx = areaint->dx; kdisc = areaint->kdisc; /* pointers */ period = areaint->period; neighbour = areaint->neighbour; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return ((double) 1.0); if(!areaint->per) { /* .......... Euclidean distance .................... First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(ix > 0) { for(j=0; j < ix; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(nn == 0) { /* no neighbours; no interaction */ cifval = 1.0; return cifval; } else if(areaint->hard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kx<NGRID; kx++) { xgrid = xgrid0 + kx * dx; my = areaint->my[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ if(nn > 0) { for(k=0; k < nn; k++) { j = neighbour[k]; a = r2 - pow(xgrid - x[j], 2); if(a > 0) { a -= pow(ygrid - y[j], 2); if(a > 0) { /* point j covers grid point */ ++kount; break; } } } } /* finished consideration of grid point (xgrid, ygrid) */ } } } } else { /* ............. periodic distance ......................
First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],period,range2)) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(dist2thresh(u,v,x[j],y[j],period,range2)) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } if(nn == 0) { /* no neighbours; no interaction */ cifval = 1.0; return cifval; } else if(areaint->hard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kx<NGRID; kx++) { xgrid = xgrid0 + kx * dx; my = areaint->my[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ for(k=0; k < nn; k++) { j = neighbour[k]; if(dist2Mthresh(xgrid,ygrid,x[j],y[j],period,r2)) { /* point j covers grid point */ ++kount; break; } } /* finished considering grid point (xgrid,ygrid) */ } } } } /* `kdisc' is the number of grid points in the disc `kount' is the number of COVERED grid points in the disc */ /* Hard core case has been handled. */ /* Usual calculation: covered area fraction */ covfrac = ((double) kount)/((double) kdisc); cifval = exp(areaint->logeta * covfrac); #ifdef BADBUG if(!R_FINITE(cifval)) { Rprintf("Non-finite CIF value\n"); Rprintf("kount=%ld, kdisc=%ld, covfrac=%lf, areaint->logeta=%lf\n", kount, kdisc, covfrac, areaint->logeta); Rprintf("u=%lf, v=%lf\n", u, v); fexitc("Non-finite CIF"); } #endif return cifval; } Cifns AreaIntCifns = { &areaintInit, &areaintCif, (updafunptr) NULL, NO}; spatstat.core/src/functable.h0000644000176200001440000000310214141377573015757 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions of C structures for spatial statistics function estimates. Usually the estimates are of the form f^(x) = a^(x)/b^(x); we store f^ and also a^ and b^ to cater for applications with replicated data. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file.
# ////////////////////////////////////////////// */ typedef struct Ftable { /* double precision function table */ double t0; double t1; int n; /* number of entries */ double *f; double *num; /* f[i] = num[i]/denom[i] */ double *denom; } Ftable; typedef struct Itable { /* integer count table e.g for histograms */ double t0; double t1; int n; int *num; int *denom; /* usually p[i] = num[i]/denom[i] */ } Itable; typedef struct H4table { /* Four histograms, for censored data */ double t0; double t1; int n; int *obs; /* observed lifetimes: o_i = min(t_i, c_i) */ int *nco; /* uncensored lifetimes: o_i for which t_i <= c_i */ int *cen; /* censoring times: c_i */ int *ncc; /* censor times of uncensored data: c_i for which t_i <= c_i */ int upperobs; /* number of o_i that exceed t1 */ int uppercen; /* number of c_i that exceed t1 */ } H4table; spatstat.core/src/mhv3.h0000644000176200001440000000060014141377573014671 0ustar liggesusers/* mhv3.h tracking or not Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_TRACKING if(tracking) { /* saving transition history */ #define MH_TRACKING YES #include "mhv4.h" #undef MH_TRACKING } else { /* not saving transition history */ #define MH_TRACKING NO #include "mhv4.h" #undef MH_TRACKING } spatstat.core/src/mhv2.h0000644000176200001440000000056314141377573014700 0ustar liggesusers/* mhv2.h single interaction or hybrid Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_SINGLE if(Ncif == 1) { /* single interaction */ #define MH_SINGLE YES #include "mhv3.h" #undef MH_SINGLE } else { /* hybrid interaction */ #define MH_SINGLE NO #include "mhv3.h" #undef MH_SINGLE } spatstat.core/src/strauss.c0000644000176200001440000000474314141377573015517 0ustar liggesusers#include <R.h> #include <Rmath.h> #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Strauss process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Strauss { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; } Strauss; /* initialiser function */ Cdata *straussinit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Strauss *strauss; strauss = (Strauss *) R_alloc(1, sizeof(Strauss)); /* Interpret model parameters*/ strauss->gamma = model.ipar[0]; strauss->r = model.ipar[1]; /* No longer passed as r^2 */ strauss->r2 = strauss->r * strauss->r; strauss->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Strauss gamma=%lf, r=%lf\n", strauss->gamma, strauss->r); #endif /* is the model numerically equivalent to hard core ? */ strauss->hard = (strauss->gamma < DOUBLE_EPS); strauss->loggamma = (strauss->hard) ? 0 : log(strauss->gamma); /* periodic boundary conditions?
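   (model.period[0] > 0.0 signals a toroidal window; by convention a
   negative first entry means ordinary Euclidean geometry, as in
   dist2either() in dist2.c)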
*/ strauss->per = (model.period[0] > 0.0); return((Cdata *) strauss); } /* conditional intensity evaluator */ double strausscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, cifval; Strauss *strauss; DECLARE_CLOSE_VARS; strauss = (Strauss *) cdata; r2 = strauss->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2)) ++kount; } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j], r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; j<npts; j++) { if(CLOSE(u,v,x[j],y[j], r2)) ++kount; } } } if(strauss->hard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((strauss->loggamma) * kount); return cifval; } Cifns StraussCifns = { &straussinit, &strausscif, (updafunptr) NULL, NO}; spatstat.core/src/PerfectDiggleGratton.h0000644000176200001440000001300014141377573020057 0ustar liggesusers // ........................... Diggle-Gratton process .......................... // $Revision: 1.6 $ $Date: 2020/05/12 03:30:46 $ class DiggleGrattonProcess : public PointProcess { public: double beta, delta, rho, kappa, rhominusdelta, deltasquared, rhosquared; DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k); ~DiggleGrattonProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DiggleGrattonProcess::DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; delta = d; rho = r; kappa = k; deltasquared = delta * delta; rhosquared = rho * rho; rhominusdelta = rho - delta; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DiggleGrattonProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { if(dsquared < deltasquared) { rtn = 0; } else { dist = sqrt(dsquared); t = (dist - delta)/rhominusdelta; rtn = pow(t, kappa); } } return(rtn); } void DiggleGrattonProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DiggleGrattonProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DiggleGrattonProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DiggleGrattonProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DiggleGrattonProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R ..........................
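// The interface that follows uses the standard .Call protection protocol:
// every SEXP obtained from an allocating API call is PROTECTed, and the
// final UNPROTECT(10) matches the 6 protected arguments plus xout, yout,
// nout and out. A minimal sketch of the pattern (illustration only):
//
//   SEXP ans;
//   PROTECT(ans = NEW_NUMERIC(n));   // allocate and protect the result
//   // ... fill NUMERIC_POINTER(ans) ...
//   UNPROTECT(1);                    // balance the protection stack
//   return(ans);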
extern "C" { SEXP PerfectDiggleGratton(SEXP beta, SEXP delta, SEXP rho, SEXP kappa, SEXP xrange, SEXP yrange) { // input parameters double Beta, Delta, Rho, Kappa, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(delta = AS_NUMERIC(delta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(kappa = AS_NUMERIC(kappa)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments // model parameters Beta = *(NUMERIC_POINTER(beta)); Delta = *(NUMERIC_POINTER(delta)); Rho = *(NUMERIC_POINTER(rho)); Kappa = *(NUMERIC_POINTER(kappa)); // window dimensions Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) { xcells = 9; } else if(xcells < 1) { xcells = 1; } ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) { ycells = 9; } else if(ycells < 1) { ycells = 1; } #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise DiggleGratton point process DiggleGrattonProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Delta,Rho,Kappa); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat.core/NEWS0000644000176200001440000004467114147573205013567 0ustar liggesusers CHANGES IN spatstat.core VERSION 2.3-2 OVERVIEW o More diagnostics for spatial logistic regression models. o Important bug fix in kppm. o Increased numerical stability in kppm. o Minor improvements and bug fixes. o We thank Jonas Brehmer for contributions. NEW FUNCTIONS o lurking.slrm Lurking variable plot for spatial logistic regression models. o eem.slrm Exponential energy marks for spatial logistic regression models. o eem.ppm Exponential energy marks for Gibbs and Poisson point process models (this function was previously called 'eem'). SIGNIFICANT USER-VISIBLE CHANGES o eem The function 'eem' is now generic, with methods for 'ppm' and 'slrm'. The function previously named 'eem' is now called 'eem.ppm'. o objsurf.dppm, objsurf.kppm, objsurf.mincontrast New arguments 'xlim', 'ylim' determine the range of parameter values to be considered. 
o Hybrid Printed output from hybrid models has been improved slightly. o kppm New default settings ensure greater numerical stability of the optimization algorithm against the effects of the scale of the spatial coordinates. New argument 'stabilize' specifies whether the optimization algorithm should be numerically stabilized. o pcf Improved error message BUG FIXES o kppm Results were sometimes incorrect for method='clik2' and method='palm' because the log composite likelihood was erroneously truncated to positive values. Any fitted model for which logLik(model) = 2.2e-16 should be suspected of being incorrect. Fixed. o edge.Ripley Results were incorrect for data points lying exactly at the corners of a rectangle. Fixed. o MultiHard A hybrid of 'MultiHard' with another multitype interaction caused an error. Fixed. o simulate.dppm, simulate.detpointprocfamily Crashed, rarely, with error message 'The dimension differs from the number of columns in index'. Fixed. o as.fv.kppm, as.fv.dppm The default plot labels in 'as.fv(x)' implied that the model 'x' was inhomogeneous. Fixed. CHANGES IN spatstat.core VERSION 2.3-1 OVERVIEW o Covariates in ppm and mppm may be functions that depend on the marks as well as the spatial coordinates. o Automatic selection of threshold for defining a binary predictor. o Random perturbation of line segments. o Minor extensions, performance improvements, and bug fixes. NEW FUNCTIONS o thresholdSelect, thresholdCI Select the optimal threshold for converting a numerical predictor to a binary predictor. o coef<-.fii Changes the coefficients of a fitted interaction object (a method for the generic "coef<-") o rjitter.psp Random perturbation of a spatial pattern of line segments. A method for 'rjitter' for class 'psp'. SIGNIFICANT USER-VISIBLE CHANGES o ppm Covariates which are functions may now depend on the marks as well as the spatial coordinates: function(x,y,marks). o mppm Covariates which are functions may now depend on the marks as well as the spatial coordinates: function(x,y,marks). o reach.kppm Now returns a result for LGCP models as well as cluster models. o distcdf Improved regularisation algorithm. Argument 'nr=NULL' is now accepted. New argument 'delta' allows the result to be interpolated onto a finer grid. o collapse.fv Columns identified by the arguments 'same' and 'different' may now be absent from some of the 'fv' objects that will be collapsed. o Kest When the argument 'domain' is given, the calculation of estimates of K(r) has changed slightly, to adhere more closely to the description in the help file. o reload.or.compute Now prints a message indicating whether the data were recomputed or reloaded from file. New argument 'verbose'. o update.kppm New argument 'envir'. o clusterfit Minor changes to the argument list. o mincontrast Minor changes to the argument list. o mincontrast Improved algorithm for handling NA, NaN or infinite values. o varcount Argument B has a sensible default. o pool.envelope Now uses the value of 'nrank' which was used in the original envelopes. o Kmulti New argument 'rmax'. o Kinhom No longer issues a warning about changed behaviour in the case where 'lambda' is a fitted model. o pcfinhom No longer issues a warning about changed behaviour in the case where 'lambda' is a fitted model. BUG FIXES o subfits The fitted interaction coefficients were garbled. If 'mfit' is the mppm object and 'a <- subfits(mfit)[[i]]' is one of the sub-models, then coef(a) was correct, but coef(fitin(a)) was incorrect. 
The fitted interaction was shown correctly by printing 'mfit' but incorrectly by printing 'a'. Fixed. o varcount The result was very inaccurate if the cluster radius was small compared to the size of the window 'B', due to discretisation error. Fixed. o segregation.test The test statistic was calculated as the mean, rather than the sum, of discrepancies between probabilities. (The p-value was not affected.) Fixed. o Kest If 'domain' was specified, 'rmax' was ignored. Fixed. o edge.Ripley Value was incorrect for a point lying exactly on a corner. Fixed. o edge.Ripley Crashed when method="interpreted", if a point lay exactly on a corner. Fixed. o plot.fv, plot.envelope Crashed when trying to display a significance band of width zero around a constant function. Fixed. o collapse.fv Crashed if 'length(same) > 1'. Fixed. CHANGES IN spatstat.core VERSION 2.3-0 OVERVIEW o We thank Chiara Fend for contributions. o Extensive support for spatial logistic regression models. o New fitting method in kppm and dppm. o Transect of an image along a curve. o Image cross-correlation and cross-covariance. o Summary method for 'objsurf'. o Minor bug fixes. NEW FUNCTIONS o response Generic function which extracts the values of the response in a fitted model. There are methods for lm, glm (which extract the numeric vector of responses), and ppm, kppm, slrm, lppm and mppm (which extract the original data point pattern). o cov.im, cor.im Correlation or covariance between several pixel images. o summary.objsurf, print.summary.objsurf Summary method for 'objsurf' o residuals.slrm Residuals for spatial logistic regression models. o leverage.slrm, influence.slrm, dfbetas.slrm, dffit.slrm Leverage and influence diagnostics for spatial logistic regression models. o rhohat.slrm Method for 'rhohat' for spatial logistic regression models. o envelope.slrm Method for 'envelope' for spatial logistic regression models. o intensity.slrm Method for 'intensity' for spatial logistic regression models. o deviance.slrm Method for 'deviance' for spatial logistic regression models. o pseudoR2.slrm Method for 'pseudoR2' for spatial logistic regression models. o quadrat.test.slrm Method for 'quadrat.test' for spatial logistic regression models. o parameters.slrm Method for 'parameters' for spatial logistic regression models. o valid.slrm Method for 'valid' for spatial logistic regression models. o emend.slrm Method for 'emend' for spatial logistic regression models. o roc.slrm Method for 'roc' for spatial logistic regression models. o auc.slrm Method for 'auc' for spatial logistic regression models. o Window.slrm, as.owin.slrm Methods for 'Window' and 'as.owin' for spatial logistic regression models. SIGNIFICANT USER-VISIBLE CHANGES o kppm, dppm New option 'method="adapcl"' performs adaptive composite likelihood fitting. [Contributed by Chiara Fend.] o transect.im New argument 'curve' allows the user to specify a curved transect. BUG FIXES o clusterfield Values of the cluster field were slightly incorrect (slightly higher than the correct values) near the edge of the window, because an 'edge correction' was mistakenly applied. Fixed. o rhohat The rug plot (produced by plot.rhohat) was incorrect when rhohat was called with method="piecewise". Fixed. o markcrosscorr Did not recognise the option 'correction="none"'. Fixed. o roc.ppp The default plot of the result of roc.ppp did not include the diagonal line 'y=x'. Fixed. 
CHANGES IN spatstat.core VERSION 2.2-0 OVERVIEW o We thank Abdollah Jalilian, Yongtao Guan and Rasmus Waagepetersen for contributions. o summary method for spatial logistic regression models o estimation of the spatial covariance function of a pixel image o simulation of the product shot noise Cox process. o extensions to rhohat NEW FUNCTIONS o rPSNCP Generate simulated realisations of the product shot noise Cox process. Contributed by Abdollah Jalilian, Yongtao Guan and Rasmus Waagepetersen. o spatcov Estimate the spatial covariance function of a pixel image. o summary.slrm, print.summary.slrm Summary method for spatial logistic regression models o coef.summary.slrm Print the fitted coefficients, confidence interval and p-values for a spatial logistic regression model. o pairMean Compute the mean of a specified function of interpoint distance between random points in a window. SIGNIFICANT USER-VISIBLE CHANGES o rhohat New option (smoother='piecewise') computes a piecewise-constant estimate of rho(z). o rhohat The result now includes the 'average' intensity rho. o distcdf Arguments which are NULL will be treated as missing. o distcdf New argument 'savedenom'. CHANGES IN spatstat.core VERSION 2.1-2 OVERVIEW o Reduced CRAN check time. CHANGES IN spatstat.core VERSION 2.1-1 OVERVIEW o Minor bug fix BUG FIXES o simulate.kppm Conditional simulation crashed on rare occasions, with an error about negative probabilities. Fixed. CHANGES IN spatstat.core VERSION 2.1-0 OVERVIEW o We thank Achmad Choiruddin, Jean-Francois Coeurjolly and Rasmus Waagepetersen for contributions. o Conditional simulation in kppm o Information criteria for model selection in kppm o Modified handling of covariates in slrm o densityfun.ppp handles query points outside original window o Improved output in summary.mppm o Minor improvements and bug fixes. NEW FUNCTIONS o ic Information criteria for model selection in ppm and kppm. Kindly contributed by Achmad Choiruddin, Jean-Francois Coeurjolly and Rasmus Waagepetersen. SIGNIFICANT USER-VISIBLE CHANGES o simulate.kppm Conditional simulation of the model, given a fixed number of points, is now supported using the new arguments 'n.cond' and 'w.cond'. o densityfun.ppp The resulting function can now handle query points which lie outside the window of the original data, and has argument 'drop=TRUE' which specifies how to handle them. o rpoint New argument 'forcewin' forces the code to use the window 'win' when 'f' is a pixel image. o slrm In the default case (where dataAtPoints is not given) all spatial covariates, including the spatial coordinates x and y, are now evaluated at the centre of each pixel. This improves consistency with other implementations of spatial logistic regression. o slrm Silently ignores any arguments '...' that are not recognised by 'as.mask' o summary.mppm Improved summary of the dependence of the interpoint interaction on the covariates. o pairs.im New argument 'drop'. BUG FIXES o model.matrix.mppm If the model was fitted using 'gam', the resulting matrix did not have an 'assign' attribute. Fixed. o model.depends Crashed for models fitted using 'gam'. Fixed. o predict.slrm, fitted.slrm Crashed if the model was fitted using split pixels (argument 'splitby'). Fixed. o predict.slrm, fitted.slrm Crashed in some cases when 'window' was given. Fixed. o update.slrm Failed to find covariates that were provided in 'env'. Fixed. o cdf.test Crashed if the covariate was constant. Fixed. 
CHANGES IN spatstat.core VERSION 2.0-0

OVERVIEW

    o We thank Tilman Davies, Greg McSwiggan and Suman Rakshit
    for contributions.

    o We thank Corey Anderson, Michael Chirico, Andy Craig,
    Marcelino de la Cruz, Tilman Davies, Pavel Fibich, Kurt Hornik,
    Gopalan Nair, Yonatan Rosen and Rasmus Waagepetersen
    for contributions.

    o Diffusion kernel smoothing.

    o More support for spatial logistic regression models.

    o predict.mppm now works for multitype point process models.

    o Improved handling of 'newdata' in predict.mppm.

    o More support for multi-dimensional patterns.

NEW FUNCTIONS

    o densityHeat
    New generic function for diffusion kernel estimation of intensity.

    o densityHeat.ppp
    Diffusion kernel estimation of intensity for a point pattern
    in 2 dimensions. This is an alternative to density.ppp.

    o intersect.boxx
    Compute intersection of boxes in multi-dimensional space.

    o scale.boxx, scale.ppx
    Methods for 'scale' for boxes and patterns in multi-dimensional
    space.

    o shift.boxx, shift.ppx
    Methods for 'shift' for boxes and patterns in multi-dimensional
    space.

    o is.boxx
    Determine whether an object is a multidimensional box.

    o mincontrast
    New argument 'action.bad.values' specifies what action is taken
    when the summary function produces NA or NaN or infinite values.

SIGNIFICANT USER-VISIBLE CHANGES

    o slrm
    'step' can now be applied to models fitted using 'slrm'.

    o predict.mppm
    Now supports multitype point process models.

    o predict.mppm
    Improved handling of argument 'newdata'.

    o rotmean
    The result now has the same 'unitname' as the input object X.
    New argument 'adjust' controls the smoothing bandwidth.

    o rlabel
    New argument 'group' specifies that the points are divided into
    several groups, and that relabelling is applied within each group.

    o simulate.ppm
    Now recognises the argument 'window' as an alternative to 'w'.

    o kppm
    Improved numerical robustness.

    o Kcross, Gcross, Jcross
    Function labels (shown on the plot legend) have been improved
    when i = j.

    o anova.mppm
    Issues a warning when applied to random-effects models
    (models fitted using the argument 'random').

BUG FIXES

    o Gest
    If correction="rs" or correction="km", then both the reduced-sample
    (border correction) and Kaplan-Meier corrected estimates were
    calculated. [Spotted by Gopalan Nair.] Fixed.

    o rMatClust, rThomas, rCauchy, rVarGamma
    If the fitted model was effectively a Poisson process, the result
    did not have attributes 'Lambda' and 'parents' even when the user
    requested them. Fixed.

    o model.matrix.mppm
    Crashed with random-effects models. Fixed.

    o anova.mppm
    Crashed with random-effects models. Fixed.

    o simulate.rhohat
    Crashed when applied to rhohat objects computed from data
    on a linear network. Fixed.

    o objsurf.kppm
    Crashed if the model was fitted by Palm likelihood (method="palm")
    or second order composite likelihood (method="clik2"). Fixed.

CHANGES IN spatstat.core VERSION 1.65-11

OVERVIEW

    o Internal tweaks.

CHANGES IN spatstat.core VERSION 1.65-10

OVERVIEW

    o Minor corrections to documentation.

CHANGES IN spatstat.core VERSION 1.65-9

OVERVIEW

    o We thank Ian Buller for a suggestion.

    o Weights permitted in density calculation for line segments.

SIGNIFICANT USER-VISIBLE CHANGES

    o density.psp
    New argument 'weights'.

CHANGES IN spatstat.core VERSION 1.65-8

OVERVIEW

    o Minor changes to appease the compiler.

CHANGES IN spatstat.core VERSION 1.65-7

OVERVIEW

    o We thank Michael Chirico for a contribution.

    o Minor changes to appease the compiler.

CHANGES IN spatstat.core VERSION 1.65-6

OVERVIEW

    o We thank Tilman Davies and Pavel Fibich for contributions.
    o Important bug fix in simulation of log-Gaussian Cox processes.

    o Increased speed for large datasets.

    o Variance calculations handle larger datasets.

SIGNIFICANT USER-VISIBLE CHANGES

    o vcov.ppm, summary.ppm
    Variance calculations now handle larger datasets
    (using sparse arrays).

    o rSSI
    Accelerated.

    o overall speed
    Changes have been made to the internal code of spatstat
    which should accelerate computations involving large datasets.

    o localpcf, localpcfinhom
    New argument 'rvalue'.

BUG FIXES

    o rLGCP, simulate.kppm
    Simulation results for log-Gaussian Cox processes were incorrect
    unless the pixel dimensions and pixel spacings were identical
    on the horizontal and vertical axes. (If pixel dimensions were
    not specified, then the results were incorrect whenever the
    Frame of the simulation window was not a square.)
    [Spotted by Tilman Davies.] Fixed.

    o Vmark
    Crashed if normalise=TRUE when there was only one column of marks.
    (Spotted by Pavel Fibich.) Fixed.

    o nnclean
    Crashed if k >= npoints(X). Fixed.

    o print.ppm
    Crashed sometimes when applied to the result of subfits(). Fixed.

CHANGES IN spatstat.core VERSION 1.65-5

OVERVIEW

    o Minor changes required by CRAN.

CHANGES IN spatstat.core VERSION 1.65-1

OVERVIEW

    o Added NEWS file.

CHANGES IN spatstat.core VERSION 1.65-0

OVERVIEW

    o Package initialised at version 1.65-0.

SIGNIFICANT USER-VISIBLE CHANGES

    o spatstat.core
    The package 'spatstat.core' has been created from a subset
    of the code in the original 'spatstat' package version 1.65-0.
    It contains the core functionality for statistical analysis of
    spatial data. For an overview, see help("spatstat.core-package").

    o Execution
    The 'spatstat.core' package is slightly faster than the
    corresponding code in the 'spatstat' package, because the
    procedure for calling internal C functions has been streamlined.

spatstat.core/R/0000755000176200001440000000000014141452520013243 5ustar liggesusers
spatstat.core/R/Kinhom.R0000644000176200001440000004562114144333461014630 0ustar liggesusers
#
# Kinhom.S      Estimation of K function for inhomogeneous patterns
#
# $Revision: 1.100 $ $Date: 2021/10/26 07:12:00 $
#
# Kinhom()  compute estimate of K_inhom
#
#
# Reference:
#  Non- and semiparametric estimation of interaction
#  in inhomogeneous point patterns
#  A.Baddeley, J.Moller, R.Waagepetersen
#  Statistica Neerlandica 54 (2000) 329--350.
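## Usage sketch (not run): one way to call Kinhom with a fitted
## intensity. The intensity function, model formula and parameter
## values are illustrative assumptions, not part of the original source.
if(FALSE) {
  X <- rpoispp(function(x,y) { 100 * x }, lmax=100)  # inhomogeneous Poisson data
  fit <- ppm(X ~ x)                        # log-linear intensity model
  lam <- predict(fit, locations=X)         # fitted intensity at data points
  K <- Kinhom(X, lambda=lam)               # inhomogeneous K function estimate
  plot(K)
}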
#
# -------- functions ----------------------------------------
#       Kinhom()        compute estimate of K
#                       using various edge corrections
#
#       Kwtsum()        internal routine for border correction
#
# -------- standard arguments ------------------------------
#       X               point pattern (of class 'ppp')
#
#       r               distance values at which to compute K
#
#       lambda          vector of intensity values for points of X
#
# -------- standard output ------------------------------
#       A data frame (class "fv") with columns named
#
#       r:              same as input
#
#       trans:          K function estimated by translation correction
#
#       iso:            K function estimated by Ripley isotropic correction
#
#       theo:           K function for Poisson ( = pi * r ^2 )
#
#       border:         K function estimated by border method
#                       (denominator = sum of weights of points)
#
#       bord.modif:     K function estimated by border method
#                       (denominator = area of eroded window)
#
# ------------------------------------------------------------------------

"Linhom" <- function(X, ..., correction) {
  if(missing(correction)) correction <- NULL
  K <- Kinhom(X, ..., correction=correction)
  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
  # relabel the fv object
  L <- rebadge.fv(L, quote(L[inhom](r)), c("L", "inhom"),
                  names(K), new.labl=attr(K, "labl"))
  attr(L, "labl") <- attr(K, "labl")
  attr(L, "dangerous") <- attr(K, "dangerous")
  #
  return(L)
}

"Kinhom" <- function(X, lambda=NULL, ..., r = NULL, breaks = NULL,
                     correction=c("border", "bord.modif",
                                  "isotropic", "translate"),
                     renormalise=TRUE, normpower=1,
                     update = TRUE, leaveoneout = TRUE,
                     nlarge = 1000, lambda2=NULL,
                     reciplambda=NULL, reciplambda2=NULL,
                     diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE)
{
  verifyclass(X, "ppp")
  nlarge.given <- !missing(nlarge)
  rfixed <- !missing(r) || !missing(breaks)
  # determine basic parameters
  W <- X$window
  npts <- npoints(X)
  areaW <- area(W)
  diamW <- diameter(W)
  rmaxdefault <- rmax.rule("K", W, npts/areaW)
  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  # match corrections
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("border", "bord.modif", "isotropic", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             good="good",
                             best="best"),
                           multi=TRUE)
  # best.wanted <- ("best" %in% correction)
  ## replace 'good' by the optimal choice for this size of dataset
  if("good" %in% correction)
    correction[correction == "good"] <- good.correction.K(X)
  ## retain only corrections that are implemented for the window
  correction <- implemented.for.K(correction, W$type, correction.given)

  ###########################################################
  # DETERMINE WEIGHTS AND VALIDATE
  #
  # The matrix 'lambda2' or 'reciplambda2' is sufficient information
  # unless we want the border correction.
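  # Note on renormalisation (summarising the code below):
  # when renormalise=TRUE, the estimate is rescaled by 'renorm.factor',
  # computed from the reciprocal intensities. In the vector case,
  #     renorm.factor = (areaW / sum(reciplambda))^normpower
  # (with a modified form when diagonal=FALSE and normpower=2);
  # in the matrix case,
  #     renorm.factor = (areaW^2 / sum(reciplambda2))^(normpower/2)
  # (with the diagonal of 'reciplambda2' zeroed when diagonal=FALSE).
  # This standardisation is intended to stabilise the estimator when
  # the intensity has been estimated from the data.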
lambda2.given <- !is.null(lambda2) || !is.null(reciplambda2) lambda2.suffices <- !any(correction %in% c("bord", "bord.modif")) ## Arguments that are 'dangerous' for envelope, if fixed dangerous <- c("lambda", "reciplambda", "lambda2", "reciplambda2") danger <- TRUE # Use matrix of weights if it was provided and if it is sufficient if(lambda2.suffices && lambda2.given) { if(!is.null(reciplambda2)) { check.nmatrix(reciplambda2, npts) validate.weights(reciplambda2, recip=TRUE) } else { check.nmatrix(lambda2, npts) validate.weights(lambda2) reciplambda2 <- 1/lambda2 } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) rlam2 <- reciplambda2 if(!diagonal) diag(rlam2) <- 0 renorm.factor <- (areaW^2/sum(rlam2))^(normpower/2) } } else { # Vector lambda or reciplambda is required if(missing(lambda) && is.null(reciplambda)) { # No intensity data provided danger <- FALSE # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) lambda <- as.numeric(lambda) validate.weights(lambda, how="density estimation") reciplambda <- 1/lambda } else if(!is.null(reciplambda)) { # 1/lambda values provided if(is.im(reciplambda)) reciplambda <- safelookup(reciplambda, X) else if(is.function(reciplambda)) reciplambda <- reciplambda(X$x, X$y) else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) check.nvector(reciplambda, npts) else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) validate.weights(reciplambda, recip=TRUE) } else { # lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambda <- predict(model, locations=X, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } danger <- FALSE } } else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) validate.weights(lambda) # evaluate reciprocal reciplambda <- 1/lambda } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) if(!diagonal && normpower == 2) { renorm.factor <- (areaW^2)/(sum(reciplambda)^2 - sum(reciplambda^2)) } else { renorm.factor <- (areaW/sum(reciplambda))^normpower } } } # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even && !lambda2.given large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked.fast <- (correction.given && correction.fast) || (nlarge.given && large.n.trigger) if(!can.do.fast && asked.fast) { whynot <- if(!(breaks$even)) "r values not evenly spaced" else if(!missing(lambda)) 
"matrix lambda2 was given" else NULL warning(paste("cannot use efficient code", whynot, sep="; ")) } if(will.do.fast) { ## Compute Kinhom using fast algorithm(s) ## determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { ## some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } ## restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) ## border method if(bord) { Kb <- Kborder.engine(X, max(r), length(r), correction, weights=reciplambda, ratio=ratio) if(renormalise) { ynames <- setdiff(fvnames(Kb, "*"), "theo") Kb <- adjust.ratfv(Kb, ynames, denfactor=1/renorm.factor) } Kb <- tweak.ratfv.entry(Kb, "border", new.labl="{hat(%s)[%s]^{bord}} (r)") Kb <- tweak.ratfv.entry(Kb, "bord.modif", new.labl="{hat(%s)[%s]^{bordm}} (r)") } ## uncorrected if(none) { Kn <- Knone.engine(X, max(r), length(r), weights=reciplambda, ratio=ratio) if(renormalise) Kn <- adjust.ratfv(Kn, "un", denfactor=1/renorm.factor) Kn <- tweak.ratfv.entry(Kn, "un", new.labl="{hat(%s)[%s]^{un}} (r)") } K <- if(bord && !none) Kb else if(!bord && none) Kn else if(!ratio) cbind.fv(Kb, Kn[, c("r", "un")]) else bind.ratfv(Kb, Kn[, c("r", "un")], ratio=TRUE) ## tweak labels K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom")) if(danger) attr(K, "dangerous") <- dangerous return(K) } ########################################### # Fast code for rectangular window ########################################### if(can.do.fast && is.rectangle(W) && spatstat.options("use.Krect")) { K <- Krect.engine(X, rmax, length(r), correction, weights=reciplambda, ratio=ratio, fname=c("K", "inhom")) if(renormalise) { allfun <- setdiff(fvnames(K, "*"), "theo") K <- adjust.ratfv(K, allfun, denfactor=1/renorm.factor) } K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom")) attr(K, "alim") <- alim if(danger) attr(K, "dangerous") <- dangerous return(K) } ########################################### # Slower code ########################################### # this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(renormalise) (areaW / renorm.factor) else areaW K <- ratfv(K, NULL, denom, argu="r", ylab=quote(K[inhom](r)), valu="theo", fmla=NULL, alim=alim, labl=c("r","{%s[%s]^{pois}}(r)"), desc=desc, fname=c("K", "inhom"), ratio=ratio) # identify all close pairs rmax <- max(r) what <- if(any(correction == "translate")) "all" else "ijd" close <- closepairs(X, rmax, what=what) dIJ <- close$d # compute weights for these pairs I <- close$i J <- close$j # wI <- reciplambda[I] wIJ <- if(!lambda2.given) reciplambda[I] * reciplambda[J] else reciplambda2[cbind(I,J)] # # compute edge corrected estimates if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[I] # apply reduced sample algorithm RS <- Kwtsum(dIJ, bI, wIJ, b, w=reciplambda, breaks) if(any(correction == "border")) { Kb <- RS$ratio if(renormalise) Kb <- Kb * renorm.factor K <- bind.ratfv(K, quotient = data.frame(border=Kb), denominator = denom, labl = "{hat(%s)[%s]^{bord}}(r)", desc = 
"border-corrected estimate of %s", preferred = "border", ratio=ratio) } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) if(renormalise) Kbm <- Kbm * renorm.factor K <- bind.ratfv(K, quotient = data.frame(bord.modif=Kbm), denominator = denom, labl = "{hat(%s)[%s]^{bordm}}(r)", desc = "modified border-corrected estimate of %s", preferred = "bord.modif", ratio=ratio) } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Ktrans <- cumsum(wh)/areaW if(renormalise) Ktrans <- Ktrans * renorm.factor rmax <- diamW/2 Ktrans[r >= rmax] <- NA K <- bind.ratfv(K, quotient = data.frame(trans=Ktrans), denominator = denom, labl ="{hat(%s)[%s]^{trans}}(r)", desc = "translation-correction estimate of %s", preferred = "trans", ratio=ratio) } if(any(correction == "isotropic" | correction == "Ripley")) { # Ripley isotropic correction edgewt <- edge.Ripley(X[I], matrix(dIJ, ncol=1)) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Kiso <- cumsum(wh)/areaW if(renormalise) Kiso <- Kiso * renorm.factor rmax <- diamW/2 Kiso[r >= rmax] <- NA K <- bind.ratfv(K, quotient = data.frame(iso=Kiso), denominator = denom, labl = "{hat(%s)[%s]^{iso}}(r)", desc = "Ripley isotropic correction estimate of %s", preferred = "iso", ratio=ratio) } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } Kwtsum <- function(dIJ, bI, wIJ, b, w, breaks, fatal=TRUE) { # # "internal" routine to compute border-correction estimates of Kinhom # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # wIJ: product weight for selected I, J pairs # # b: vector of ALL distances to window boundary # w: weights for ALL points # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) stopifnot(length(bI) == length(wIJ)) stopifnot(length(w) == length(b)) if(!is.finite(sum(w, wIJ))) { if(fatal) stop("Weights in K-function were infinite or NA", call.=FALSE) #' set non-finite weights to zero if(any(bad <- !is.finite(w))) { warning(paste(sum(bad), "out of", length(bad), paren(percentage(bad)), "of the boundary weights", "in the K-function were NA or NaN or Inf", "and were reset to zero"), call.=FALSE) w[bad] <- 0 } if(any(bad <- !is.finite(wIJ))) { warning(paste(sum(bad), "out of", length(bad), paren(percentage(bad)), "of the weights for pairwise distances", "in the K-function were NA or NaN or Inf", "and were reset to zero"), call.=FALSE) wIJ[bad] <- 0 } } bkval <- breaks$val # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # # histogram of noncensored distances nco <- whist(dIJ[uncen], bkval, wIJ[uncen]) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], bkval, wIJ[uncen]) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, bkval, w) # total weight of censoring times beyond rightmost breakpoint uppercen <- sum(w[b > breaks$max]) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denominator <- RS$denominator ratio <- RS$numerator/RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denominator) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") 
  return(list(numerator=numerator,
              denominator=denominator,
              ratio=ratio))
}

validate.weights <- function(x, recip=FALSE, how = NULL,
                             allowzero = recip, allowinf = !recip) {
  xname <- deparse(substitute(x))
  ra <- range(x)
  offence <- if(!allowinf && !all(is.finite(ra))) "infinite" else
             if(ra[1] < 0) "negative" else
             if(!allowzero && ra[1] == 0) "zero" else NULL
  if(!is.null(offence)) {
    offenders <- paste(offence, "values of", sQuote(xname))
    if(is.null(how))
      stop(paste(offenders, "are not allowed"), call.=FALSE)
    stop(paste(how, "yielded", offenders), call.=FALSE)
  }
  return(TRUE)
}

resolve.lambda <- function(X, lambda=NULL, ...,
                           sigma=NULL, varcov=NULL,
                           leaveoneout=TRUE, update=TRUE) {
  dangerous <- "lambda"
  danger <- TRUE
  if(is.null(lambda)) {
    ## No intensity data provided
    ## Estimate density by leave-one-out kernel smoothing
    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
                      at="points", leaveoneout=leaveoneout)
    lambda <- as.numeric(lambda)
    danger <- FALSE
  } else if(is.im(lambda)) {
    lambda <- safelookup(lambda, X)
  } else if(is.function(lambda)) {
    lambda <- lambda(X$x, X$y)
  } else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) {
    check.nvector(lambda, npoints(X))
  } else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
    model <- lambda
    if(!update) {
      ## use intensity of model
      lambda <- predict(model, locations=X, type="trend")
    } else {
      ## re-fit model to data X
      model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X)
      lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
      danger <- FALSE
    }
  } else stop(paste(sQuote("lambda"),
                    "should be a vector, a pixel image,",
                    "a fitted model, or a function"))
  return(list(lambda=lambda,
              danger=danger,
              dangerous=if(danger) dangerous else NULL))
}
spatstat.core/R/polynom.R0000644000176200001440000000450514144333463015076 0ustar liggesusers
#'
#' polynom.R
#'
#' $Revision: 1.1 $ $Date: 2017/01/02 09:48:36 $
#'

polynom <- function(x, ...) {
  rest <- list(...)
  # degree not given
  if(length(rest) == 0)
    stop("degree of polynomial must be given")
  # call with single variable and degree
  if(length(rest) == 1) {
    degree <- ..1
    if((degree %% 1) != 0 || length(degree) != 1 || degree < 1)
      stop("degree of polynomial should be a positive integer")
    # compute values
    result <- outer(x, 1:degree, "^")
    # compute column names - the hard part !
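    # (Illustration) For example, polynom(x, 3) returns a three-column
    # matrix with columns named "[x]", "[x^2]" and "[x^3]"; an argument
    # that is not a simple name, such as x+1, is parenthesised first,
    # giving "[(x+1)]", "[(x+1)^2]", "[(x+1)^3]".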
namex <- deparse(substitute(x)) # check whether it needs to be parenthesised if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") # column names namepowers <- if(degree == 1) namex else c(namex, paste(namex, "^", 2:degree, sep="")) namepowers <- paste("[", namepowers, "]", sep="") # stick them on dimnames(result) <- list(NULL, namepowers) return(result) } # call with two variables and degree if(length(rest) == 2) { y <- ..1 degree <- ..2 # list of exponents of x and y, in nice order xexp <- yexp <- numeric() for(i in 1:degree) { xexp <- c(xexp, i:0) yexp <- c(yexp, 0:i) } nterms <- length(xexp) # compute result <- matrix(, nrow=length(x), ncol=nterms) for(i in 1:nterms) result[, i] <- x^xexp[i] * y^yexp[i] # names of these terms namex <- deparse(substitute(x)) # namey <- deparse(substitute(..1)) ### seems not to work in R zzz <- as.list(match.call()) namey <- deparse(zzz[[3]]) # check whether they need to be parenthesised # if so, add parentheses if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") if(!is.name(zzz[[3]])) namey <- paste("(", namey, ")", sep="") nameXexp <- c("", namex, paste(namex, "^", 2:degree, sep="")) nameYexp <- c("", namey, paste(namey, "^", 2:degree, sep="")) # make the term names termnames <- paste(nameXexp[xexp + 1], ifelse(xexp > 0 & yexp > 0, ".", ""), nameYexp[yexp + 1], sep="") termnames <- paste("[", termnames, "]", sep="") dimnames(result) <- list(NULL, termnames) # return(result) } stop("Can't deal with more than 2 variables yet") } spatstat.core/R/rose.R0000644000176200001440000002445014144333463014352 0ustar liggesusers#' #' rose.R #' #' Rose diagrams #' #' $Revision: 1.11 $ $Date: 2020/12/19 05:25:06 $ #' rose <- function(x, ...) UseMethod("rose") rose.default <- local({ rose.default <- function(x, breaks = NULL, ..., weights=NULL, nclass=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, main) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) stopifnot(is.numeric(x)) if(!is.null(weights)) check.nvector(weights, length(x), things="observations") #' determine units missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' reduce to [0, 2pi] x <- x %% FullCircle #' determine breakpoints strictly inside full circle breaks <- makebreaks(x, c(0, FullCircle), breaks, nclass) #' histogram without weights h <- do.call.matched(hist.default, list(x=x, breaks=breaks, ..., plot=FALSE), skipargs=graphicsAargh, sieve=TRUE) result <- h$result otherargs <- h$otherargs #' redo weights, if given if(!is.null(weights)) { wh <- whist(x=x, breaks=breaks, weights=weights) result$count <- wh result$density <- wh/diff(breaks) } # do.call(rose.histogram, c(list(x=result, main=main, unit=unit, start=start, clockwise=clockwise), otherargs)) } graphicsAargh <- c("density", "angle", "col", "border", "xlim", "ylim", "xlab", "ylab", "axes") makebreaks <- function(x, r, breaks=NULL, nclass=NULL) { use.br <- !is.null(breaks) if (use.br) { if (!is.null(nclass)) warning("'nclass' not used when 'breaks' is specified") } else if (!is.null(nclass) && length(nclass) == 1L) { breaks <- nclass } else breaks <- "Sturges" use.br <- use.br && (nB <- length(breaks)) > 1L if (use.br) breaks <- sort(breaks) else { if (is.character(breaks)) { breaks <- match.arg(tolower(breaks), c("sturges", "fd", "freedman-diaconis", "scott")) breaks <- switch(breaks, sturges = nclass.Sturges(x), `freedman-diaconis` = , fd = nclass.FD(x), scott = nclass.scott(x), 
stop("unknown 'breaks' algorithm")) } else if (is.function(breaks)) { breaks <- breaks(x) } if (length(breaks) == 1) { if (!is.numeric(breaks) || !is.finite(breaks) || breaks < 1L) stop("invalid number of 'breaks'") breaks <- seq(r[1], r[2], length.out=breaks) } else { if (!is.numeric(breaks) || length(breaks) <= 1) stop(gettextf("Invalid breakpoints produced by 'breaks(x)': %s", format(breaks)), domain = NA) breaks <- sort(breaks) } } return(breaks) } rose.default }) rose.histogram <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) #' determine units missu <- missing(unit) unit <- match.arg(unit) #' validate bks <- x$breaks unit <- validate.angles(bks, unit, missu) # FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' get sector sizes y <- x$density ymax <- max(y) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * ymax DD <- disc(R) Rout <- (1 + outsidespace) * R disco <- disc(Rout) dont.complain.about(DD, disco) result <- do.call.matched(plot.owin, resolve.defaults(list(x=quote(disco), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=quote(DD), hatch=FALSE, add=TRUE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") if(do.plot) { #' draw sectors ang <- ang2rad(bks, unit=unit, start=start, clockwise=clockwise) eps <- min(diff(ang), pi/128)/2 for(i in seq_along(y)) { aa <- seq(ang[i], ang[i+1], by=eps) aa[length(aa)] <- ang[i+1] yi <- y[i] xx <- c(0, yi * cos(aa), 0) yy <- c(0, yi * sin(aa), 0) do.call.matched(polygon, list(x=xx, y=yy, ...)) } #' add tick marks circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } #' return(invisible(result)) } rose.density <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- x$x rad <- x$y missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } rose.fv <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- with(x, .x) rad <- with(x, .y) missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } roseContinuous <- function(ang, rad, unit, ..., start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { rmax <- max(rad) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * rmax DD <- disc(R) Rout <- (1 + outsidespace) * R disco <- disc(Rout) dont.complain.about(DD, disco) result <- do.call.matched(plot.owin, resolve.defaults(list(x=quote(disco), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=quote(DD), add=TRUE, hatch=FALSE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") #' draw plot if(do.plot) { ang <- 
ang2rad(ang, unit=unit, start=start, clockwise=clockwise) xx <- rad * cos(ang) yy <- rad * sin(ang) do.call.matched(polygon, list(x=xx, y=yy, ...), extrargs="lwd") circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } return(result) } ang2rad <- local({ compasspoints <- c(E=0,N=90,W=180,S=270) ang2rad <- function(ang, unit=c("degree", "radian"), start=0, clockwise=FALSE) { unit <- match.arg(unit) clocksign <- if(clockwise) -1 else 1 stopifnot(length(start) == 1) if(is.character(start)) { if(is.na(match(toupper(start), names(compasspoints)))) stop(paste("Unrecognised compass point", sQuote(start)), call.=FALSE) startdegrees <- compasspoints[[start]] start <- switch(unit, degree = startdegrees, radian = pi * (startdegrees/180)) # start is measured anticlockwise ang <- start + clocksign * ang } else { stopifnot(is.numeric(start)) # start is measured according to value of 'clockwise' ang <- clocksign * (start + ang) } rad <- switch(unit, degree = pi * (ang/180), radian = ang) return(rad) } ang2rad }) circticks <- function(R, at=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, labels=TRUE) { unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.null(at)) { at <- FullCircle * (0:23)/24 major <- ((0:23) %% 6 == 0) } else { if(length(at) == 0) return(invisible(NULL)) nat <- (at/FullCircle) * 4 major <- abs(nat - round(nat)) < 0.01 } atradians <- ang2rad(ang=at, unit=unit, start=start, clockwise=clockwise) tx <- R * cos(atradians) ty <- R * sin(atradians) expan <- ifelse(major, 1.1, 1.05) segments(tx, ty, expan * tx, expan * ty, lwd=major+1) if(!identical(labels, FALSE)) { if(identical(labels, TRUE)) { labels <- switch(unit, degree=paste(round(at)), radian=parse(text= simplenumber(at/pi, "pi", "*", 1e-3))) } else stopifnot(is.vector(labels) && length(labels) == length(at)) big <- expan + 0.1 text(big * tx, big * ty, labels=labels) } invisible(NULL) } validate.angles <- function(angles, unit=c("degree", "radian"), guess=TRUE) { #' validate width <- diff(range(angles)) if(missing(unit) && guess && width <= 6.2832) { warning("Very small range of angles: treating them as radian") unit <- "radian" } else unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(width > 1.002 * FullCircle) stop("Range of angles exceeds a full circle") return(unit) } spatstat.core/R/pairorient.R0000644000176200001440000001624714144333463015563 0ustar liggesusers## ## pairorient.R ## ## point pair orientation distribution ## ## Function O_{r1,r2}(phi) defined in ## Stoyan & Stoyan (1994) equ (14.53) page 271 ## ## and its derivative estimated by kernel smoothing ## ## $Revision: 1.10 $ $Date: 2019/01/13 07:33:20 $ pairorient <- function(X, r1, r2, ..., cumulative=FALSE, correction, ratio=FALSE, unit=c("degree", "radian"), domain=NULL) { stopifnot(is.ppp(X)) check.1.real(r1) check.1.real(r2) stopifnot(r1 < r2) W <- Window(X) if(!is.null(domain)) stopifnot(is.subset.owin(domain, W)) unit <- match.arg(unit) switch(unit, degree = { FullCircle <- 360 Convert <- 180/pi }, radian = { FullCircle <- 2 * pi Convert <- 1 }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate", "isotropic") correction <- pickoption("correction", correction, c(none="none", border="border", bord.modif="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), 
multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## Find close pairs in range [r1, r2] close <- as.data.frame(closepairs(X, r2)) ok <- with(close, r1 <= d & d <= r2) if(!is.null(domain)) ok <- ok & with(close, inside.owin(xi, yi, domain)) if(!any(ok)) { warning(paste("There are no pairs of points in the distance range", prange(c(r1,r2)))) return(NULL) } close <- close[ok, , drop=FALSE] ANGLE <- with(close, atan2(dy, dx) * Convert) %% FullCircle ## initialise output object Nphi <- 512 breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1) phi <- breaks$r Odf <- data.frame(phi = phi, theo = (if(cumulative) phi else 1)/FullCircle) desc <- c("angle argument phi", "theoretical isotropic %s") Oletter <- if(cumulative) "O" else "o" Osymbol <- as.name(Oletter) OO <- ratfv(Odf, NULL, denom=nrow(close), argu="phi", ylab=substitute(fn[R1,R2](phi), list(R1=r1, R2=r2, fn=Osymbol)), valu="theo", fmla = . ~ phi, alim = c(0, FullCircle), c("phi", "{%s[%s]^{pois}}(phi)"), desc, fname=c(Oletter, paste0("list(", r1, ",", r2, ")")), yexp=substitute(fn[list(R1,R2)](phi), list(R1=r1,R2=r2,fn=Osymbol))) ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^ nangles <- length(ANGLE) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate OO <- bind.ratfv(OO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(c("border", "bord.modif") %in% correction)) { ## border type corrections bX <- bdist.points(X) bI <- bX[close$i] if("border" %in% correction) { bok <- (bI > r2) ANGLEok <- ANGLE[bok] nok <- length(ANGLEok) if(cumulative) { wh <- whist(ANGLEok, breaks$val) num.bord <- cumsum(wh) } else { kd <- circdensity(ANGLEok, ..., n=Nphi, unit=unit) num.bord <- kd$y * nok } den.bord <- nok OO <- bind.ratfv(OO, data.frame(border=num.bord), den.bord, "{hat(%s)[%s]^{bord}}(phi)", "border-corrected estimate of %s", "border", ratio=ratio) } if("bord.modif" %in% correction) { ok <- (close$d < bI) nok <- sum(ok) inradius <- max(distmap(W, invert=TRUE)) rrr <- range(r2, inradius) rr <- seq(rrr[1], rrr[2], length=256) Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- (Arf(bX))[close$i] edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } den.bm <- nok OO <- bind.ratfv(OO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.trans <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.trans <- kd$y * nangles } den.trans <- nangles OO <- bind.ratfv(OO, data.frame(trans=num.trans), 
den.trans, "{hat(%s)[%s]^{trans}}(phi)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) DIJ <- close$d edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.iso <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.iso <- kd$y * nangles } den.iso <- nangles OO <- bind.ratfv(OO, data.frame(iso=num.iso), den.iso, "{hat(%s)[%s]^{iso}}(phi)", "Ripley isotropic-corrected estimate of %s", "iso", ratio=ratio) } unitname(OO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(OO) } spatstat.core/R/bw.scott.R0000644000176200001440000000132614144333461015140 0ustar liggesusers#' #' bw.scott.R #' #' Bandwidth selection rule bw.scott for point patterns in any dimension #' #' $Revision: 1.1 $ $Date: 2019/07/22 11:41:41 $ bw.scott <- function(X, isotropic=FALSE, d=NULL) { stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X)) if(is.null(d)) { d <- spatdim(X, intrinsic=FALSE) } else check.1.integer(d) nX <- npoints(X) cX <- coords(X, spatial=TRUE, temporal=FALSE, local=FALSE) sdX <- apply(cX, 2, sd) if(isotropic) { #' geometric mean sdX <- exp(mean(log(pmax(sdX, .Machine$double.eps)))) } b <- sdX * nX^(-1/(d+4)) names(b) <- if(isotropic) "sigma" else paste0("sigma.", colnames(cX)) return(b) } bw.scott.iso <- function(X) { bw.scott(X, isotropic=TRUE) } spatstat.core/R/rmh.default.R0000644000176200001440000010206714144333463015614 0ustar liggesusers# # $Id: rmh.default.R,v 1.116 2021/01/07 03:08:41 adrian Exp adrian $ # rmh.default <- function(model,start=NULL, control=default.rmhcontrol(model), ..., nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, snoop=FALSE) { # # Function rmh. To simulate realizations of 2-dimensional point # patterns, given the conditional intensity function of the # underlying process, via the Metropolis-Hastings algorithm. # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # V A L I D A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(verbose) cat("Checking arguments..") # validate arguments and fill in the defaults model <- rmhmodel(model) start <- rmhstart(start) if(is.null(control)) { control <- default.rmhcontrol(model) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) control <- rmhResolveControl(control, model) saveinfo <- as.logical(saveinfo) # retain "..." arguments unrecognised by rmhcontrol # These are assumed to be arguments of functions defining the trend argh <- list(...) known <- names(argh) %in% names(formals(rmhcontrol.default)) f.args <- argh[!known] #### Multitype models # Decide whether the model is multitype; if so, find the types. 
types <- rmhResolveTypes(model, start, control) ntypes <- length(types) mtype <- (ntypes > 1) # If the model is multitype, check that the model parameters agree with types # and digest them if(mtype && !is.null(model$check)) { model <- rmhmodel(model, types=types) } else { model$types <- types } ######## Check for illegal combinations of model, start and control ######## # No expansion can be done if we are using x.start if(start$given == "x") { if(control$expand$force.exp) stop("Cannot expand window when using x.start.\n", call.=FALSE) control$expand <- .no.expansion } # Warn about a silly value of fixall: if(control$fixall & ntypes==1) { warning("control$fixall applies only to multitype processes. Ignored.", call.=FALSE) control$fixall <- FALSE if(control$fixing == "n.each.type") control$fixing <- "n.total" } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # M O D E L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ####### Determine windows ################################ if(verbose) cat("determining simulation windows...") # these may be NULL w.model <- model$w x.start <- start$x.start trend <- model$trend trendy <- !is.null(trend) singletrend <- trendy && (is.im(trend) || is.function(trend) || (is.numeric(trend) && length(trend) == 1)) trendlist <- if(singletrend) list(trend) else trend # window implied by trend image, if any w.trend <- if(is.im(trend)) as.owin(trend) else if(is.list(trend) && any(ok <- unlist(lapply(trend, is.im)))) as.owin((trend[ok])[[1L]]) else NULL ## Clipping window (for final result) w.clip <- if(!is.null(w.model)) w.model else if(!will.expand(control$expand)) { if(start$given == "x" && is.ppp(x.start)) x.start$window else if(is.owin(w.trend)) w.trend } else NULL if(!is.owin(w.clip)) stop("Unable to determine window for pattern", call.=FALSE) ## Simulation window xpn <- rmhResolveExpansion(w.clip, control, trendlist, "trend") w.sim <- xpn$wsim expanded <- xpn$expanded ## Check the fine print if(expanded) { if(control$fixing != "none") stop(paste("If we're conditioning on the number of points,", "we cannot clip the result to another window."), call.=FALSE) if(!is.subset.owin(w.clip, w.sim)) stop("Expanded simulation window does not contain model window", call.=FALSE) } ####### Trend ################################ # Check that the expanded window fits inside the window # upon which the trend(s) live if there are trends and # if any trend is given by an image. if(expanded && !is.null(trend)) { trends <- if(is.im(trend)) list(trend) else trend images <- unlist(lapply(trends, is.im)) if(any(images)) { iwindows <- lapply(trends[images], as.owin) nimages <- length(iwindows) misfit <- !sapply(iwindows, is.subset.owin, A=w.sim) nmisfit <- sum(misfit) if(nmisfit > 1) stop(paste("Expanded simulation window is not contained in", "several of the trend windows.\n", "Bailing out."), call.=FALSE) else if(nmisfit == 1) { warning(paste("Expanded simulation window is not contained in", if(nimages == 1) "the trend window.\n" else "one of the trend windows.\n", "Expanding to this trend window (only)."), call.=FALSE) w.sim <- iwindows[[which(misfit)]] } } } # Extract the 'beta' parameters if(length(model$cif) == 1) { # single interaction beta <- model$C.beta betalist <- list(beta) } else { # hybrid betalist <- model$C.betalist # multiply beta vectors for each component beta <- Reduce("*", betalist) } ##### .................. CONDITIONAL SIMULATION ................... 
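# Summary of the conditioning options handled below:
#   condtype == "none"   : ordinary (unconditional) simulation.
#   condtype == "window" : the configuration x.cond inside w.cond is
#                          held fixed; only the points in
#                          w.sim = w.state minus w.cond are random.
#   condtype == "Palm"   : simulation conditional on the points of
#                          x.cond, in the sense of the Palm distribution.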
##### #|| Determine windows for conditional simulation #|| #|| w.state = window for the full configuration #|| #|| w.sim = window for the 'free' (random) points #|| w.state <- w.sim condtype <- control$condtype x.cond <- control$x.cond # n.cond <- control$n.cond switch(condtype, none={ w.cond <- NULL }, window={ # conditioning on the realisation inside a subwindow w.cond <- as.owin(x.cond) # subtract from w.sim w.sim <- setminus.owin(w.state, w.cond) if(is.empty(w.sim)) stop(paste("Conditional simulation is undefined;", "the conditioning window", sQuote("as.owin(control$x.cond)"), "covers the entire simulation window"), call.=FALSE) }, Palm={ # Palm conditioning w.cond <- NULL }) ##### #|| Convert conditioning points to appropriate format x.condpp <- switch(condtype, none=NULL, window=x.cond, Palm=as.ppp(x.cond, w.state)) # validate if(!is.null(x.condpp)) { if(mtype) { if(!is.marked(x.condpp)) stop("Model is multitype, but x.cond is unmarked", call.=FALSE) if(!isTRUE(all.equal(types, levels(marks(x.condpp))))) stop("Types of points in x.cond do not match types in model", call.=FALSE) } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S T A R T I N G S T A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ###################### Starting state data ############################ # whether the initial state should be thinned thin <- (start$given != "x") && (control$fixing == "none") # There must now be a starting state. if(start$given == "none") { # For conditional simulation, the starting state must be given if(condtype != "none") stop("No starting state given", call.=FALSE) # Determine integral of beta * trend over data window. # This is the expected number of points in the reference Poisson process. area.w.clip <- area(w.clip) if(trendy) { tsummaries <- summarise.trend(trend, w=w.clip, a=area.w.clip) En <- beta * sapply(tsummaries, getElement, name="integral") } else { En <- beta * area.w.clip } # Fix n.start equal to this integral n.start <- if(spatstat.options("scalable")) round(En) else ceiling(En) start <- rmhstart(n.start=n.start) } # In the case of conditional simulation, the start data determine # the 'free' points (i.e. excluding x.cond) in the initial state. switch(start$given, none={ stop("No starting state given", call.=FALSE) }, x = { # x.start was given # coerce it to a ppp object if(!is.ppp(x.start)) x.start <- as.ppp(x.start, w.state) if(condtype == "window") { # clip to simulation window xs <- x.start[w.sim] nlost <- x.start$n - xs$n if(nlost > 0) warning(paste(nlost, ngettext(nlost, "point","points"), "of x.start", ngettext(nlost, "was", "were"), "removed because", ngettext(nlost, "it", "they"), "fell in the window of x.cond"), call.=FALSE) x.start <- xs } npts.free <- x.start$n }, n = { # n.start was given n.start <- start$n.start # Adjust the number of points in the starting state in accordance # with the expansion that has occurred. if(expanded) { holnum <- if(spatstat.options("scalable")) round else ceiling n.start <- holnum(n.start * area(w.sim)/area(w.clip)) } # npts.free <- sum(n.start) # The ``sum()'' is redundant if n.start # is scalar; no harm, but. 
}, stop("Internal error: start$given unrecognized"), call.=FALSE) #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # C O N T R O L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ################### Periodic boundary conditions ######################### periodic <- control$periodic if(is.null(periodic)) { # undecided. Use default rule control$periodic <- periodic <- expanded && is.rectangle(w.state) } else if(periodic && !is.rectangle(w.state)) { # if periodic is TRUE we have to be simulating in a rectangular window. stop("Need rectangular window for periodic simulation.", call.=FALSE) } # parameter passed to C: period <- if(periodic) c(diff(w.state$xrange), diff(w.state$yrange)) else c(-1,-1) #### vector of proposal probabilities if(!mtype) ptypes <- 1 else { ptypes <- control$ptypes if(is.null(ptypes)) { # default proposal probabilities ptypes <- if(start$given == "x" && (nx <- npoints(x.start)) > 0) { table(marks(x.start, dfok=FALSE))/nx } else rep.int(1/ntypes, ntypes) } else { # Validate ptypes if(length(ptypes) != ntypes | sum(ptypes) != 1) stop("Argument ptypes is mis-specified.", call.=FALSE) } } ######################################################################## # Normalising constant for proposal density # # Integral of trend over the expanded window (or area of window): # Iota == Integral Of Trend (or) Area. area.w.sim <- area(w.sim) if(trendy) { if(verbose) cat("Evaluating trend integral...") tsummaries <- summarise.trend(trend, w=w.sim, a=area.w.sim) mins <- sapply(tsummaries, getElement, name="min") if(any(mins < 0)) stop("Trend has negative values", call.=FALSE) iota <- sapply(tsummaries, getElement, name="integral") tmax <- sapply(tsummaries, getElement, name="max") } else { iota <- area.w.sim tmax <- NULL } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # A.S. EMPTY PROCESS # # for conditional simulation, 'empty' means there are no 'free' points # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== a.s.empty <- FALSE # # Empty pattern, simulated conditional on n # if(npts.free == 0 && control$fixing != "none") { a.s.empty <- TRUE if(verbose) { mess <- paste("Initial pattern has 0 random points,", "and simulation is conditional on the number of points -") if(condtype == "none") warning(paste(mess, "returning an empty pattern"), call.=FALSE) else warning(paste(mess, "returning a pattern with no random points"), call.=FALSE) } } # # If beta = 0, the process is almost surely empty # if(all(beta < .Machine$double.eps)) { if(control$fixing == "none" && condtype == "none") { # return empty pattern if(verbose) warning("beta = 0 implies an empty pattern", call.=FALSE) a.s.empty <- TRUE } else stop("beta = 0 implies an empty pattern, but we are simulating conditional on a nonzero number of points", call.=FALSE) } # # If we're conditioning on the contents of a subwindow, # and the subwindow covers the clipping region, # the result is deterministic. 
if(condtype == "window" && is.subset.owin(w.clip, w.cond)) { a.s.empty <- TRUE warning(paste("Model window is a subset of conditioning window:", "result is deterministic"), call.=FALSE) } # # if(a.s.empty) { # create empty pattern, to be returned if(!is.null(x.condpp)) empty <- x.condpp[w.clip] else { empty <- ppp(numeric(0), numeric(0), window=w.clip) if(mtype) { vide <- factor(types[integer(0)], levels=types) empty <- empty %mark% vide } } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # PACK UP # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ######### Store decisions Model <- model Start <- start Control <- control Model$w <- w.clip Model$types <- types Control$expand <- if(expanded) rmhexpand(w.state) else .no.expansion Control$internal <- list(w.sim=w.sim, w.state=w.state, x.condpp=x.condpp, ptypes=ptypes, period=period, thin=thin) Model$internal <- list(a.s.empty=a.s.empty, empty=if(a.s.empty) empty else NULL, mtype=mtype, trendy=trendy, betalist=betalist, beta=beta, iota=iota, tmax=tmax) Start$internal <- list(npts.free=npts.free) InfoList <- list(model=Model, start=Start, control=Control) class(InfoList) <- c("rmhInfoList", class(InfoList)) # go if(nsim == 1 && drop) { result <- do.call(rmhEngine, append(list(InfoList, verbose=verbose, snoop=snoop, kitchensink=saveinfo), f.args)) } else { result <- vector(mode="list", length=nsim) if(verbose) { splat("Generating", nsim, "point patterns...") pstate <- list() } subverb <- verbose && (nsim == 1) for(isim in 1:nsim) { if(verbose) pstate <- progressreport(isim, nsim, state=pstate) result[[isim]] <- do.call(rmhEngine, append(list(InfoList, verbose=subverb, snoop=snoop, kitchensink=saveinfo), f.args)) } if(verbose) splat("Done.\n") result <- simulationresult(result, nsim, drop) } return(result) } print.rmhInfoList <- function(x, ...) { cat("\nPre-digested Metropolis-Hastings algorithm parameters (rmhInfoList)\n") print(as.anylist(x)) } #--------------- rmhEngine ------------------------------------------- # # This is the interface to the C code. # # InfoList is a list of pre-digested, validated arguments # obtained from rmh.default. # # This function is called by rmh.default to generate one simulated # realisation of the model. # It's called repeatedly by ho.engine and qqplot.ppm to generate multiple # realisations (saving time by not repeating the argument checking # in rmh.default). # arguments: # kitchensink: whether to tack InfoList on to the return value as an attribute # preponly: whether to just return InfoList without simulating # # rmh.default digests arguments and calls rmhEngine with kitchensink=T # # qqplot.ppm first gets InfoList by calling rmh.default with preponly=T # (which digests the model arguments and calls rmhEngine # with preponly=T, returning InfoList), # then repeatedly calls rmhEngine(InfoList) to simulate. # # ------------------------------------------------------- rmhEngine <- function(InfoList, ..., verbose=FALSE, kitchensink=FALSE, preponly=FALSE, snoop=FALSE, overrideXstart=NULL, overrideclip=FALSE) { # Internal Use Only! # This is the interface to the C code. 
if(!inherits(InfoList, "rmhInfoList")) stop("data not in correct format for internal function rmhEngine", call.=FALSE) if(preponly) return(InfoList) model <- InfoList$model start <- InfoList$start control <- InfoList$control w.sim <- control$internal$w.sim w.state <- control$internal$w.state w.clip <- model$w condtype <- control$condtype x.condpp <- control$internal$x.condpp types <- model$types ntypes <- length(types) ptypes <- control$internal$ptypes period <- control$internal$period mtype <- model$internal$mtype trend <- model$trend trendy <- model$internal$trendy # betalist <- model$internal$betalist beta <- model$internal$beta iota <- model$internal$iota tmax <- model$internal$tmax npts.free <- start$internal$npts.free n.start <- start$n.start x.start <- start$x.start #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # E M P T Y P A T T E R N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(model$internal$a.s.empty) { if(verbose) cat("\n") empty <- model$internal$empty attr(empty, "info") <- InfoList return(empty) } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S I M U L A T I O N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ############################################# #### #### Random number seed: initialisation & capture #### ############################################# if(!exists(".Random.seed")) runif(1L) saved.seed <- .Random.seed ############################################# #### #### Poisson case #### ############################################# if(is.poisson.rmhmodel(model)) { if(verbose) cat("\n") intensity <- if(!trendy) beta else model$trend Xsim <- switch(control$fixing, none= { # Poisson process if(!mtype) rpoispp(intensity, win=w.sim, ..., warnwin=FALSE) else rmpoispp(intensity, win=w.sim, types=types, warnwin=FALSE) }, n.total = { # Binomial/multinomial process with fixed total number of points if(!mtype) rpoint(npts.free, intensity, win=w.sim, verbose=verbose) else rmpoint(npts.free, intensity, win=w.sim, types=types, verbose=verbose) }, n.each.type = { # Multinomial process with fixed number of points of each type npts.each <- switch(start$given, n = n.start, x = as.integer(table(marks(x.start, dfok=FALSE))), stop("No starting state given; can't condition on fixed number of points", call.=FALSE)) rmpoint(npts.each, intensity, win=w.sim, types=types, verbose=verbose) }, stop("Internal error: control$fixing unrecognised", call.=FALSE) ) # if conditioning, add fixed points if(condtype != "none") Xsim <- superimpose(Xsim, x.condpp, W=w.state) # clip result to output window Xclip <- if(!overrideclip) Xsim[w.clip] else Xsim attr(Xclip, "info") <- InfoList return(Xclip) } ######################################################################## # M e t r o p o l i s H a s t i n g s s i m u l a t i o n ######################################################################## if(verbose) cat("Starting simulation.\nInitial state...") #### Build starting state npts.cond <- if(condtype != "none") x.condpp$n else 0 # npts.total <- npts.free + npts.cond #### FIRST generate the 'free' points #### First the marks, if any. 
#### The marks must be integers 0 to (ntypes-1) for passing to C Ctypes <- if(mtype) 0:(ntypes-1) else 0 Cmarks <- if(!mtype) 0 else switch(start$given, n = { # n.start given if(control$fixing=="n.each.type") rep.int(Ctypes,n.start) else sample(Ctypes,npts.free,TRUE,ptypes) }, x = { # x.start given as.integer(marks(x.start, dfok=FALSE))-1L }, stop("internal error: start$given unrecognised", call.=FALSE) ) # # Then the x, y coordinates # switch(start$given, x = { x <- x.start$x y <- x.start$y }, n = { xy <- if(!trendy) runifpoint(npts.free, w.sim, ...) else rpoint.multi(npts.free, trend, tmax, factor(Cmarks,levels=Ctypes), w.sim, ...) x <- xy$x y <- xy$y }) ## APPEND the free points AFTER the conditioning points if(condtype != "none") { x <- c(x.condpp$x, x) y <- c(x.condpp$y, y) if(mtype) Cmarks <- c(as.integer(marks(x.condpp))-1L, Cmarks) } if(!is.null(overrideXstart)) { #' override the previous data x <- overrideXstart$x y <- overrideXstart$y if(mtype) Cmarks <- as.integer(marks(overrideXstart))-1L } # decide whether to activate visual debugger if(snoop) { Xinit <- ppp(x, y, window=w.sim) if(mtype) marks(Xinit) <- factor(Cmarks, levels=Ctypes, labels=types) if(verbose) cat("\nCreating debugger environment..") snoopenv <- rmhSnoopEnv(Xinit=Xinit, Wclip=w.clip, R=reach(model)) if(verbose) cat("Done.\n") } else snoopenv <- "none" ####################################################################### # Set up C call ###################################################################### # Determine the name of the cif used in the C code C.id <- model$C.id ncif <- length(C.id) # Get the parameters in C-ese ipar <- model$C.ipar iparlist <- if(ncif == 1) list(ipar) else model$C.iparlist iparlen <- lengths(iparlist) beta <- model$internal$beta # Absorb the constants or vectors `iota' and 'ptypes' into the beta parameters beta <- (iota/ptypes) * beta # Algorithm control parameters p <- control$p q <- control$q nrep <- control$nrep # fixcode <- control$fixcode # fixing <- control$fixing fixall <- control$fixall nverb <- control$nverb saving <- control$saving nsave <- control$nsave nburn <- control$nburn track <- control$track thin <- control$internal$thin pstage <- control$pstage %orifnull% "start" if(pstage == "block" && !saving) pstage <- "start" temper <- FALSE invertemp <- 1.0 if(verbose) cat("Ready to simulate. ") storage.mode(ncif) <- "integer" storage.mode(C.id) <- "character" storage.mode(beta) <- "double" storage.mode(ipar) <- "double" storage.mode(iparlen) <- "integer" storage.mode(period) <- "double" storage.mode(ntypes) <- "integer" storage.mode(nrep) <- "integer" storage.mode(p) <- storage.mode(q) <- "double" storage.mode(nverb) <- "integer" storage.mode(x) <- storage.mode(y) <- "double" storage.mode(Cmarks) <- "integer" storage.mode(fixall) <- "integer" storage.mode(npts.cond) <- "integer" storage.mode(track) <- "integer" storage.mode(thin) <- "integer" storage.mode(temper) <- "integer" storage.mode(invertemp) <- "double" if(pstage == "start" || !saving) { #' generate all proposal points now. if(verbose) cat("Generating proposal points...") #' If the pattern is multitype, generate the mark proposals (0 to ntypes-1) Cmprop <- if(mtype) sample(Ctypes,nrep,TRUE,prob=ptypes) else 0 storage.mode(Cmprop) <- "integer" #' Generate the ``proposal points'' in the expanded window. 
xy <- if(trendy) { rpoint.multi(nrep,trend,tmax, factor(Cmprop, levels=Ctypes), w.sim, ..., warn=FALSE) } else runifpoint(nrep, w.sim, warn=FALSE) xprop <- xy$x yprop <- xy$y storage.mode(xprop) <- storage.mode(yprop) <- "double" } if(!saving) { # ////////// Single block ///////////////////////////////// nrep0 <- 0 storage.mode(nrep0) <- "integer" # Call the Metropolis-Hastings C code: if(verbose) cat("Running Metropolis-Hastings.\n") out <- .Call(SC_xmethas, ncif, C.id, beta, ipar, iparlen, period, xprop, yprop, Cmprop, ntypes, nrep, p, q, nverb, nrep0, x, y, Cmarks, npts.cond, fixall, track, thin, snoopenv, temper, invertemp, PACKAGE="spatstat.core") # Extract the point pattern returned from C X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE) if(mtype) { #' convert integer marks from C to R #' then restore original type levels marks(X) <- factor(out[[3L]], levels=Ctypes, labels=types) } # Now clip the pattern to the ``clipping'' window: if(!overrideclip && !control$expand$force.noexp) X <- X[w.clip] # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) History <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) History <- cbind(History, data.frame(numerator=numerator, denominator=denominator)) } } } else { # ////////// Multiple blocks ///////////////////////////////// ## determine length of each block of simulations nsuperblocks <- as.integer(1L + ceiling((nrep - nburn)/sum(nsave))) block <- c(nburn, rep.int(nsave, nsuperblocks-1L)) block <- block[cumsum(block) <= nrep] if((tot <- sum(block)) < nrep) block <- c(block, nrep-tot) block <- block[block >= 1L] nblocks <- length(block) blockend <- cumsum(block) ## set up list to contain the saved point patterns Xlist <- vector(mode="list", length=nblocks+1L) ## save initial state Xinit <- ppp(x=x, y=y, window=w.state, check=FALSE) if(mtype) { ## convert integer marks from C to R ## then restore original type levels marks(Xinit) <- factor(Cmarks, levels=Ctypes, labels=types) } Xlist[[1L]] <- Xinit # Call the Metropolis-Hastings C code repeatedly: xprev <- x yprev <- y Cmarksprev <- Cmarks # thinFALSE <- as.integer(FALSE) storage.mode(thinFALSE) <- "integer" # ................ loop ......................... for(I in 1:nblocks) { # number of iterations for this block nrepI <- block[I] storage.mode(nrepI) <- "integer" # number of previous iterations nrep0 <- if(I == 1) 0 else blockend[I-1] storage.mode(nrep0) <- "integer" # Generate or extract proposals switch(pstage, start = { #' extract proposals from previously-generated vectors if(verbose) cat("Extracting proposal points...") seqI <- 1:nrepI xpropI <- xprop[seqI] ypropI <- yprop[seqI] CmpropI <- Cmprop[seqI] storage.mode(xpropI) <- storage.mode(ypropI) <- "double" storage.mode(CmpropI) <- "integer" }, block = { # generate 'nrepI' random proposals if(verbose) cat("Generating proposal points...") #' If the pattern is multitype, generate the mark proposals CmpropI <- if(mtype) sample(Ctypes,nrepI,TRUE,prob=ptypes) else 0 storage.mode(CmpropI) <- "integer" #' Generate the ``proposal points'' in the expanded window. 
xy <- if(trendy) { rpoint.multi(nrepI,trend,tmax, factor(CmpropI, levels=Ctypes), w.sim, ..., warn=FALSE) } else runifpoint(nrepI, w.sim, warn=FALSE) xpropI <- xy$x ypropI <- xy$y storage.mode(xpropI) <- storage.mode(ypropI) <- "double" }) # no thinning in subsequent blocks if(I > 1) thin <- thinFALSE #' call if(verbose) cat("Running Metropolis-Hastings.\n") out <- .Call(SC_xmethas, ncif, C.id, beta, ipar, iparlen, period, xpropI, ypropI, CmpropI, ntypes, nrepI, p, q, nverb, nrep0, xprev, yprev, Cmarksprev, npts.cond, fixall, track, thin, snoopenv, temper, invertemp, PACKAGE="spatstat.core") # Extract the point pattern returned from C X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE) if(mtype) { # convert integer marks from C to R # then restore original type levels marks(X) <- factor(out[[3L]], levels=Ctypes, labels=types) } # Now clip the pattern to the ``clipping'' window: if(!overrideclip && !control$expand$force.noexp) X <- X[w.clip] # commit to list Xlist[[I+1L]] <- X # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) HistoryI <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) HistoryI <- cbind(HistoryI, data.frame(numerator=numerator, denominator=denominator)) } # concatenate with histories of previous blocks History <- if(I == 1) HistoryI else rbind(History, HistoryI) } # update 'previous state' xprev <- out[[1L]] yprev <- out[[2L]] Cmarksprev <- if(!mtype) 0 else out[[3]] storage.mode(xprev) <- storage.mode(yprev) <- "double" storage.mode(Cmarksprev) <- "integer" if(pstage == "start") { #' discard used proposals xprop <- xprop[-seqI] yprop <- yprop[-seqI] Cmprop <- Cmprop[-seqI] } } # .............. end loop ............................... # Result of simulation is final state 'X' # Tack on the list of intermediate states names(Xlist) <- paste("Iteration", c(0,as.integer(blockend)), sep="_") attr(X, "saved") <- as.solist(Xlist) } # Append to the result information about how it was generated. if(kitchensink) { attr(X, "info") <- InfoList attr(X, "seed") <- saved.seed } if(track) attr(X, "history") <- History return(X) } # helper function summarise.trend <- local({ # main function summarise.trend <- function(trend, w, a=area(w)) { tlist <- if(is.function(trend) || is.im(trend)) list(trend) else trend return(lapply(tlist, summarise1, w=w, a=a)) } # summarise1 <- function(x, w, a) { if(is.numeric(x)) { mini <- maxi <- x integ <- a*x } else { Z <- as.im(x, w)[w, drop=FALSE] ran <- range(Z) mini <- ran[1L] maxi <- ran[2L] integ <- integral.im(Z) } return(list(min=mini, max=maxi, integral=integ)) } summarise.trend }) spatstat.core/R/kppm.R0000644000176200001440000017723414146622475014370 0ustar liggesusers# # kppm.R # # kluster/kox point process models # # $Revision: 1.195 $ $Date: 2021/11/22 00:39:32 $ # kppm <- function(X, ...) 
{ UseMethod("kppm") } kppm.formula <- function(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), ..., data=NULL) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ########### INTERPRET FORMULA ############################## if(!inherits(X, "formula")) stop(paste("Argument 'X' should be a formula")) formula <- X if(spatstat.options("expand.polynom")) formula <- expand.polynom(formula) ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2L]] trend <- formula[c(1L,3L)] ## FIT ####################################### thecall <- call("kppm", X=Yexpr, trend=trend, data=data, clusters=clusters) ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } ## result <- eval(thecall, ## envir=if(!is.null(data)) data else parent.frame(), ## enclos=if(!is.null(data)) parent.frame() else baseenv()) callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv, enclos=baseenv()) result$call <- cl result$callframe <- parent.frame() if(!("callstring" %in% names(list(...)))) result$callstring <- callstring return(result) } kppm.ppp <- kppm.quad <- function(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data=NULL, ..., covariates = data, subset, method = c("mincon", "clik2", "palm", "adapcl"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), stabilize=TRUE, algorithm, statistic="K", statargs=list(), rmax = NULL, epsilon=0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") Xname <- short.deparse(substitute(X)) clusters <- match.arg(clusters) improve.type <- match.arg(improve.type) method <- match.arg(method) if(method == "mincon") statistic <- pickoption("summary statistic", statistic, c(K="K", g="pcf", pcf="pcf")) if(missing(algorithm)) { algorithm <- if(method == "adapcl") "Broyden" else "Nelder-Mead" } else check.1.string(algorithm) ClusterArgs <- list(method = method, improve.type = improve.type, improve.args = improve.args, weightfun=weightfun, control=control, stabilize=stabilize, algorithm=algorithm, statistic=statistic, statargs=statargs, rmax = rmax) Xenv <- list2env(as.list(covariates), parent=parent.frame()) X <- eval(substitute(X), envir=Xenv, enclos=baseenv()) isquad <- is.quad(X) if(!is.ppp(X) && !isquad) stop("X should be a point pattern (ppp) or quadrature scheme (quad)") if(is.marked(X)) stop("Sorry, cannot handle marked point patterns") if(!missing(subset)) { W <- eval(subset, covariates, parent.frame()) if(!is.null(W)) { if(is.im(W)) { W <- solutionset(W) } else if(!is.owin(W)) { stop("Argument 'subset' should yield a window or logical image", call.=FALSE) } X <- X[W] } } po <- ppm(Q=X, trend=trend, covariates=covariates, forcefit=TRUE, rename.intercept=FALSE, covfunargs=covfunargs, use.gam=use.gam, nd=nd, eps=eps) XX <- if(isquad) X$data else X ## set default weight function if(is.null(weightfun)) switch(method, adapcl = { weightfun <- function(d) { as.integer(abs(d) <= 1)*exp(1/(d^2-1)) } attr(weightfun, "selfprint") <- "Indicator(-1 <= distance <= 1) * exp(1/(distance^2-1))" }, mincon = { }, { RmaxW <- (rmax %orifnull% rmax.rule("K", Window(XX), intensity(XX))) / 2 weightfun <- function(d) { as.integer(d <= RmaxW) } attr(weightfun, "selfprint") <- 
paste0("Indicator(distance <= ", RmaxW, ")") }) ## fit out <- switch(method, mincon = kppmMinCon(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, stabilize=stabilize, statistic=statistic, statargs=statargs, rmax=rmax, algorithm=algorithm, ...), clik2 = kppmComLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, stabilize=stabilize, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...), palm = kppmPalmLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, stabilize=stabilize, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...), adapcl = kppmCLadap(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, epsilon=epsilon, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...)) ## h <- attr(out, "h") out <- append(out, list(ClusterArgs=ClusterArgs, call=cl, callframe=parent.frame(), callstring=callstring)) # Detect DPPs DPP <- list(...)$DPP class(out) <- c(ifelse(is.null(DPP), "kppm", "dppm"), class(out)) # Update intensity estimate with improve.kppm if necessary: if(improve.type != "none") out <- do.call(improve.kppm, append(list(object = out, type = improve.type), improve.args)) attr(out, "h") <- h return(out) } kppmMinCon <- function(X, Xname, po, clusters, control=list(), stabilize=TRUE, statistic, statargs, algorithm="Nelder-Mead", DPP=NULL, ...) { # Minimum contrast fit stationary <- is.stationary(po) # compute intensity if(stationary) { lambda <- summary(po)$trend$value } else { # compute intensity at high resolution if available w <- as.owin(po, from="covariates") if(!is.mask(w)) w <- NULL lambda <- predict(po, locations=w) } # Detect DPP model and change clusters and intensity correspondingly if(!is.null(DPP)){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } mcfit <- clusterfit(X, clusters, lambda = lambda, dataname = Xname, control = control, stabilize=stabilize, statistic = statistic, statargs = statargs, algorithm=algorithm, ...) 
fitinfo <- attr(mcfit, "info") attr(mcfit, "info") <- NULL # all info that depends on the fitting method: Fit <- list(method = "mincon", statistic = statistic, Stat = fitinfo$Stat, StatFun = fitinfo$StatFun, StatName = fitinfo$StatName, FitFun = fitinfo$FitFun, statargs = statargs, pspace = fitinfo$pspace, mcfit = mcfit, maxlogcl = NULL) # results if(!is.null(DPP)){ clusters <- update(clusters, as.list(mcfit$par)) out <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, po = po, Fit = Fit) } else{ out <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = fitinfo$modelname, isPCP = fitinfo$isPCP, po = po, lambda = lambda, mu = mcfit$mu, par = mcfit$par, clustpar = mcfit$clustpar, clustargs = mcfit$clustargs, modelpar = mcfit$modelpar, covmodel = mcfit$covmodel, Fit = Fit) } return(out) } clusterfit <- function(X, clusters, lambda = NULL, startpar = NULL, ..., q=1/4, p=2, rmin=NULL, rmax=NULL, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), statistic = NULL, statargs = NULL, algorithm="Nelder-Mead", verbose=FALSE, pspace=NULL){ if(verbose) splat("Fitting cluster model") ## If possible get dataname from dots dataname <- list(...)$dataname ## Cluster info: info <- spatstatClusterModelInfo(clusters) if(verbose) splat("Retrieved cluster model information") ## Determine model type isPCP <- info$isPCP isDPP <- inherits(clusters, "detpointprocfamily") ## resolve algorithm parameters default.ctrl <- list(q=if(isDPP) 1/2 else 1/4, p=2, rmin=NULL, rmax=NULL) given.ctrl <- if(missing(ctrl)) list() else ctrl[names(default.ctrl)] given.args <- c(if(missing(q)) NULL else list(q=q), if(missing(p)) NULL else list(p=p), if(missing(rmin)) NULL else list(rmin=rmin), if(missing(rmax)) NULL else list(rmax=rmax)) ctrl <- resolve.defaults(given.args, given.ctrl, default.ctrl) if(verbose) { splat("Algorithm parameters:") print(ctrl) } ## if(inherits(X, "ppp")){ if(verbose) splat("Using point pattern data") if(is.null(dataname)) dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
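    ## Hedged sketch of calling clusterfit() directly on a point pattern
    ## (assumes 'redwood' from spatstat.data; statistic may be "K" or "pcf"):
    if(FALSE) {
      cf <- clusterfit(redwood, "MatClust", statistic="pcf")
      cf$par   # optimal parameters returned by the minimum contrast fit
    }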
if(is.null(statistic)) statistic <- "K" # Startpar: if(is.null(startpar)) startpar <- info$selfstart(X) stationary <- is.null(lambda) || (is.numeric(lambda) && length(lambda)==1) if(verbose) { splat("Starting parameters:") print(startpar) cat("Calculating summary function...") } # compute summary function if(stationary) { if(is.null(lambda)) lambda <- intensity(X) StatFun <- if(statistic == "K") "Kest" else "pcf" StatName <- if(statistic == "K") "K-function" else "pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=quote(X)), statargs, list(correction="best"))) } else { StatFun <- if(statistic == "K") "Kinhom" else "pcfinhom" StatName <- if(statistic == "K") "inhomogeneous K-function" else "inhomogeneous pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=quote(X), lambda=lambda), statargs, list(correction="best"))) } if(verbose) splat("Done.") } else if(inherits(X, "fv")){ if(verbose) splat("Using the given summary function") Stat <- X ## Get statistic type stattype <- attr(Stat, "fname") StatFun <- paste0(stattype) StatName <- NULL if(is.null(statistic)){ if(is.null(stattype) || !is.element(stattype[1L], c("K", "pcf"))) stop("Cannot infer the type of summary statistic from argument ", sQuote("X"), " please specify this via argument ", sQuote("statistic")) statistic <- stattype[1L] } if(stattype[1L]!=statistic) stop("Statistic inferred from ", sQuote("X"), " not equal to supplied argument ", sQuote("statistic")) # Startpar: if(is.null(startpar)){ if(isDPP) stop("No rule for starting parameters in this case. Please set ", sQuote("startpar"), " explicitly.") startpar <- info$checkpar(startpar, old=FALSE) startpar[["scale"]] <- mean(range(Stat[[fvnames(Stat, ".x")]])) } } else{ stop("Unrecognised format for argument X") } ## avoid using g(0) as it may be infinite if(statistic=="pcf"){ if(verbose) splat("Checking g(0)") argu <- fvnames(Stat, ".x") rvals <- Stat[[argu]] if(rvals[1L] == 0 && (is.null(rmin) || rmin == 0)) { if(verbose) splat("Ignoring g(0)") rmin <- rvals[2L] } } ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ if(verbose) splat("Invoking dppmFixAlgorithm") alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) algorithm <- alg$algorithm } dots <- info$resolvedots(...) #' determine initial values of parameters startpar <- info$checkpar(startpar) #' code to compute the theoretical summary function of the model theoret <- info[[statistic]] #' explanatory text desc <- paste("minimum contrast fit of", info$descname) #' mcargs <- resolve.defaults(list(observed=Stat, theoretical=theoret, startpar=startpar, ctrl=ctrl, method=algorithm, fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=statistic, modelname=info$modelname), margs=dots$margs, model=dots$model, funaux=info$funaux, pspace=pspace), list(...) ) if(isDPP && algorithm=="Brent" && changealgorithm) mcargs <- resolve.defaults(mcargs, list(lower=alg$lower, upper=alg$upper)) ## .............. FIT ....................... if(verbose) splat("Starting minimum contrast fit") mcfit <- do.call(mincontrast, mcargs) if(verbose) splat("Returned from minimum contrast fit") ## .......................................... 
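  ## The default criterion minimised by mincontrast() is
  ##     D(theta) = integral over [rmin, rmax] of
  ##                | Fhat(r)^q - F_theta(r)^q |^p dr
  ## with q = 1/4 (1/2 for DPPs) and p = 2, as resolved in 'ctrl' above.
  ## A hedged sketch using a hypothetical helper 'contrastD' (simple
  ## trapezoidal quadrature; Kest() and as.function.fv() are real
  ## spatstat functions, 'redwood' is assumed available; not run):
  if(FALSE) {
    contrastD <- function(r, Fhat, Ftheta, q=1/4, p=2) {
      f <- abs(Fhat(r)^q - Ftheta(r)^q)^p
      sum(diff(r) * (f[-1] + f[-length(f)])/2)  # trapezoidal rule
    }
    Khat  <- as.function(Kest(redwood))  # empirical K function
    Kpois <- function(r) pi * r^2        # theoretical Poisson K
    contrastD(seq(0.01, 0.25, by=0.005), Khat, Kpois)
  }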
## extract fitted parameters optpar <- mcfit$par names(optpar) <- names(startpar) mcfit$par <- optpar # Return results for DPPs if(isDPP){ extra <- list(Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, lambda = lambda) attr(mcfit, "info") <- extra if(verbose) splat("Returning from clusterfit (DPP case)") return(mcfit) } ## Extra stuff for ordinary cluster/lgcp models ## imbue with meaning ## infer model parameters mcfit$modelpar <- info$interpret(optpar, lambda) mcfit$internal <- list(model=ifelse(isPCP, clusters, "lgcp")) mcfit$covmodel <- dots$covmodel if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- mcfit$par[["kappa"]] # mu = mean cluster size mu <- lambda/kappa } else { # LGCP: extract variance parameter sigma2 sigma2 <- mcfit$par[["sigma2"]] # mu = mean of log intensity mu <- log(lambda) - sigma2/2 } ## Parameter values (new format) mcfit$mu <- mu mcfit$clustpar <- info$checkpar(mcfit$par, old=FALSE) mcfit$clustargs <- info$checkclustargs(dots$margs, old=FALSE) ## The old fit fun that would have been used (DO WE NEED THIS?) FitFun <- paste0(tolower(clusters), ".est", statistic) extra <- list(FitFun = FitFun, Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, isPCP = isPCP, lambda = lambda, pspace = pspace) attr(mcfit, "info") <- extra if(verbose) splat("Returning from clusterfit") return(mcfit) } kppmComLik <- function(X, Xname, po, clusters, control=list(), stabilize=TRUE, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ..., pspace=NULL) { W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax, what="ijd") # I <- cl$i # J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) sumweight <- safePositiveValue(sum(wIJ)) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at pairs of data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) # lambdaIJ <- lambda^2 # compute cdf of distance between two uniform random points in W g <- distcdf(W, delta=rmax/4096) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window # lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # lambda(x_i) * lambda(x_j) # lambdaIJ <- lambdaX[I] * lambdaX[J] # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM, delta=rmax/4096) # scaling constant is (integral of intensity)^2 gscale <- safePositiveValue(integral.im(lambdaM)^2, default=npoints(X)^2) } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) # ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } #' .......... define objective function ...................... 
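  ## The objective coded below is twice the log second-order composite
  ## likelihood, up to a constant not involving the cluster parameters:
  ##     2 * [ sum over pairs of w(d_ij) * log g(d_ij; theta)
  ##           - (sum of weights) * log( integral of (w*g)(.; theta) dG ) ]
  ## where G is the cdf of interpoint distance computed by distcdf() above.
  ## A hedged sketch of the Stieltjes integration step ('g2' is a
  ## hypothetical pair correlation function, not part of this package):
  if(FALSE) {
    G  <- distcdf(square(1), delta=0.01)       # cdf of interpoint distance
    g2 <- function(r, par) 1 + exp(-r/par[1])  # hypothetical pcf
    stieltjes(g2, G, par=0.1)                  # integral of g2 with respect to G
  }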
if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(paco), BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco' in its environment) # This is the log composite likelihood minus the constant term # sum(log(lambdaIJ)) - npairs * log(gscale) obj <- function(par, objargs) { with(objargs, { logprod <- sum(log(safePositiveValue(paco(dIJ, par)))) integ <- unlist(stieltjes(paco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logcl <- 2*(logprod - sumweight * log(integ)) logcl <- safeFiniteValue(logcl, default=-BIGVALUE) return(logcl) }, enclos=objargs$envir) } ## determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(wpaco), BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log composite likelihood minus the constant term # sum(wIJ * log(lambdaIJ)) - sumweight * log(gscale) obj <- function(par, objargs) { with(objargs, { integ <- unlist(stieltjes(wpaco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logcl <- safeFiniteValue( 2*(sum(wIJ * log(safePositiveValue(paco(dIJ, par)))) - sumweight * log(integ)), default=-BIGVALUE) return(logcl) }, enclos=objargs$envir) } ## determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) } ## ...................... Optimization settings ........................ if(stabilize) { ## Numerical stabilisation ## evaluate objective at starting state startval <- obj(startpar, objargs) ## use to determine appropriate global scale smallscale <- sqrt(.Machine$double.eps) fnscale <- -max(abs(startval), smallscale) parscale <- pmax(abs(startpar), smallscale) scaling <- list(fnscale=fnscale, parscale=parscale) } else { scaling <- list(fnscale=-1) } ## Update list of algorithm control arguments control.updated <- resolve.defaults(control, scaling, list(trace=0)) ## Initialise list of all arguments to 'optim' optargs <- list(par=startpar, fn=obj, objargs=objargs, control=control.updated, method=algorithm) ## DPP case: check startpar and modify algorithm changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } ## .......... optimize it .............................. opt <- do.call(optim, optargs) ## raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) ## .......... extract fitted parameters ..................... 
optpar <- opt$par names(optpar) <- names(startpar) ## save starting values in 'opt' for consistency with mincontrast() opt$par <- optpar opt$startpar <- startpar ## Finish in DPP case if(!is.null(DPP)){ ## all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace = pspace) # pack up clusters <- update(clusters, as.list(opt$par)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } ## meaningful model parameters modelpar <- info$interpret(optpar, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace = pspace) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar, clustpar = info$checkpar(par=optpar, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs, modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } kppmPalmLik <- function(X, Xname, po, clusters, control=list(), stabilize=TRUE, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ..., pspace=NULL) { W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax) # I <- cl$i J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) # sumweight <- sum(wIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) # sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) lambdaJ <- rep(lambda, length(J)) # compute cdf of distance between a uniform random point in W # and a randomly-selected point in X g <- distcdf(X, M, delta=rmax/4096) # scaling constant is (integral of intensity) * (number of points) gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) lambdaJ <- lambdaX[J] # compute cdf of distance between a uniform random point in X # and a random point in W with density proportional to intensity function g <- distcdf(X, M, dV=lambdaM, delta=rmax/4096) # scaling constant is (integral of intensity) * (number of points) gscale <- safePositiveValue(integral.im(lambdaM) * npoints(X), default=npoints(X)^2) } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) ## ..................................................... 
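  ## Hedged usage sketch of the Palm likelihood route implemented below
  ## (assumes 'redwood' from spatstat.data; not run here):
  if(FALSE) {
    fitP <- kppm(redwood ~ 1, clusters="Thomas", method="palm")
    logLik(fitP)  # maximised log Palm likelihood (Fit$maxlogcl)
    AIC(fitP)
  }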
# create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, g=g, gscale=gscale, sumloglam=safeFiniteValue(sum(log(lambdaJ))), envir=environment(paco), BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, { integ <- unlist(stieltjes(paco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logplik <- safeFiniteValue( sumloglam + sum(log(safePositiveValue(paco(dIJ, par)))) - gscale * integ, default=-BIGVALUE) return(logplik) }, enclos=objargs$envir) } ## determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, g=g, gscale=gscale, wsumloglam=safeFiniteValue( sum(wIJ * safeFiniteValue(log(lambdaJ))) ), envir=environment(wpaco), BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, { integ <- unlist(stieltjes(wpaco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logplik <- safeFiniteValue(wsumloglam + sum(wIJ * log(safePositiveValue(paco(dIJ, par)))) - gscale * integ, default=-BIGVALUE) return(logplik) }, enclos=objargs$envir) } ## determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) } ## ...................... Optimization settings ........................ if(stabilize) { ## Numerical stabilisation ## evaluate objective at starting state startval <- obj(startpar, objargs) ## use to determine appropriate global scale smallscale <- sqrt(.Machine$double.eps) fnscale <- -max(abs(startval), smallscale) parscale <- pmax(abs(startpar), smallscale) scaling <- list(fnscale=fnscale, parscale=parscale) } else { scaling <- list(fnscale=-1) } ## Update list of algorithm control arguments control.updated <- resolve.defaults(control, scaling, list(trace=0)) ## Initialise list of all arguments to 'optim' optargs <- list(par=startpar, fn=obj, objargs=objargs, control=control.updated, method=algorithm) ## DPP case: check startpar and modify algorithm changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } ## ....................................................................... 
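  ## The 'control' list assembled above is passed straight to optim(), so
  ## standard optim() settings can be supplied from the top level; a sketch
  ## (assuming 'redwood' is available; not run):
  if(FALSE) {
    fit <- kppm(redwood ~ 1, "Thomas", method="palm",
                control=list(maxit=1000, trace=1))
  }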
# optimize it opt <- do.call(optim, optargs) # raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) ## Extract optimal values of parameters optpar <- opt$par names(optpar) <- names(startpar) ## save starting values in 'opt' for consistency with minconfit() opt$par <- optpar opt$startpar <- startpar ## Finish in DPP case if(!is.null(DPP)){ ## all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace = pspace) # pack up clusters <- update(clusters, as.list(optpar)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } # meaningful model parameters modelpar <- info$interpret(optpar, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace = pspace) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar, clustpar = info$checkpar(par=optpar, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs, modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } ## ........... contributed by Chiara Fend ................... ## needs nonlinear equation solver nleqslv kppmCLadap <- function(X, Xname, po, clusters, control, weightfun, rmax=NULL, epsilon=0.01, DPP=NULL, algorithm="Broyden", ..., startpar=NULL, globStrat="dbldog") { if(!requireNamespace("nleqslv", quietly=TRUE)) stop(paste("The package", sQuote("nleqslv"), "is required"), call.=FALSE) W <- as.owin(X) if(is.null(rmax)) # specified for numerical stability rmax <- shortside(Frame(W)) # identify pairs of points that might contribute cl <- closepairs(X, rmax) dIJ <- cl$d #pairwise distances Rmin <- min(dIJ) indexmin <- which(dIJ==Rmin) #for later use # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs # compute intensity at pairs of data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) # compute cdf of distance between two uniform random points in W g <- distcdf(W, delta=rmax/4096) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window # lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM, delta=rmax/4096) # scaling constant is (integral of intensity)^2 gscale <- safePositiveValue(integral.im(lambdaM)^2, default=npoints(X)^2) } isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf dpcfun <- info$Dpcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL ## determine starting parameter values if(is.null(startpar)) { startpar <- selfstart(X) } else if(!isDPP){ checkpar <- info$checkpar startpar <- checkpar(startpar, old=TRUE) } ## optimization will be over the logarithms of the parameters startparLog <- log(startpar) pcfunLog <- function(par, ...) { pcfun(exp(par), ...) } dpcfunLog <- function(par, ...) { dpcfun(exp(par), ...) } # create local functions to evaluate pair correlation and its gradient # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfunLog, append(list(par=par, rvals=d), pcfunargs)) } dpaco <- function(d, par) { do.call(dpcfunLog, append(list(par=par, rvals=d), pcfunargs)) } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] #' .......... define objective function ...................... # create local function to evaluate weight(epsilon*M/(pcf(d)-1)) weight <- function(d, par) { y <- paco(d=d, par=par) # calculate M (only needs to be calculated for cluster models) M <- 1 if(!isDPP){ M <- abs(paco(d=0, par=par)-1) } return(weightfun(epsilon*M/(y-1))) } wlogcl2score <- function(par, paco, dpaco, dIJ, gscale, epsilon, cdf=g){ p <- length(par) temp <- rep(0, p) # check if current parameter is valid, if not return inf if(isDPP){ if(length(par)==1 && is.null(names(par))) names(par) <- clusters$freepar mod <- update(clusters, as.list(exp(par))) if(!valid(mod)){ return(rep(Inf, p)) } } # everything can be computed wdIJ <- weight(d=dIJ, par=par) index <- unique(c(which(wdIJ!=0), indexmin)) dIJcurrent <- dIJ[index] for(i in 1:p){ parname <- names(par)[i] # weighted derivatives wrt log of parname dpcfweighted <- function(d, par){ y <- dpaco(d = d, par = par)[parname,]*exp(par[i]) return(y*weight(d = d, par = par)) } temp[i] <- sum(dpcfweighted(d = dIJcurrent, par=par)/paco(d = dIJcurrent, par = par)) - gscale * stieltjes(dpcfweighted,cdf, par=par)$f } return(temp) } ## ................. optimize it .............................. 
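  ## Hedged sketch of this adaptive composite likelihood route (requires
  ## the 'nleqslv' package; 'redwood' assumed available; not run here):
  if(FALSE) {
    fitA <- kppm(redwood ~ 1, "Thomas", method="adapcl", epsilon=0.01)
    fitA$Fit$estfunc  # score equations evaluated at the returned solution
  }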
opt <- nleqslv::nleqslv(x = startparLog, fn = wlogcl2score, method = algorithm, global = globStrat, control = control, paco=paco, dpaco=dpaco, dIJ=dIJ, gscale=gscale, epsilon=epsilon) ## .......... extract fitted parameters on original scale ............... optpar <- exp(opt$x) names(optpar) <- names(startpar) ## insert entries expected in 'opt' opt$par <- optpar opt$startpar <- startpar ## Finish in DPP case if(isDPP){ # all info that depends on the fitting method: Fit <- list(method = "adapcl", cladapfit = opt, weightfun = weightfun, rmax = rmax, epsilon = epsilon, objfun = wlogcl2score, objargs = control, estfunc = opt$fvec) # pack up clusters <- update(clusters, as.list(exp(opt$x))) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } ## meaningful model parameters modelpar <- info$interpret(optpar, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "adapcl", cladapfit = opt, weightfun = weightfun, rmax = rmax, epsilon = epsilon, objfun = wlogcl2score, objargs = control, estfunc = opt$fvec) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar, clustpar = info$checkpar(par=optpar, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } improve.kppm <- local({ fnc <- function(r, eps, g){ (g(r) - 1)/(g(0) - 1) - eps} improve.kppm <- function(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) { verifyclass(object, "kppm") type <- match.arg(type) gfun <- pcfmodel(object) X <- object$X win <- as.owin(X) ## simple (rectangular) grid quadrature scheme ## (using pixels with centers inside owin only) mask <- as.mask(win, dimyx = dimyx) wt <- pixellate(win, W = mask) wt <- wt[mask] Uxy <- rasterxy.mask(mask) U <- ppp(Uxy$x, Uxy$y, window = win, check=FALSE) U <- U[mask] # nU <- npoints(U) Yu <- pixellate(X, W = mask) Yu <- Yu[mask] ## covariates at quadrature points po <- object$po Z <- model.images(po, mask) Z <- sapply(Z, "[", i=U) ##obtain initial beta estimate using composite likelihood beta0 <- coef(po) ## determining the dependence range if (type != "clik1" && is.null(rmax)) { diamwin <- diameter(win) rmax <- if(fnc(diamwin, eps.rmax, gfun) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diameter(win), eps=eps.rmax, g=gfun)$root if(verbose) splat(paste0("type: ", type, ", ", "dependence range: ", rmax, ", ", "dimyx: ", dimyx, ", g(0) - 1:", gfun(0) -1)) } ## preparing the WCL case if (type == "wclik1") Kmax <- 2*pi * integrate(function(r){r * (gfun(r) - 1)}, lower=0, upper=rmax)$value * exp(c(Z %*% beta0)) ## the g()-1 matrix without tapering if (!fast || (vcov && !fast.vcov)){ if (verbose) cat("computing the g(u_i,u_j)-1 matrix ...") gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) if (verbose) 
cat("..Done.\n") } if ( (fast && type == "quasi") | fast.vcov ){ if (verbose) cat("computing the sparse G-1 matrix ...\n") ## Non-zero gminus1 entries (when using tapering) cp <- crosspairs(U,U,rmax,what="ijd") if (verbose) cat("crosspairs done\n") Gtap <- (gfun(cp$d) - 1) if(vcov){ if(fast.vcov){ gminus1 <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=Gtap, dims=c(U$n, U$n)) } else{ if(fast) gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) } } if (verbose & type!="quasi") cat("..Done.\n") } if (type == "quasi" && fast){ mu0 <- exp(c(Z %*% beta0)) * wt mu0root <- sqrt(mu0) sparseG <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=mu0root[cp$i] * mu0root[cp$j] * Gtap, dims=c(U$n, U$n)) Rroot <- Matrix::Cholesky(sparseG, perm = TRUE, Imult = 1) ##Imult=1 means that we add 1*I if (verbose) cat("..Done.\n") } ## iterative weighted least squares/Fisher scoring bt <- beta0 noItr <- 1 repeat { mu <- exp(c(Z %*% bt)) * wt mu.root <- sqrt(mu) ## the core of estimating equation: ff=phi ## in case of ql, \phi=V^{-1}D=V_\mu^{-1/2}x where (G+I)x=V_\mu^{1/2} Z ff <- switch(type, clik1 = Z, wclik1= Z/(1 + Kmax), quasi = if(fast){ Matrix::solve(Rroot, mu.root * Z)/mu.root } else{ solve(diag(U$n) + t(gminus1 * mu), Z) } ) ##alternative ##R=chol(sparseG+sparseMatrix(i=c(1:U$n),j=c(1:U$n), ## x=rep(1,U$n),dims=c(U$n,U$n))) ##ff2 <- switch(type, ## clik1 = Z, ## wclik1= Z/(1 + Kmax), ## quasi = if (fast) ## solve(R,solve(t(R), mu.root * Z))/mu.root ## else solve(diag(U$n) + t(gminus1 * mu), Z)) ## print(summary(as.numeric(ff)-as.numeric(ff2))) ## the estimating equation: u_f(\beta) uf <- (Yu - mu) %*% ff ## inverse of minus expectation of Jacobian matrix: I_f Jinv <- solve(t(Z * mu) %*% ff) if(maxIter==0){ ## This is a built-in early exit for vcov internal calculations break } deltabt <- as.numeric(uf %*% Jinv) if (any(!is.finite(deltabt))) { warning(paste("Infinite value, NA or NaN appeared", "in the iterative weighted least squares algorithm.", "Returning the initial intensity estimate unchanged."), call.=FALSE) return(object) } ## updating the present estimate of \beta bt <- bt + deltabt if (verbose) splat(paste0("itr: ", noItr, ",\nu_f: ", as.numeric(uf), "\nbeta:", bt, "\ndeltabeta:", deltabt)) if (max(abs(deltabt/bt)) <= tolerance || max(abs(uf)) <= tolerance) break if (noItr > maxIter) stop("Maximum number of iterations reached without convergence.") noItr <- noItr + 1 } out <- object out$po$coef.orig <- beta0 out$po$coef <- bt loc <- if(is.sob(out$lambda)) as.mask(out$lambda) else mask out$lambda <- predict(out$po, locations = loc) out$improve <- list(type = type, rmax = rmax, dimyx = dimyx, fast = fast, fast.vcov = fast.vcov) if(save.internals){ out$improve <- append(out$improve, list(ff=ff, uf=uf, J.inv=Jinv)) } if(vcov){ if (verbose) cat("computing the asymptotic variance ...\n") ## variance of the estimation equation: Sigma_f = Var(u_f(bt)) trans <- if(fast) Matrix::t else t Sig <- trans(ff) %*% (ff * mu) + trans(ff * mu) %*% gminus1 %*% (ff * mu) ## note Abdollah's G does not have mu.root inside... ## the asymptotic variance of \beta: ## inverse of the Godambe information matrix out$vcov <- as.matrix(Jinv %*% Sig %*% Jinv) } return(out) } improve.kppm }) is.kppm <- function(x) { inherits(x, "kppm")} print.kppm <- print.dppm <- function(x, ...) 
{ isPCP <- x$isPCP # detect DPP isDPP <- inherits(x, "dppm") # handle outdated objects - which were all cluster processes if(!isDPP && is.null(isPCP)) isPCP <- TRUE terselevel <- spatstat.options('terse') digits <- getOption('digits') splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isDPP) "determinantal" else if(isPCP) "cluster" else "Cox", "point process model") Xname <- x$Xname if(waxlyrical('extras', terselevel) && nchar(Xname) < 20) { has.subset <- ("subset" %in% names(x$call)) splat("Fitted to", if(has.subset) "(a subset of)" else NULL, "point pattern dataset", sQuote(Xname)) } if(waxlyrical('gory', terselevel)) { switch(x$Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", x$Fit$StatName) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, adapcl = { splat("Fitted by adaptive second order composite likelihood") splat("\tepsilon =", x$Fit$epsilon) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, warning(paste("Unrecognised fitting method", sQuote(x$Fit$method))) ) } parbreak(terselevel) # ............... trend ......................... if(!(isDPP && is.null(x$fitted$intensity))) print(x$po, what="trend") # ..................... clusters ................ # DPP case if(isDPP){ splat("Fitted DPP model:") print(x$fitted) return(invisible(NULL)) } tableentry <- spatstatClusterModelInfo(x$clusters) splat(if(isPCP) "Cluster" else "Cox", "model:", tableentry$printmodelname(x)) cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } if(isDPP) { rx <- repul(x) splat(if(is.im(rx)) "(Average) strength" else "Strength", "of repulsion:", signif(mean(rx), 4)) } invisible(NULL) } plot.kppm <- local({ plotem <- function(x, ..., main=dmain, dmain) { plot(x, ..., main=main) } plot.kppm <- function(x, ..., what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) { ## catch objectname from dots if present otherwise deparse x: if(missing(xname)) xname <- short.deparse(substitute(x)) nochoice <- missing(what) what <- pickoption("plot type", what, c(statistic="statistic", intensity="intensity", cluster="cluster"), multi=TRUE) ## handle older objects Fit <- x$Fit if(is.null(Fit)) { warning("kppm object is in outdated format") Fit <- x Fit$method <- "mincon" } ## Catch locations for clusters if given loc <- list(...)$locations inappropriate <- (nochoice & ((what == "intensity") & (x$stationary))) | ((what == "statistic") & 
(Fit$method != "mincon")) | ((what == "cluster") & (identical(x$isPCP, FALSE))) | ((what == "cluster") & (!x$stationary) & is.null(loc)) if(!nochoice && !x$stationary && "cluster" %in% what && is.null(loc)) stop("Please specify additional argument ", sQuote("locations"), " which will be passed to the function ", sQuote("clusterfield"), ".") if(any(inappropriate)) { what <- what[!inappropriate] if(length(what) == 0){ message("Nothing meaningful to plot. Exiting...") return(invisible(NULL)) } } pause <- pause && (length(what) > 1) if(pause) opa <- par(ask=TRUE) for(style in what) switch(style, intensity={ plotem(x$po, ..., dmain=c(xname, "Intensity"), how="image", se=FALSE) }, statistic={ plotem(Fit$mcfit, ..., dmain=c(xname, Fit$StatName)) }, cluster={ plotem(clusterfield(x, locations = loc, verbose=FALSE), ..., dmain=c(xname, "Fitted cluster")) }) if(pause) par(opa) return(invisible(NULL)) } plot.kppm }) predict.kppm <- predict.dppm <- function(object, ...) { se <- resolve.1.default(list(se=FALSE), list(...)) interval <- resolve.1.default(list(interval="none"), list(...)) if(se) warning("Standard error calculation assumes a Poisson process") if(interval != "none") warning(paste(interval, "interval calculation assumes a Poisson process")) predict(as.ppm(object), ...) } fitted.kppm <- fitted.dppm <- function(object, ...) { fitted(as.ppm(object), ...) } residuals.kppm <- residuals.dppm <- function(object, ...) { type <- resolve.1.default(list(type="raw"), list(...)) if(type != "raw") warning(paste("calculation of", type, "residuals", "assumes a Poisson process")) residuals(as.ppm(object), ...) } formula.kppm <- formula.dppm <- function(x, ...) { formula(x$po, ...) } terms.kppm <- terms.dppm <- function(x, ...) { terms(x$po, ...) } labels.kppm <- labels.dppm <- function(object, ...) { labels(object$po, ...) } update.kppm <- function(object, ..., evaluate=TRUE, envir=environment(terms(object))) { argh <- list(...) 
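  #' hedged sketch of the kinds of calls this method supports
  #' (assuming 'redwood' is available; not run):
  if(FALSE) {
    fit0 <- kppm(redwood ~ 1, "Thomas")
    fit1 <- update(fit0, ~ x)            # new trend formula
    fit2 <- update(fit0, method="palm")  # same model, different fitting method
  }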
nama <- names(argh) callframe <- object$callframe #' look for a formula argument fmla <- formula(object) jf <- integer(0) if(!is.null(trend <- argh$trend)) { if(!can.be.formula(trend)) stop("Argument \"trend\" should be a formula") fmla <- newformula(formula(object), trend, callframe, envir) jf <- which(nama == "trend") } else if(any(isfo <- sapply(argh, can.be.formula))) { if(sum(isfo) > 1) { if(!is.null(nama)) isfo <- isfo & nzchar(nama) if(sum(isfo) > 1) stop(paste("Arguments not understood:", "there are two unnamed formula arguments")) } jf <- which(isfo) fmla <- argh[[jf]] fmla <- newformula(formula(object), fmla, callframe, envir) } #' look for a point pattern or quadscheme if(!is.null(X <- argh$X)) { if(!inherits(X, c("ppp", "quad"))) stop(paste("Argument X should be a formula,", "a point pattern or a quadrature scheme")) jX <- which(nama == "X") } else if(any(ispp <- sapply(argh, inherits, what=c("ppp", "quad")))) { if(sum(ispp) > 1) { if(!is.null(nama)) ispp <- ispp & nzchar(nama) if(sum(ispp) > 1) stop(paste("Arguments not understood:", "there are two unnamed point pattern/quadscheme arguments")) } jX <- which(ispp) X <- argh[[jX]] } else { X <- object$X jX <- integer(0) } Xexpr <- if(length(jX) > 0) sys.call()[[2L + jX]] else NULL #' remove arguments just recognised, if any jused <- c(jf, jX) if(length(jused) > 0) { argh <- argh[-jused] nama <- names(argh) } #' update the matched call thecall <- getCall(object) methodname <- as.character(thecall[[1L]]) switch(methodname, kppm.formula = { # original call has X = [formula with lhs] if(!is.null(Xexpr)) { lhs.of.formula(fmla) <- Xexpr } else if(is.null(lhs.of.formula(fmla))) { lhs.of.formula(fmla) <- as.name('.') } oldformula <- as.formula(getCall(object)$X) thecall$X <- newformula(oldformula, fmla, callframe, envir) }, { # original call has X = ppp and trend = [formula without lhs] oldformula <- as.formula(getCall(object)$trend %orifnull% (~1)) fom <- newformula(oldformula, fmla, callframe, envir) if(!is.null(Xexpr)) lhs.of.formula(fom) <- Xexpr if(is.null(lhs.of.formula(fom))) { # new call has same format thecall$trend <- fom if(length(jX) > 0) thecall$X <- X } else { # new call has formula with lhs thecall$trend <- NULL thecall$X <- fom } }) knownnames <- unique(c(names(formals(kppm.ppp)), names(formals(mincontrast)), names(formals(optim)))) knownnames <- setdiff(knownnames, c("X", "trend", "observed", "theoretical", "fn", "gr", "...")) ok <- nama %in% knownnames thecall <- replace(thecall, nama[ok], argh[ok]) thecall$formula <- NULL # artefact of 'step', etc thecall[[1L]] <- as.name("kppm") if(!evaluate) return(thecall) out <- eval(thecall, envir=parent.frame(), enclos=envir) #' update name of data if(length(jX) == 1) { mc <- match.call() Xlang <- mc[[2L+jX]] out$Xname <- short.deparse(Xlang) } #' return(out) } unitname.kppm <- unitname.dppm <- function(x) { return(unitname(x$X)) } "unitname<-.kppm" <- "unitname<-.dppm" <- function(x, value) { unitname(x$X) <- value if(!is.null(x$Fit$mcfit)) { unitname(x$Fit$mcfit) <- value } else if(is.null(x$Fit)) { warning("kppm object in outdated format") if(!is.null(x$mcfit)) unitname(x$mcfit) <- value } return(x) } as.fv.kppm <- as.fv.dppm <- function(x) { if(x$Fit$method == "mincon") return(as.fv(x$Fit$mcfit)) gobs <- if(is.stationary(x)) pcf(x$X, correction="good") else pcfinhom(x$X, lambda=x, correction="good", update=FALSE) gfit <- (pcfmodel(x))(gobs$r) g <- bind.fv(gobs, data.frame(fit=gfit), "%s[fit](r)", "predicted %s for fitted model") return(g) } coef.kppm <- coef.dppm <- 
function(object, ...) { return(coef(object$po)) } Kmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="K") } pcfmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="pcf") } Kpcf.kppm <- function(model, what=c("K", "pcf", "kernel")) { what <- match.arg(what) # Extract function definition from internal table clusters <- model$clusters tableentry <- spatstatClusterModelInfo(clusters) if(is.null(tableentry)) stop("No information available for", sQuote(clusters), "cluster model") fun <- tableentry[[what]] if(is.null(fun)) stop("No expression available for", what, "for", sQuote(clusters), "cluster model") # Extract model parameters par <- model$par # Extract auxiliary definitions (if applicable) funaux <- tableentry$funaux # Extract covariance model (if applicable) cm <- model$covmodel model <- cm$model margs <- cm$margs # f <- function(r) as.numeric(fun(par=par, rvals=r, funaux=funaux, model=model, margs=margs)) return(f) } is.stationary.kppm <- is.stationary.dppm <- function(x) { return(x$stationary) } is.poisson.kppm <- function(x) { switch(x$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process mu <- x$mu return(!is.null(mu) && (max(mu) == 0)) }, LGCP = { # log-Gaussian Cox process sigma2 <- x$par[["sigma2"]] return(sigma2 == 0) }, return(FALSE)) } # extract ppm component as.ppm.kppm <- as.ppm.dppm <- function(object) { object$po } # other methods that pass through to 'ppm' as.owin.kppm <- as.owin.dppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { from <- match.arg(from) as.owin(as.ppm(W), ..., from=from, fatal=fatal) } domain.kppm <- Window.kppm <- domain.dppm <- Window.dppm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, from=from) } model.images.kppm <- model.images.dppm <- function(object, W=as.owin(object), ...) { model.images(as.ppm(object), W=W, ...) } model.matrix.kppm <- model.matrix.dppm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE) { if(missing(data)) data <- NULL model.matrix(as.ppm(object), data=data, ..., Q=Q, keepNA=keepNA) } model.frame.kppm <- model.frame.dppm <- function(formula, ...) { model.frame(as.ppm(formula), ...) } logLik.kppm <- logLik.dppm <- function(object, ...) { cl <- object$Fit$maxlogcl if(is.null(cl)) stop(paste("logLik is only available for kppm objects fitted with", "method='palm' or method='clik2'"), call.=FALSE) ll <- logLik(as.ppm(object)) # to inherit class and d.f. ll[] <- cl return(ll) } AIC.kppm <- AIC.dppm <- function(object, ..., k=2) { cl <- logLik(object) df <- attr(cl, "df") return(- 2 * as.numeric(cl) + k * df) } extractAIC.kppm <- extractAIC.dppm <- function (fit, scale = 0, k = 2, ...) { cl <- logLik(fit) edf <- attr(cl, "df") aic <- - 2 * as.numeric(cl) + k * edf return(c(edf, aic)) } nobs.kppm <- nobs.dppm <- function(object, ...) 
{ nobs(as.ppm(object)) } psib <- function(object) UseMethod("psib") psib.kppm <- function(object) { clus <- object$clusters info <- spatstatClusterModelInfo(clus) if(!info$isPCP) { warning("The model is not a cluster process") return(NA) } g <- pcfmodel(object) p <- 1 - 1/g(0) return(p) } spatstat.core/R/rho2hat.R0000644000176200001440000002302014144333463014741 0ustar liggesusers# # rho2hat.R # # Relative risk for pairs of covariate values # # $Revision: 1.27 $ $Date: 2020/12/19 05:25:06 $ # rho2hat <- function(object, cov1, cov2, ..., method=c("ratio", "reweight")) { cov1name <- short.deparse(substitute(cov1)) cov2name <- short.deparse(substitute(cov2)) callstring <- short.deparse(sys.call()) method <- match.arg(method) # validate model if(is.ppp(object) || is.quad(object)) { model <- ppm(object, ~1, forcefit=TRUE) reference <- "area" modelcall <- NULL } else if(is.ppm(object)) { model <- object reference <- "model" modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) } else stop("object should be a point pattern or a point process model") # interpret string "x" or "y" as a coordinate function getxyfun <- function(s) { switch(s, x = { function(x,y) { x } }, y = { function(x,y) { y } }, stop(paste("Unrecognised covariate name", sQuote(s)))) } if(is.character(cov1) && length(cov1) == 1) { cov1name <- cov1 cov1 <- getxyfun(cov1name) } if(is.character(cov2) && length(cov2) == 1) { cov2name <- cov2 cov2 <- getxyfun(cov2name) } if( (cov1name == "x" && cov2name == "y") || (cov1name == "y" && cov2name == "x")) { # spatial relative risk isxy <- TRUE needflip <- (cov1name == "y" && cov2name == "x") X <- data.ppm(model) if(needflip) X <- flipxy(X) switch(method, ratio = { # ratio of smoothed intensity estimates den <- density(X, ...) 
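## 'den' is the kernel-smoothed intensity of the data points X.
## Below, when reference="model", the fitted intensity is blurred with the
## same kernel, and the pixel image ratio den/lam is the estimate of
## relative risk.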
sigma <- attr(den, "sigma") varcov <- attr(den, "varcov") W <- as.owin(den) if(!needflip) { lambda <- predict(model, locations=W) } else { lambda <- flipxy(predict(model, locations=flipxy(W))) } rslt <- switch(reference, area = { den }, model = { lam <- blur(lambda, sigma=sigma, varcov=varcov, normalise=TRUE) eval.im(den/lam) }) }, reweight = { ## smoothed point pattern with weights = 1/reference W <- as.owin(X) W <- do.call.matched(as.mask, list(w=quote(W), ...)) if(!needflip) { lambda <- predict(model, locations=W) } else { lambda <- flipxy(predict(model, locations=flipxy(W))) } gstarX <- switch(reference, area = { rep.int(area(W), npoints(X)) }, model = { lambda[X] }) rslt <- density(X, ..., weights=1/gstarX) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) Z12points <- X r1 <- W$xrange r2 <- W$yrange lambda <- lambda[] } else { # general case isxy <- FALSE # harmonise covariates if(is.function(cov1) && is.im(cov2)) { cov1 <- as.im(cov1, W=cov2) } else if(is.im(cov1) && is.function(cov2)) { cov2 <- as.im(cov2, W=cov1) } # evaluate each covariate at data points and at pixels stuff1 <- evalCovar(model, cov1) stuff2 <- evalCovar(model, cov2) # unpack values1 <- stuff1$values values2 <- stuff2$values # covariate values at each data point Z1X <- values1$ZX Z2X <- values2$ZX # covariate values at each pixel Z1values <- values1$Zvalues Z2values <- values2$Zvalues # model intensity lambda <- values1$lambda # ranges of each covariate r1 <- range(Z1X, Z1values, finite=TRUE) r2 <- range(Z2X, Z2values, finite=TRUE) scal <- function(x, r) { (x - r[1])/diff(r) } # scatterplot coordinates Z12points <- ppp(scal(Z1X, r1), scal(Z2X, r2), c(0,1), c(0,1)) Z12pixels <- ppp(scal(Z1values, r1), scal(Z2values, r2), c(0,1), c(0,1)) # normalising constants # nX <- length(Z1X) npixel <- length(lambda) areaW <- area(Window(model)) pixelarea <- areaW/npixel baseline <- if(reference == "area") rep.int(1, npixel) else lambda wts <- baseline * pixelarea dont.complain.about(Z12pixels) switch(method, ratio = { # estimate intensities fhat <- density(Z12points, ...) sigma <- attr(fhat, "sigma") varcov <- attr(fhat, "varcov") ghat <- do.call(density.ppp, resolve.defaults(list(x=quote(Z12pixels), weights=quote(wts)), list(...), list(sigma=sigma, varcov=varcov))) # compute ratio of smoothed densities rslt <- eval.im(fhat/ghat) }, reweight = { # compute smoothed intensity with weight = 1/reference ghat <- density(Z12pixels, weights=wts, ...) rslt <- density(Z12points, weights=1/ghat[Z12points], ...) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) } # add scale and label info attr(rslt, "stuff") <- list(isxy=isxy, cov1=cov1, cov2=cov2, cov1name=cov1name, cov2name=cov2name, r1=r1, r2=r2, reference=reference, lambda=lambda, modelcall=modelcall, callstring=callstring, Z12points=Z12points, sigma=sigma, varcov=varcov) class(rslt) <- c("rho2hat", class(rslt)) rslt } plot.rho2hat <- function(x, ..., do.points=FALSE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") # resolve "..." 
arguments rd <- resolve.defaults(list(...), list(add=FALSE, axes=!s$isxy, xlab=s$cov1name, ylab=s$cov2name)) # plot image plotparams <- graphicsPars("plot") do.call.matched(plot.im, resolve.defaults(list(x=quote(x), axes=FALSE), list(...), list(main=xname, ribargs=list(axes=TRUE))), extrargs=c(plotparams, "add", "zlim", "breaks")) # add axes if(rd$axes) { axisparams <- graphicsPars("axis") Axis <- function(..., extrargs=axisparams) { do.call.matched(graphics::axis, resolve.defaults(list(...)), extrargs=extrargs) } if(s$isxy) { # for (x,y) plots the image is at the correct physical scale xr <- x$xrange yr <- x$yrange spak <- 0.05 * max(diff(xr), diff(yr)) Axis(side=1, ..., at=pretty(xr), pos=yr[1] - spak) Axis(side=2, ..., at=pretty(yr), pos=xr[1] - spak) } else { # for other plots the image was scaled to the unit square rx <- s$r1 ry <- s$r2 px <- pretty(rx) py <- pretty(ry) Axis(side=1, labels=px, at=(px - rx[1])/diff(rx), ...) Axis(side=2, labels=py, at=(py - ry[1])/diff(ry), ...) } title(xlab=rd$xlab) title(ylab=rd$ylab) } if(do.points) { poy <- s$Z12points dont.complain.about(poy) do.call.matched(plot.ppp, resolve.defaults(list(x=quote(poy), add=TRUE), list(...)), extrargs=c("pch", "col", "cols", "bg", "cex", "lwd", "lty")) } invisible(NULL) } print.rho2hat <- function(x, ...) { s <- attr(x, "stuff") cat("Scatterplot intensity estimate (class rho2hat)\n") cat(paste("for the covariates", s$cov1name, "and", s$cov2name, "\n")) switch(s$reference, area=cat("Function values are absolute intensities\n"), model={ cat("Function values are relative to fitted model\n") print(s$modelcall) }) cat(paste("Call:", s$callstring, "\n")) if(s$isxy) { cat("Obtained by spatial smoothing of original data\n") cat("Smoothing parameters used by density.ppp:\n") } else { cat("Obtained by transforming to the unit square and smoothing\n") cat("Smoothing parameters (on unit square) used by density.ppp:\n") } if(!is.null(s$sigma)) cat(paste("\tsigma = ", signif(s$sigma, 5), "\n")) if(!is.null(s$varcov)) { cat("\tvarcov =\n") ; print(s$varcov) } cat("Intensity values:\n") NextMethod("print") } predict.rho2hat <- function(object, ..., relative=FALSE) { if(length(list(...)) > 0) warning("Additional arguments ignored in predict.rho2hat") # extract info s <- attr(object, "stuff") reference <- s$reference #' extract images of covariate Z1 <- s$cov1 Z2 <- s$cov2 if(!is.im(Z1)) Z1 <- as.im(Z1, Window(object)) if(!is.im(Z2)) Z2 <- as.im(Z2, Window(object)) #' rescale to [0,1] Z1 <- scaletointerval(Z1, xrange=s$r1) Z2 <- scaletointerval(Z2, xrange=s$r2) # extract pairs of covariate values ZZ <- pairs(Z1, Z2, plot=FALSE) # apply rho to Z YY <- safelookup(object, ppp(ZZ[,1], ZZ[,2], c(0,1), c(0,1)), warn=FALSE) # reform as image Y <- Z1 Y[] <- YY # adjust to reference baseline if(!(relative || reference == "area")) { lambda <- s$lambda Y <- Y * lambda } return(Y) } spatstat.core/R/adaptive.density.R0000644000176200001440000000053114144333461016645 0ustar liggesusers#' #' adaptive.density.R #' #' $Revision: 1.1 $ $Date: 2019/02/06 03:22:51 $ #' adaptive.density <- function(X, ..., method=c("voronoi", "kernel")) { method <- match.arg(method) result <- switch(method, voronoi = densityVoronoi(X, ...), kernel = densityAdaptiveKernel(X, ...)) return(result) } spatstat.core/R/quantiledensity.R0000644000176200001440000000513314144333463016621 0ustar liggesusers#' #' quantiledensity.R #' #' quantile method for class 'density' #' #' Also a CDF from a 'density' #' #' $Revision: 1.3 $ $Date: 2015/09/01 11:53:15 $ quantile.density <- 
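#' Quantiles are computed by numerically integrating the density estimate
#' to obtain a CDF, then inverting it (see the cumulative sums below).
#' Hedged usage sketch (any object produced by stats::density will do):
#'   xsim <- rnorm(100)   # hypothetical data
#'   quantile(density(xsim), probs = c(0.25, 0.5, 0.75))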
local({ quantile.density <- function(x, probs = seq(0, 1, 0.25), names = TRUE, ..., warn=TRUE) { stopifnot(inherits(x, "density")) #' check whether density estimate was restricted to an interval if(warn && is.call(cl <- x$call) && any(c("from", "to") %in% names(cl))) warning(paste("Density was normalised within the computed range", "of x values", prange(c(cl$from, cl$to))), call.=FALSE) #' validate probs eps <- 100 * .Machine$double.eps if(any((p.ok <- !is.na(probs)) & (probs < -eps | probs > 1 + eps))) stop("'probs' outside [0,1]") if (na.p <- any(!p.ok)) { o.pr <- probs probs <- probs[p.ok] probs <- pmax(0, pmin(1, probs)) } np <- length(probs) qs <- rep(NA_real_, np) if (np > 0) { #' extract density values xx <- x$x yy <- x$y nn <- length(xx) #' integrate, normalise Fx <- cumsum(yy * c(0, diff(xx))) Fx <- Fx/Fx[nn] #' quantile for(j in 1:np) { ii <- min(which(Fx >= probs[j])) if(!is.na(ii) && ii >= 1 && ii <= nn) qs[j] <- xx[ii] } if (names && np > 0L) { names(qs) <- format_perc(probs) } } if (na.p) { o.pr[p.ok] <- qs names(o.pr) <- rep("", length(o.pr)) names(o.pr)[p.ok] <- names(qs) return(o.pr) } else return(qs) } format_perc <- function (x, digits = max(2L, getOption("digits")), probability = TRUE, use.fC = length(x) < 100, ...) { if (length(x)) { if (probability) x <- 100 * x paste0(if (use.fC) formatC(x, format = "fg", width = 1, digits = digits) else format(x, trim = TRUE, digits = digits, ...), "%") } else character(0) } quantile.density }) CDF <- function(f, ...) { UseMethod("CDF") } CDF.density <- function(f, ..., warn=TRUE) { stopifnot(inherits(f, "density")) #' check whether density estimate was restricted to an interval if(warn && is.call(cl <- f$call) && any(c("from", "to") %in% names(cl))) warning(paste("Density was normalised within the computed range", "of x values", prange(c(cl$from, cl$to))), call.=FALSE) #' integrate xx <- f$x yy <- f$y nn <- length(xx) Fx <- cumsum(yy * c(0, diff(xx))) #' normalise Fx <- Fx/Fx[nn] #' FF <- approxfun(xx, Fx, method="linear", rule=2) return(FF) } spatstat.core/R/circdensity.R0000644000176200001440000000275514144333461015724 0ustar liggesusers#' #' circdensity.R #' #' Kernel smoothing for circular data #' #' $Revision: 1.3 $ $Date: 2014/12/04 06:49:20 $ circdensity <- function(x, sigma="nrd0", ..., bw=NULL, weights=NULL, unit=c("degree", "radian")) { xname <- short.deparse(substitute(x)) missu <- missing(unit) if(missing(sigma) && !is.null(bw)) sigma <- bw unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.character(sigma)) { sigma <- switch(sigma, bcv = bw.bcv, nrd = bw.nrd, nrd0 = bw.nrd0, SJ = bw.SJ, ucv = bw.ucv, get(paste0("bw.", sigma), mode="function")) } if(is.function(sigma)) { sigma <- sigma(x) if(!(is.numeric(sigma) && length(sigma) == 1L && sigma > 0)) stop("Bandwidth selector should return a single positive number") } check.1.real(sigma) #' replicate data x <- x %% FullCircle xx <- c(x - FullCircle, x, x + FullCircle) #' replicate weights if(!is.null(weights)) { stopifnot(length(weights) == length(x)) weights <- rep(weights, 3)/3 } #' smooth z <- do.call(density.default, resolve.defaults(list(x=xx, bw=sigma, weights=weights), list(...), list(from=0, to=FullCircle))) z$y <- 3 * z$y z$data.name <- xname return(z) } spatstat.core/R/plot.fv.R0000644000176200001440000006622014144333463014773 0ustar liggesusers# # plot.fv.R (was: conspire.S) # # $Revision: 1.132 $ $Date: 2021/10/03 01:49:22 $ # # # conspire <- function(...) 
{ # .Deprecated("plot.fv", package="spatstat") # plot.fv(...) # } plot.fv <- local({ hasonlyone <- function(x, amongst) { sum(all.vars(parse(text=x)) %in% amongst) == 1 } extendifvector <- function(a, n, nmore) { if(is.null(a)) return(a) if(length(a) == 1) return(a) return(c(a, rep(a[1], nmore))) } fixit <- function(a, n, a0, a00) { # 'a' is formal argument # 'a0' and 'a00' are default and fallback default # 'n' is number of values required if(is.null(a)) a <- if(!is.null(a0)) a0 else a00 if(length(a) == 1) return(rep.int(a, n)) else if(length(a) != n) stop(paste("Length of", short.deparse(substitute(a)), "does not match number of curves to be plotted")) else return(a) } pow10 <- function(x) { 10^x } clip.to.usr <- function() { usr <- par('usr') clip(usr[1], usr[2], usr[3], usr[4]) } plot.fv <- function(x, fmla, ..., subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) { xname <- if(is.language(substitute(x))) short.deparse(substitute(x)) else "" force(legendavoid) if(is.null(legend)) legend <- !add mathfont <- match.arg(mathfont) verifyclass(x, "fv") env.user <- parent.frame() indata <- as.data.frame(x) xlogscale <- (log %in% c("x", "xy", "yx")) ylogscale <- (log %in% c("y", "xy", "yx")) ## ---------------- determine plot formula ---------------- defaultplot <- missing(fmla) || is.null(fmla) if(defaultplot) fmla <- formula(x) ## This *is* the last possible moment, so... fmla <- as.formula(fmla, env=env.user) ## validate the variable names vars <- variablesinformula(fmla) reserved <- c(".", ".x", ".y", ".a", ".s") external <- !(vars %in% c(colnames(x), reserved)) if(any(external)) { sought <- vars[external] found <- unlist(lapply(sought, exists, envir=env.user, mode="numeric")) if(any(!found)) { nnot <- sum(!found) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!found])), ngettext(nnot, "was", "were"), "not found")) } else { ## validate the found variables externvars <- lapply(sought, get, envir=env.user) isnum <- sapply(externvars, is.numeric) len <- lengths(externvars) ok <- isnum & (len == 1 | len == nrow(x)) if(!all(ok)) { nnot <- sum(!ok) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!ok])), ngettext(nnot, "is", "are"), "not of the right format")) } } } ## Extract left hand side as given # lhs.original <- fmla[[2]] fmla.original <- fmla ## expand "." 
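## The reserved abbreviations (see 'reserved' above) are shorthand for
## sets of columns of the fv object: "." is the columns recommended for
## plotting, ".x" and ".y" the default abscissa and ordinate, ".s" the
## columns delimiting a shaded band, and ".a" all function values.
## e.g. the default plot formula usually has the form '. ~ .x'.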
dotnames <- fvnames(x, ".") starnames <- fvnames(x, "*") umap <- fvexprmap(x) fmla <- eval(substitute(substitute(fom, um), list(fom=fmla, um=umap))) ## ------------------- extract data for plot --------------------- ## extract LHS and RHS of formula lhs <- fmla[[2]] rhs <- fmla[[3]] ## extract data lhsdata <- eval(lhs, envir=indata) rhsdata <- eval(rhs, envir=indata) ## reformat if(is.vector(lhsdata)) { lhsdata <- matrix(lhsdata, ncol=1) lhsvars <- all.vars(as.expression(lhs)) lhsvars <- lhsvars[lhsvars %in% names(x)] colnames(lhsdata) <- if(length(lhsvars) == 1) lhsvars else if(length(starnames) == 1 && (starnames %in% lhsvars)) starnames else paste(deparse(lhs), collapse="") } ## check lhs names exist lnames <- colnames(lhsdata) nc <- ncol(lhsdata) lnames0 <- paste("V", seq_len(nc), sep="") if(length(lnames) != nc) colnames(lhsdata) <- lnames0 else if(any(uhoh <- !nzchar(lnames))) colnames(lhsdata)[uhoh] <- lnames0[uhoh] lhs.names <- colnames(lhsdata) ## check whether each lhs column is associated with a single column of 'x' ## that is one of the alternative versions of the function. ## This may be unreliable, as it depends on the ## column names assigned to lhsdata by eval() one.star <- unlist(lapply(lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, lhs.names, "") ## check rhs data if(is.matrix(rhsdata)) stop("rhs of formula should yield a vector") rhsdata <- as.numeric(rhsdata) nplots <- ncol(lhsdata) allind <- 1:nplots ## ---------- extra plots may be implied by 'shade' ----------------- extrashadevars <- NULL if(!is.null(shade)) { ## select columns by name or number names(allind) <- explicit.lhs.names shind <- try(allind[shade]) if(inherits(shind, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) if(any(nbg <- is.na(shind))) { ## columns not included in formula: add them morelhs <- try(as.matrix(indata[ , shade[nbg], drop=FALSE])) if(inherits(morelhs, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) nmore <- ncol(morelhs) extrashadevars <- colnames(morelhs) if(defaultplot) { success <- TRUE } else if("." %in% variablesinformula(fmla.original)) { ## evaluate lhs of formula, expanding "." to shade names u <- if(length(extrashadevars) == 1) as.name(extrashadevars) else { as.call(lapply(c("cbind", extrashadevars), as.name)) } ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) foo <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)), list(fom=fmla.original))) dont.complain.about(u, ux, uy) lhsnew <- foo[[2]] morelhs <- eval(lhsnew, envir=indata) success <- identical(colnames(morelhs), extrashadevars) } else if(is.name(lhs) && as.character(lhs) %in% names(indata)) { ## lhs is the name of a single column in x ## expand the LHS explicit.lhs.names <- c(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) success <- TRUE } else if(length(explicit.lhs.dotnames) > 1) { ## lhs = cbind(...) where ... 
are dotnames cbound <- paste0("cbind", paren(paste(explicit.lhs.dotnames, collapse=", "))) if(identical(deparse(lhs), cbound)) { success <- TRUE explicit.lhs.names <- union(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) } else success <- FALSE } else success <- FALSE if(success) { ## add these columns to the plotting data lhsdata <- cbind(lhsdata, morelhs) shind[nbg] <- nplots + seq_len(nmore) lty <- extendifvector(lty, nplots, nmore) col <- extendifvector(col, nplots, nmore) lwd <- extendifvector(lwd, nplots, nmore) nplots <- nplots + nmore ## update the names one.star <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, explicit.lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, explicit.lhs.names, "") } else { ## cannot add columns warning(paste("Shade", ngettext(sum(nbg), "column", "columns"), commasep(sQuote(shade[nbg])), "were missing from the plot formula, and were omitted")) shade <- NULL extrashadevars <- NULL } } } ## -------------------- determine plotting limits ---------------------- ## restrict data to subset if desired if(!is.null(subset)) { keep <- if(is.character(subset)) { eval(parse(text=subset), envir=indata) } else eval(subset, envir=indata) lhsdata <- lhsdata[keep, , drop=FALSE] rhsdata <- rhsdata[keep] } ## determine x and y limits and clip data to these limits if(is.null(xlim) && add) { ## x limits are determined by existing plot xlim <- par("usr")[1:2] } if(!is.null(xlim)) { ok <- !is.finite(rhsdata) | (xlim[1] <= rhsdata & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## if we're using the default argument, use its recommended range if(rhs == fvnames(x, ".x")) { xlim <- attr(x, "alim") %orifnull% range(as.vector(rhsdata), finite=TRUE) if(xlogscale && xlim[1] <= 0) xlim[1] <- min(rhsdata[is.finite(rhsdata) & rhsdata > 0], na.rm=TRUE) ok <- !is.finite(rhsdata) | (rhsdata >= xlim[1] & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## actual range of values to be plotted if(xlogscale) { ok <- is.finite(rhsdata) & (rhsdata > 0) & matrowany(lhsdata > 0) xlim <- range(rhsdata[ok]) } else { xlim <- range(rhsdata, na.rm=TRUE) } } } if(is.null(ylim)) { yok <- is.finite(lhsdata) if(ylogscale) yok <- yok & (lhsdata > 0) ylim <- range(lhsdata[yok],na.rm=TRUE) } if(!is.null(ylim.covers)) ylim <- range(ylim, ylim.covers) ## return x, y limits only? if(limitsonly) return(list(xlim=xlim, ylim=ylim)) ## ------------- work out how to label the plot -------------------- ## extract plot labels, substituting function name labl <- fvlabels(x, expand=TRUE) ## create plot label map (key -> algebraic expression) map <- fvlabelmap(x) ## ......... label for x axis .................. if(is.null(xlab)) { argname <- fvnames(x, ".x") if(as.character(fmla)[3] == argname) { ## The x axis variable is the default function argument. ArgString <- fvlabels(x, expand=TRUE)[[argname]] xexpr <- parse(text=ArgString) ## use specified font xexpr <- fontify(xexpr, mathfont) ## Add name of unit of length? 
ax <- summary(unitname(x))$axis if(is.null(ax)) { xlab <- xexpr } else { xlab <- expression(VAR ~ COMMENT) xlab[[1]][[2]] <- xexpr[[1]] xlab[[1]][[3]] <- ax } } else { ## map ident to label xlab <- eval(substitute(substitute(rh, mp), list(rh=rhs, mp=map))) ## use specified font xlab <- fontify(xlab, mathfont) } } if(is.language(xlab) && !is.expression(xlab)) xlab <- as.expression(xlab) ## ......... label for y axis ................... leftside <- lhs if(ncol(lhsdata) > 1 || length(dotnames) == 1) { ## For labelling purposes only, simplify the LHS by ## replacing 'cbind(.....)' by '.' ## even if not all columns are included. leftside <- paste(as.expression(leftside)) eln <- explicit.lhs.dotnames eln <- eln[nzchar(eln)] cb <- if(length(eln) == 1) eln else { paste("cbind(", paste(eln, collapse=", "), ")", sep="") } compactleftside <- gsub(cb, ".", leftside, fixed=TRUE) ## Separately expand "." to cbind(.....) ## and ".x", ".y" to their real names dotdot <- c(dotnames, extrashadevars) cball <- if(length(dotdot) == 1) dotdot else { paste("cbind(", paste(dotdot, collapse=", "), ")", sep="") } expandleftside <- gsub(".x", fvnames(x, ".x"), leftside, fixed=TRUE) expandleftside <- gsub(".y", fvnames(x, ".y"), expandleftside, fixed=TRUE) expandleftside <- gsubdot(cball, expandleftside) ## convert back to language compactleftside <- parse(text=compactleftside)[[1]] expandleftside <- parse(text=expandleftside)[[1]] } else { compactleftside <- expandleftside <- leftside } ## construct label for y axis if(is.null(ylab)) { yl <- attr(x, "yexp") if(defaultplot && !is.null(yl)) { ylab <- yl } else { ## replace "." and short identifiers by plot labels ylab <- eval(substitute(substitute(le, mp), list(le=compactleftside, mp=map))) } } if(is.language(ylab)) { ## use specified font ylab <- fontify(ylab, mathfont) ## ensure it's an expression if(!is.expression(ylab)) ylab <- as.expression(ylab) } ## ------------------ start plotting --------------------------- ## create new plot if(!add) do.call(plot.default, resolve.defaults(list(xlim, ylim, type="n", log=log), list(xlab=xlab, ylab=ylab), list(...), list(main=xname))) ## handle 'type' = "n" giventype <- resolve.defaults(list(...), list(type=NA))$type if(identical(giventype, "n")) return(invisible(NULL)) ## process lty, col, lwd arguments opt0 <- spatstat.options("par.fv") lty <- fixit(lty, nplots, opt0$lty, 1:nplots) col <- fixit(col, nplots, opt0$col, 1:nplots) lwd <- fixit(lwd, nplots, opt0$lwd, 1) ## convert to greyscale? 
if(spatstat.options("monochrome")) col <- to.grey(col) if(!is.null(shade)) { ## shade region between critical boundaries ## extract relevant columns for shaded bands shdata <- lhsdata[, shind] if(!is.matrix(shdata) || ncol(shdata) != 2) stop("The argument shade should select two columns of x") ## truncate infinite values to plot limits if(any(isinf <- is.infinite(shdata))) { if(is.null(ylim)) { warning("Unable to truncate infinite values to the plot area") } else { shdata[isinf & (shdata == Inf)] <- ylim[2] shdata[isinf & (shdata == -Inf)] <- ylim[1] } } ## determine limits of shading shdata1 <- shdata[,1] shdata2 <- shdata[,2] ## plot grey polygon xpoly <- c(rhsdata, rev(rhsdata)) ypoly <- c(shdata1, rev(shdata2)) miss1 <- !is.finite(shdata1) miss2 <- !is.finite(shdata2) if(!any(broken <- (miss1 | miss2))) { ## single polygon clip.to.usr() polygon(xpoly, ypoly, border=shadecol, col=shadecol) } else { ## interrupted dat <- data.frame(rhsdata=rhsdata, shdata1=shdata1, shdata2=shdata2) serial <- cumsum(broken) lapply(split(dat, serial), function(z) { with(z, { xp <- c(rhsdata, rev(rhsdata)) yp <- c(shdata1, rev(shdata2)) clip.to.usr() polygon(xp, yp, border=shadecol, col=shadecol) }) }) ## save for use in placing legend okp <- !c(broken, rev(broken)) xpoly <- xpoly[okp] ypoly <- ypoly[okp] } ## overwrite graphical parameters lty[shind] <- 1 ## try to preserve the same type of colour specification if(is.character(col) && is.character(shadecol)) { ## character representations col[shind] <- shadecol } else if(is.numeric(col) && !is.na(sc <- paletteindex(shadecol))) { ## indices in colour palette col[shind] <- sc } else { ## convert colours to hexadecimal and edit relevant values col <- col2hex(col) col[shind] <- col2hex(shadecol) } ## remove these columns from further plotting allind <- allind[-shind] ## } else xpoly <- ypoly <- numeric(0) ## ----------------- plot lines ------------------------------ for(i in allind) { clip.to.usr() lines(rhsdata, lhsdata[,i], lty=lty[i], col=col[i], lwd=lwd[i]) } if(nplots == 1) return(invisible(NULL)) ## ---------------- determine legend ------------------------- key <- colnames(lhsdata) mat <- match(key, names(x)) keyok <- !is.na(mat) matok <- mat[keyok] legdesc <- rep.int("constructed variable", length(key)) legdesc[keyok] <- attr(x, "desc")[matok] leglabl <- lnames0 leglabl[keyok] <- labl[matok] ylab <- attr(x, "ylab") if(!is.null(ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) if(any(grepl("%s", legdesc))) legdesc <- sprintf(legdesc, ylab) } ## compute legend info legtxt <- key if(legendmath) { legtxt <- leglabl if(defaultplot) { ## try to convert individual labels to expressions fancy <- try(parse(text=leglabl), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } else { ## try to navigate the parse tree fancy <- try(fvlegend(x, expandleftside), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } } if(is.expression(legtxt) || is.language(legtxt) || all(sapply(legtxt, is.language))) legtxt <- fontify(legtxt, mathfont) ## --------------- handle legend plotting ----------------------------- if(identical(legend, TRUE)) { ## legend will be plotted ## Basic parameters of legend legendxpref <- if(identical(legendpos, "float")) NULL else legendpos optparfv <- spatstat.options("par.fv")$legendargs %orifnull% list() legendspec <- resolve.defaults(legendargs, list(lty=lty, col=col, lwd=lwd), optparfv, list(x=legendxpref, legend=legtxt, inset=0.05, y.intersp=if(legendmath) 1.3 else 1), .StripNull=TRUE) tB <- 
dev.capabilities()$transparentBackground if(!any(names(legendspec) == "bg") && !is.na(tB) && !identical(tB, "no")) legendspec$bg <- "transparent" if(legendavoid || identical(legendpos, "float")) { ## Automatic determination of legend position ## Assemble data for all plot objects linedata <- list() xmap <- if(xlogscale) log10 else identity ymap <- if(ylogscale) log10 else identity inv.xmap <- if(xlogscale) pow10 else identity inv.ymap <- if(ylogscale) pow10 else identity for(i in seq_along(allind)) linedata[[i]] <- list(x=xmap(rhsdata), y=ymap(lhsdata[,i])) polydata <- if(length(xpoly) > 0) list(x=xmap(xpoly), y=ymap(ypoly)) else NULL #' ensure xlim, ylim define a box boxXlim <- if(diff(xlim) > 0) xlim else par('usr')[1:2] boxYlim <- if(diff(ylim) > 0) ylim else par('usr')[3:4] #' objects <- assemble.plot.objects(xmap(boxXlim), ymap(boxYlim), lines=linedata, polygon=polydata) ## find best position to avoid them legendbest <- findbestlegendpos(objects, preference=legendpos, legendspec=legendspec) ## handle log scale if((xlogscale || ylogscale) && checkfields(legendbest, c("x", "xjust", "yjust"))) { ## back-transform x, y coordinates legendbest$x$x <- inv.xmap(legendbest$x$x) legendbest$x$y <- inv.ymap(legendbest$x$y) } } else legendbest <- list() ## ********** plot legend ************************* if(!is.null(legend) && legend) do.call(graphics::legend, resolve.defaults(legendargs, legendbest, legendspec, .StripNull=TRUE)) } ## convert labels back to character labl <- paste.expr(legtxt) labl <- gsub(" ", "", labl) ## return legend info df <- data.frame(lty=lty, col=col, key=key, label=labl, meaning=legdesc, row.names=key) return(invisible(df)) } plot.fv }) assemble.plot.objects <- function(xlim, ylim, ..., lines=NULL, polygon=NULL) { # Take data that would have been passed to the commands 'lines' and 'polygon' # and form corresponding geometrical objects. objects <- list() if(!is.null(lines)) { if(is.psp(lines)) { objects <- list(lines) } else { if(checkfields(lines, c("x", "y"))) { lines <- list(lines) } else if(!all(unlist(lapply(lines, checkfields, L=c("x", "y"))))) stop("lines should be a psp object, a list(x,y) or a list of list(x,y)") W <- owin(xlim, ylim) for(i in seq_along(lines)) { lines.i <- lines[[i]] x.i <- lines.i$x y.i <- lines.i$y n <- length(x.i) if(length(y.i) != n) stop(paste(paste("In lines[[", i, "]]", sep=""), "the vectors x and y have unequal length")) if(!all(ok <- (is.finite(x.i) & is.finite(y.i)))) { x.i <- x.i[ok] y.i <- y.i[ok] n <- sum(ok) } segs.i <- psp(x.i[-n], y.i[-n], x.i[-1], y.i[-1], W, check=FALSE) objects <- append(objects, list(segs.i)) } } } if(!is.null(polygon)) { # Add filled polygon pol <- polygon[c("x", "y")] ok <- with(pol, is.finite(x) & is.finite(y)) if(!all(ok)) pol <- with(pol, list(x=x[ok], y=y[ok])) if(Area.xypolygon(pol) < 0) pol <- lapply(pol, rev) P <- try(owin(poly=pol, xrange=xlim, yrange=ylim, check=FALSE)) if(!inherits(P, "try-error")) objects <- append(objects, list(P)) } return(objects) } findbestlegendpos <- local({ ## Given a list of geometrical objects, find the best position ## to avoid them. 
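## Strategy sketch: rasterise the plotted curves (as line segment
## patterns) and any shaded polygon, compute the pixelwise minimum of
## their distance transforms, and place the legend at the point farthest
## from all plotted ink, adjusting the justification near the plot edges.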
bestlegendpos <- function(objects, show=FALSE, aspect=1, bdryok=TRUE, preference="float", verbose=FALSE, legendspec=NULL) { ## find bounding box W <- do.call(boundingbox, lapply(objects, as.rectangle)) ## convert to common box objects <- lapply(objects, rebound, rect=W) ## rescale x and y axes so that bounding box has aspect ratio 'aspect' aspectW <- with(W, diff(yrange)/diff(xrange)) s <- aspect/aspectW mat <- diag(c(1, s)) invmat <- diag(c(1, 1/s)) scaled.objects <- lapply(objects, affine, mat=mat) scaledW <- affine(W, mat=mat) if(verbose) { cat("Scaled space:\n") print(scaledW) } ## reinstate common box scaled.objects <- lapply(scaled.objects, rebound, rect=scaledW) ## pixellate the scaled objects pix.scal.objects <- lapply(scaled.objects, asma) ## apply distance transforms in scaled space D1 <- distmap(pix.scal.objects[[1]]) Dlist <- lapply(pix.scal.objects, distmap, xy=list(x=D1$xcol, y=D1$yrow)) ## distance transform of superposition D <- im.apply(Dlist, min) if(!bdryok) { ## include distance to boundary B <- attr(D1, "bdry") D <- eval.im(pmin.int(D, B)) } if(show) { plot(affine(D, mat=invmat), add=TRUE) lapply(lapply(scaled.objects, affine, mat=invmat), plot, add=TRUE) } if(preference != "float") { ## evaluate preferred location (check for collision) if(!is.null(legendspec)) { ## pretend to plot the legend as specified legout <- do.call(graphics::legend, append(legendspec, list(plot=FALSE))) ## determine bounding box legbox <- with(legout$rect, owin(c(left, left+w), c(top-h, top))) scaledlegbox <- affine(legbox, mat=mat) ## check for collision Dmin <- min(D[scaledlegbox]) if(Dmin >= 0.02) { ## no collision: stay at preferred location. Exit. return(list(x=preference)) } ## collision occurred! } else { ## no legend information. ## Pretend legend is 15% of plot width and height xr <- scaledW$xrange yr <- scaledW$yrange testloc <- switch(preference, topleft = c(xr[1],yr[2]), top = c(mean(xr), yr[2]), topright = c(xr[2], yr[2]), right = c(xr[2], mean(yr)), bottomright = c(xr[2], yr[1]), bottom = c(mean(xr), yr[1]), bottomleft = c(xr[1], yr[1]), left = c(xr[1], mean(yr)), center = c(mean(xr), mean(yr)), NULL) if(!is.null(testloc)) { ## look up distance value at preferred location testpat <- ppp(x=testloc[1], y=testloc[2], xr, yr, check=FALSE) val <- safelookup(D, testpat) crit <- 0.15 * min(diff(xr), diff(yr)) if(verbose) cat(paste("val=",val, ", crit=", crit, "\n")) if(val > crit) { ## no collision: stay at preferred location. Exit. return(list(x=preference)) } ## collision occurred! } } ## collision occurred! } ## find location of max locmax <- which(D$v == max(D), arr.ind=TRUE) locmax <- unname(locmax[1,]) pos <- list(x=D$xcol[locmax[2]], y=D$yrow[locmax[1]]) pos <- affinexy(pos, mat=invmat) if(show) points(pos) ## determine justification of legend relative to this point ## to avoid crossing edges of plot xrel <- (pos$x - W$xrange[1])/diff(W$xrange) yrel <- (pos$y - W$yrange[1])/diff(W$yrange) xjust <- if(xrel < 0.1) 0 else if(xrel > 0.9) 1 else 0.5 yjust <- if(yrel < 0.1) 0 else if(yrel > 0.9) 1 else 0.5 ## out <- list(x=pos, xjust=xjust, yjust=yjust) return(out) } asma <- function(z) { if(is.owin(z)) as.mask(z) else if(is.psp(z)) as.mask.psp(z) else NULL } callit <- function(...) 
{ rslt <- try(bestlegendpos(...)) if(!inherits(rslt, "try-error")) return(rslt) return(list()) } callit }) spatstat.core/R/randomsets.R0000644000176200001440000000077414144333463015564 0ustar liggesusers#' #' randomsets.R #' #' Generation of random sets #' #' $Revision: 1.2 $ $Date: 2019/08/16 07:53:05 $ rthinclumps <- function(W, p, ...) { check.1.real(p) if(badprobability(p, TRUE)) stop("p must be a valid probability between 0 and 1", call.=FALSE) if(!(is.im(W) || is.owin(W))) stop("W should be a window or pixel image", call.=FALSE) clumps <- connected(W, ...) keep <- (runif(length(levels(clumps))) < p) retained <- eval.im(keep[clumps]) return(solutionset(retained)) } spatstat.core/R/simulate.detPPF.R0000644000176200001440000003710214145330755016346 0ustar liggesusers## simulate.detPPF.R ## $Revision: 1.9 $ $Date: 2021/11/18 01:28:27 $ ## ## This file contains functions to simulate DPP models. ## Two simulation functions are visible: ## - simulate.detpointprocfamily (most useful) ## - rdpp (more generic workhorse function -- actually the real workhorse is the locally defined rdppp) ## ## Furthermore the auxiliary function dppeigen is defined here. rdpp <- local({ ## Generates an empty point pattern emptyppx <- function(W, simplify = TRUE){ W <- as.boxx(W) r <- W$ranges d <- ncol(r) if(simplify){ if(d==2) return(ppp(numeric(0), numeric(0), window=as.owin(W))) if(d==3) return(pp3(numeric(0), numeric(0), numeric(0), W)) } rslt <- replicate(d, numeric(0), simplify=FALSE) names(rslt) <- paste("x",1:d,sep="") rslt <- as.data.frame(rslt) return(ppx(rslt, domain = W, coord.type= rep("spatial", d))) } rdppp <- function(index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, given = NULL, given_max_volume = 0.5, ...){ ## Check arguments: if (!(is.logical(debug))) stop(paste(sQuote("debug"), "must be TRUE or FALSE")) if (!is.numeric(reject_max)||reject_max<=1) stop(paste(sQuote("reject_max"), "must be a numeric greater than 1")) if (!is.numeric(progress)||progress<0) stop(paste(sQuote("progress"), "must be a non-negative numeric")) index <- as.matrix(index) d <- ncol(index) window <- as.boxx(window) ranges <- window$ranges boxlengths <- as.numeric(ranges[2L, ] - ranges[1L, ]) if(ncol(ranges)!=d) stop("The dimension differs from the number of columns in index") if(basis != "fourierbasis"){ warning("Non Fourier basis probably doesn't work correctly!
Fourier is assumed for bounds in rejection sampling.") userbasis <- get(basis) if (!(is.function(userbasis))) stop(paste(sQuote("basis"), "must be a function")) tmp <- userbasis(ranges[1,,drop=FALSE], index, window) if (!(is.numeric(tmp) || is.complex(tmp))) stop(paste("Output of", sQuote("basis"), "must be numeric or complex")) basis <- function(x, k, boxlengths){ userbasis(x, k, boxx(lapply(boxlengths, function(x) list(c(0,x))))) } } else{ basis <- fourierbasisraw } ## Number of points to simulate: n <- nrow(index) ## Resolve `given` for pseudo conditional simulation if(!is.null(given)){ # Make sure `given` is a list of point patterns if(is.ppp(given) || is.pp3(given) || is.ppx(given)){ given <- list(given) } stopifnot(all(sapply(given, function(x){ is.ppp(x) || is.pp3(x) || is.ppx(x)}))) # Check that the window (or its boundingbox) is inside the simulation window Wgiven <- lapply(given, function(x) as.boxx(domain(x))) stopifnot(all(sapply(Wgiven, function(w){ all(w$ranges[1,] >= ranges[1,]) && all(w$ranges[2,] <= ranges[2,]) }))) stopifnot(sum(sapply(Wgiven, volume))0) cat(paste("Simulating", n, "points:\n")) ## Main for loop over number of points: for(i in (n-1):1){ ## Print progress: if(progress>0) progressreport(n-i, n, every=progress) ## Aux. variable to count number of rejection steps: tries <- 0 # Debug info: if(debug){ rejected <- matrix(NA,reject_max,d) } repeat{ ## Proposed point: newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) if(!is.null(given)){ if(i>(n-ngiven)){ newx <- coordsgiven[n-i+1,,drop=FALSE] } else{ while(any(sapply(Wgiven, function(w) inside.boxx(as.hyperframe(newx), w = w)))) newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) } } ## Basis functions eval. at proposed point: v <- as.vector(basis(newx, index, boxlengths)) ## Vector of projection weights (has length n-i) wei <- t(v)%*%estar if(!is.null(given) && i>(n-ngiven)){ break } ## Accept probability: # tmp <- prod(ranges[2,]-ranges[1,])/n*(sum(abs(v)^2)-sum(abs(wei)^2)) tmp <- 1-prod(ranges[2,]-ranges[1,])/n*(sum(abs(wei)^2)) ## If proposal is accepted the loop is broken: if(runif(1)reject_max){ stop(paste("Rejection sampling failed reject_max =",reject_max,"times in a row")) } ## Increase the count of rejection steps: tries <- tries+1 # Debug info: if(debug){ rejected[tries,] <- newx } } ## END OF REJECTION LOOP # Record the accepted point: x[n-i+1,] <- newx # Debug info: if(debug){ if(tries==0){ rej <- empty } else{ rej <- ppx(rejected[1:tries,,drop=FALSE],window, simplify=TRUE) } debugList[[n-i+1]] = list( old=ppx(x[1:(n-i),,drop=FALSE],window, simplify=TRUE), accepted=ppx(newx,window,simplify=TRUE), rejected=rej, index=index, estar = estar) } ## If it is the last point exit the main loop: if(i==1){break} ## Calculate orthogonal vector for Gram-Schmidt procedure: # w <- v - rowSums(matrix(wei,n,n-i,byrow=TRUE)*e[,1:(n-i)]) w <- v - colSums(t(e)*as.vector(wei)) ## Record normalized version in the Gram-Schmidt matrices: enew <- w/sqrt(sum(abs(w)^2)) e <- cbind(e, enew) estar <- cbind(estar,Conj(enew)) } ## END OF MAIN FOR LOOP # Save points as point pattern: X <- ppx(x, window, simplify = TRUE) # Debug info: if(debug){ attr(X, "dpp") <- list(debug=debugList) } if(progress>0) cat(" Done!\n") return(X) } rdpp <- function(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, ...){ window2d <- NULL if (is.owin(window)) window2d <- window sampleindex <- 
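## Bernoulli thinning of the spectrum: each basis index is kept
## independently with probability equal to its eigenvalue in 'eig',
## yielding the (random) set of basis functions of the projection DPP
## that rdppp then simulates by sequential rejection sampling.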
as.matrix(index[rbinom(nrow(index), 1, eig)==1, , drop=FALSE]) X <- rdppp(sampleindex, basis=basis, window=window, reject_max=reject_max, progress=progress, debug=debug, ...) if(!is.null(window2d)) X <- X[window2d] return(X) } rdpp } ) simulate.dppm <- simulate.detpointprocfamily <- function(object, nsim = 1, seed = NULL, ..., W = NULL, trunc = .99, correction = "periodic", rbord = reach(object) # parallel = FALSE ){ # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. if(inherits(object, "dppm")){ if(is.null(W)) W <- Window(object$X) object <- object$fitted } if(!inherits(object, "detpointprocfamily")) stop("The model to simulate must be of class detpointprocfamily") if(length(tmp <- object$freepar)>0) stop(paste("The model to simulate must be completely specified. The following parameters are unspecified:", tmp)) if(!valid(object)) stop("The model is invalid. Please change parameter values to get a valid model") if(!is.numeric(nsim)||nsim<1) stop(paste(sQuote("nsim"), "must be a numeric greater than or equal to 1")) nsim <- floor(nsim) dim <- dim(object) basis <- object$basis ####### BACKDOOR TO SPHERICAL CASE ######## if(!is.null(spherefun <- object$sim_engine)){ sphereSimEngine <- get(spherefun) rslt <- sphereSimEngine(object, trunc, nsim, ...) attr(rslt, "seed") <- RNGstate return(rslt) } ########################################### # Check stationarity and window compatibility (if 'W' and 'thin' both are present) statmodel <- is.null(thin <- object$thin) if(is.null(W)){ if(!statmodel) W <- domain(thin) } Wowin <- if(is.owin(W)) W else NULL if(is.null(W)){ W <- boxx(rep(list(0:1), dim)) } else{ W <- as.boxx(W, warn.owin = FALSE) } if(!statmodel){ if(!is.subset.owin(Wowin,thin)) stop("The window of simulation is not contained in the window of the inhomogeneous intensity.") } r <- W$ranges if(dim!=ncol(r)) stop(paste("The dimension of the window:", ncol(r), "is inconsistent with the dimension of the model:", dim)) Wscale <- as.numeric(r[2,]-r[1,]) Wcenter <- as.numeric(colMeans(r)) if(correction=="border"){ if(!is.numeric(rbord)||any(rbord<0)) stop(paste(sQuote("rbord"), "must be a non-negative numeric")) borderscale <- pmin((Wscale+2*rbord)/Wscale, 2) Wscale <- borderscale*Wscale } ## lambda <- intensity(object) tmp <- dppeigen(object, trunc, Wscale) trunc <- tmp$trunc prec <- tmp$prec n <- length(tmp$eig) indexlist <- replicate(nsim, { x <- as.matrix(tmp$index[rbinom(n, 1, tmp$eig)==1, , drop=FALSE]); gc(); x }, simplify = FALSE) rm(tmp) gc() onesim <- function(i, win=NULL){ X <- rdpp(1, indexlist[[i]], basis = basis, window = boxx(rep(list(c(-.5,.5)), dim)), ...) 
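## Each pattern is generated in the unit box centred at the origin;
## below it is rescaled by 'Wscale' and shifted by 'Wcenter' to match
## the requested simulation window.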
a <- attr(X, "dpp") a <- c(a, list(prec = prec, trunc = trunc)) if(correction=="border"){ if(dim!=2) stop("Border correction only implemented for dimension 2 at the moment.") X <- X[affine.owin(as.owin(X), mat = diag(1/borderscale))] } if(is.ppp(X)){ X <- affine(X, matrix(c(Wscale[1],0,0,Wscale[2]), 2, 2), Wcenter) if(!is.null(win)) X <- X[win] } else{ X <- ppx(X$data, domain = as.boxx(X$domain), coord.type = rep("spatial", dim)) X$data <- as.hyperframe(as.data.frame(X$data)*matrix(Wscale, nrow(X$data), ncol(X$data), byrow = TRUE)) X$domain$ranges <- X$domain$ranges*matrix(Wscale, 2, dim, byrow = TRUE) + matrix(Wcenter, 2, dim, byrow = TRUE) X <- ppx(X$data, X$domain, simplify = TRUE) } attr(X, "dpp") <- a return(X) } if(nsim==1){ rslt <- onesim(1,win=Wowin) if(!statmodel) rslt <- rthin(rslt, P=thin) } else{ ######## Old code for parallel simulation ######### # if(is.logical(parallel)){ # cl.cores <- if(parallel) NULL else 1 # } else{ # cl.cores <- parallel # } # rslt <- detlapply(1:nsim, onesim, cl.cores=cl.cores, win=Wowin) ################################################### rslt <- lapply(1:nsim, onesim, win=Wowin) if(!statmodel) rslt <- lapply(rslt, rthin, P=thin) names(rslt) <- paste("Simulation", 1:nsim) rslt <- if(dim == 2) as.solist(rslt) else as.anylist(rslt) } attr(rslt, "seed") <- RNGstate return(rslt) } dppeigen <- function(model, trunc, Wscale, stationary = FALSE){ dim <- dim(model) if(stationary && dim!=2) stop("Stationarity can only be exploited in dimension 2 at the moment.") Wscale <- as.numeric(Wscale) check.nvector(Wscale, dim, things="dimensions") ## Calculate expected number of points if the intensity is a parameter expnum <- NULL lambdaname <- model$intensity if(!is.null(lambdaname)) expnum <- getElement(model$fixedpar, lambdaname)*prod(Wscale) ## Get the maximal truncation in each dimension maxtrunc <- spatstat.options("dpp.maxmatrix")^(1/dim) ## Extract spectral density specden <- dppspecden(model) truncrange <- dppspecdenrange(model)*max(Wscale) if(trunc>=1){ ## Integer truncation fixed by user. if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ trunc <- floor(trunc) index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } else{ ## Integer truncation calculated from user-specified precision. if(is.null(expnum)) stop("Cannot calculate truncation adaptively in a model without intensity parameter. Please specify trunc directly as a positive integer.") prec0 <- trunc trunc <- 1 prec <- 0 ## cat("truncation is being calculated adaptively. 
Current truncation:\n") while(prec<=prec0 && (2*trunc)<=maxtrunc && trunc<=truncrange){ trunc <- 2*trunc if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } ## cat("\n") if(prec tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) ## result$method <- c("Monte Carlo test of fitted Gibbs model", paste("based on", nsim, "repetitions of"), sub("Spatial", "spatial", result$method)) return(result) } PoissonTest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE, gibbsok=FALSE, modelname, covname) { if(missing(modelname)) modelname <- short.deparse(substitute(model)) if(missing(covname)) covname <- short.deparse(substitute(covariate)) test <- match.arg(test) stopifnot(is.mppm(model)) if(!gibbsok && !is.poisson.mppm(model)) stop("Only implemented for Poisson models") ## extract things from model data <- model$data npat <- model$npat Y <- data.mppm(model) if(fast) { ## extract original quadrature schemes and convert to point patterns QQ <- quad.mppm(model) PP <- lapply(QQ, union.quad) Zweights <- lapply(QQ, w.quad) } else Zweights <- list() ## `evaluate' covariate if(verbose) cat("Extracting covariate...") if(identical(covariate, "x")) covariate <- xcoord if(identical(covariate, "y")) covariate <- ycoord if(is.character(covariate)) { ## extract covariate with this name from data used to fit model if(!(covariate %in% names(data))) stop(paste("Model does not contain a covariate called", dQuote(covariate))) covname <- covariate covariate <- data[, covname, drop=TRUE] } else if(inherits(covariate, c("listof", "anylist"))) { if(length(covariate) != npat) stop(paste("Length of list of covariate values does not match", "number of point patterns in data of original model")) } else if(is.hyperframe(covariate)) { ## extract first column covariate <- covariate[,1L, drop=TRUE] if(length(covariate) != npat) stop(paste("Number of rows of covariate hyperframe does not match", "number of point patterns in data of original model")) } else if(is.function(covariate) || is.im(covariate)) { ## replicate to make a list covariate <- as.anylist(rep(list(covariate), npat)) } else stop(paste("Format of argument", sQuote("covariates"), "not understood")) if(verbose) { cat("done.\nComputing statistics for each pattern...") pstate <- list() } ## compile information for test from each row Zvalues <- ZX <- Win <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) XI <- Y[[i]] if(fast) PI <- PP[[i]] else WI <- XI$window covariateI <- covariate[[i]] if(is.im(covariateI)) { type <- "im" ## evaluate at data points ZXI <- if(interpolate) interp.im(covariateI, XI$x, XI$y) else covariateI[XI] if(fast) { ## covariate values for quadrature points ZI <- covariateI[PI] } else { ## covariate image inside window 
ZI <- covariateI[WI, drop=FALSE] ## corresponding mask WI <- as.owin(ZI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else if(is.function(covariateI)) { type <- "function" ## evaluate exactly at data points ZXI <- covariateI(XI$x, XI$y) if(fast) { ## covariate values for quadrature points ZI <- covariateI(PI$x, PI$y) } else { ## window WI <- as.mask(WI) ## covariate image inside window ZI <- as.im(covariateI, W=WI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else stop("covariate should be an image or a function(x,y)") ZX[[i]] <- ZXI if(fast) Zvalues[[i]] <- ZI else { Win[[i]] <- WI ## values of covariate in window Zvalues[[i]] <- allpixelvalues(ZI) } } if(verbose) cat("done.\nComputing predicted intensity...") ## compute predicted intensities trend <- if(fast) fitted(model, type="trend") else predict(model, type="trend", locations=Win, verbose=verbose)$trend if(verbose) cat("done.\nExtracting...") ## extract relevant values lambda <- if(fast) trend else lapply(trend, allpixelvalues) if(verbose) cat("done.\nPerforming test...") ## flatten to vectors lambda <- unlist(lambda) Zweights <- unlist(Zweights) Zvalues <- unlist(Zvalues) ZX <- unlist(ZX) if(length(lambda) != length(Zvalues)) stop("Internal error: mismatch between predicted values and Z values") if(length(Zvalues) != length(Zweights)) stop("Internal error: mismatch between Z values and Z weights") lambda <- lambda * Zweights ## form weighted cdf of Z values in window FZ <- ewcdf(Zvalues, lambda/sum(lambda)) ## Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } ## make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) ## evaluate at data points if(!jitter) U <- FZ(ZX) else { ## jitter observed values to avoid ties grain <- min(diff(sortunique(ZX)))/8 jit <- runif(length(ZX), min=0, max=grain) sgn <- sample(c(-1L,1L), length(ZX), replace=TRUE) sgn[ZX==min(xxx)] <- 1L sgn[ZX==max(xxx)] <- -1L U <- FZ(ZX + sgn*jit) } ## Test uniformity result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test") result$data.name <- paste("predicted cdf of covariate", sQuote(paste(covname, collapse="")), "evaluated at data points of", sQuote(modelname)) if(verbose) cat("done.\n") class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues = Zvalues, lambda = lambda, ZX = ZX, FZ = FZ, U = U, type = type) attr(result, "info") <- list(modelname = modelname, covname = covname) return(result) } cdf.test.mppm }) spatstat.core/R/randompp3.R0000644000176200001440000000256414144333463015307 0ustar liggesusers#' #' randompp3.R #' #' $Revision: 1.1 $ $Date: 2020/11/30 11:43:50 $ #' runifpoint3 <- function(n, domain=box3(), nsim=1, drop=TRUE) { domain <- as.box3(domain) result <- vector(mode="list", length=nsim) dd <- as.list(domain)[c("xrange", "yrange", "zrange")] for(i in 1:nsim) { x <- with(dd, runif(n, min=xrange[1], max=xrange[2])) y <- with(dd, runif(n, min=yrange[1], max=yrange[2])) z <- with(dd, runif(n, min=zrange[1], max=zrange[2])) result[[i]] <- pp3(x,y,z,domain) } if(drop && nsim == 1) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) 
return(result) } rpoispp3 <- function(lambda, domain=box3(), nsim=1, drop=TRUE) { domain <- as.box3(domain) v <- volume(domain) if(!(is.numeric(lambda) && length(lambda) == 1)) stop("lambda must be a single numeric value") np <- rpois(nsim, lambda * v) dd <- as.list(domain)[c("xrange", "yrange", "zrange")] result <- vector(mode="list", length=nsim) for(i in 1:nsim) { ni <- np[i] x <- with(dd, runif(ni, min=xrange[1], max=xrange[2])) y <- with(dd, runif(ni, min=yrange[1], max=yrange[2])) z <- with(dd, runif(ni, min=zrange[1], max=zrange[2])) result[[i]] <- pp3(x,y,z,domain) } if(drop && nsim == 1) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } spatstat.core/R/multistrauss.R0000644000176200001440000002043714144333463016162 0ustar liggesusers# # # multistrauss.S # # $Revision: 2.23 $ $Date: 2015/03/31 03:57:11 $ # # The multitype Strauss process # # MultiStrauss() create an instance of the multitype Strauss process # [an object of class 'interact'] # # ------------------------------------------------------------------- # MultiStrauss <- local({ # ......... define interaction potential MSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { # assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] # apply relevant threshold to each pair of points str <- (d <= rxu) # assign str[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
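# delMS() deactivates the interactions selected by 'which' (their radii
# are set to NA); if no interactions remain, the model degenerates to a
# Poisson process.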
delMS <- function(which, types, radii) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(MultiStrauss(types, radii)) } # Set up basic object except for family and parameters BlankMSobject <- list( name = "Multitype Strauss process", creator = "MultiStrauss", family = "pairwise.family", # evaluated later pot = MSpotential, par = list(types=NULL, radii = NULL), # to be filled in later parnames = c("possible types", "interaction distances"), pardesc = c("vector of possible types", "matrix of interaction distances"), hasInf = FALSE, selfstart = function(X, self) { if(!is.null(self$par$types)) return(self) types <- levels(marks(X)) MultiStrauss(types=types,radii=self$par$radii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types if(waxlyrical('gory')) { splat(nrow(radii), "types of points") if(!is.null(types)) { splat("Possible types: ") print(noquote(types)) } else splat("Possible types:\t not yet determined") } cat("Interaction radii:\n") print(signif(radii, getOption("digits"))) invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- (row(r) <= col(r)) & (!is.na(r)) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) gr <- gamma[required] return(all(is.finite(gr) & gr <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types # problems? required <- !is.na(radii) okgamma <- is.finite(gamma) & (gamma <= 1) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMS(naughty, types, radii)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMS, types=types, radii=radii) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...)
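# Interaction range: the largest radius whose fitted interaction
# parameter gamma is non-trivial, i.e. |log(gamma)| > epsilon;
# returns 0 if no interaction is active.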
{ r <- self$par$radii active <- !is.na(r) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankMSobject) <- "interact" # finally create main function MultiStrauss <- function(radii, types=NULL) { if((missing(radii) || !is.matrix(radii)) && is.matrix(types)) { ## old syntax: (types=NULL, radii) radii <- types types <- NULL } radii[radii == 0] <- NA out <- instantiate.interact(BlankMSobject, list(types=types, radii = radii)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } MultiStrauss <- intermaker(MultiStrauss, BlankMSobject) MultiStrauss }) spatstat.core/R/sdr.R0000644000176200001440000002214414144333464014171 0ustar liggesusers#' #' sdr.R #' #' Sufficient Dimension Reduction #' #' Matlab original: Yongtao Guan #' Translated to R by: Suman Rakshit #' Adapted for spatstat: Adrian Baddeley #' #' GNU Public Licence 2.0 || 3.0 #' #' $Revision: 1.15 $ $Date: 2020/01/30 05:10:49 $ #' sdr <- function(X, covariates, ...) { UseMethod("sdr") } sdr.ppp <- local({ sdr.ppp <- function(X, covariates, method=c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1=1, Dim2=1, predict=FALSE, ...) { stopifnot(is.ppp(X)) method <- match.arg(method) trap.extra.arguments(...) #' ensure 'covariates' is a list of compatible images if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im))) stop("Argument 'covariates' must be a list of images") nc <- length(covariates) if(nc == 0) stop("Need at least one covariate!") if(nc < Dim1 + (method == "TSE") * Dim2) stop(paste(if(method == "TSE") "Dim1 + Dim2" else "Dim1", "must not exceed the number of covariates"), call.=FALSE) if(nc > 1 && !do.call(compatible, unname(covariates))) covariates <- do.call(harmonise, covariates) #' extract corresponding pixel values including NA's Ypixval <- sapply(lapply(covariates, as.matrix), as.vector) #' compute sample mean and covariance matrix m <- colMeans(Ypixval, na.rm=TRUE) V <- cov(Ypixval, use="complete") #' evaluate each image at point data locations YX <- sapply(covariates, safelook, Y=X) #' apply precomputed standardisation Zx <- t(t(YX) - m) %*% matrixinvsqrt(V) #' ready coordsX <- coords(X) result <- switch(method, DR = calc.DR(COV=V, z=Zx, Dim=Dim1), NNIR = calc.NNIR(COV=V, z=Zx, pos=coordsX, Dim=Dim1), SAVE = calc.SAVE(COV=V, z=Zx, Dim=Dim1), SIR = calc.SIR(COV=V, z=Zx ), TSE = calc.TSE(COV=V, z=Zx, pos=coordsX, Dim1=Dim1, Dim2=Dim2) ) #' covnames <- names(covariates) %orifnull% paste0("Y", 1:nc) dimnames(result$B) <- list(covnames, paste0("B", 1:ncol(result$B))) if(method == "TSE") { result$M1 <- namez(result$M1) result$M2 <- namez(result$M2) } else { result$M <- namez(result$M) } if(predict) result$Y <- sdrPredict(covariates, result$B) return(result) } safelook <- function(Z, Y, ...) { safelookup(Z, Y, ...) 
  }
  namez <- function(M, prefix="Z") {
    dimnames(M) <- list(paste0(prefix, 1:nrow(M)),
                        paste0(prefix, 1:ncol(M)))
    return(M)
  }

  sdr.ppp
})

sdrPredict <- function(covariates, B) {
  if(!is.matrix(B)) {
    if(is.list(B) && is.matrix(BB <- B$B)) B <- BB else
      stop("B should be a matrix, or the result of a call to sdr()",
           call.=FALSE)
  }
  if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im)))
    stop("Argument 'covariates' must be a list of images")
  stopifnot(nrow(B) == length(covariates))
  result <- vector(mode="list", length=ncol(B))
  for(j in seq_along(result)) {
    cj <- as.list(B[,j])
    Zj <- mapply("*", cj, covariates, SIMPLIFY=FALSE)
    result[[j]] <- im.apply(Zj, sum)
  }
  names(result) <- colnames(B)
  return(as.solist(result))
}

##............ DR (Directional Regression) ..........................

calc.DR <- function(COV, z, Dim){
  ## Description: Naive Directional Regression Method
  ## Input:
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ##   Dim - the CS dimension
  ## Output:
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  ss <- nrow(z)
  ncov <- ncol(z)
  ## M1 <- (t(z) %*% z)/ss - diag(1,ncov)
  M1 <- crossprod(z)/ss - diag(1,ncov)
  M1 <- M1 %*% M1                               # the SAVE kernel
  covMean <- matrix(colMeans(z),ncol=1)
  M2 <- covMean %*% t(covMean)
  M3 <- M2 * (base::norm(covMean, type="2"))^2  # the SIR kernel
  M2 <- M2 %*% M2                               # the SIR-2 kernel
  M <- (M1 + M2 + M3)/3                         # the DR kernel
  SVD <- svd(M)
  B <- SVD$u[,1:Dim]
  B <- matrixinvsqrt(COV) %*% B                 # back to original scale
  return(list(B=B, M=M))
}

## ............ NNIR (Nearest Neighbor Inverse Regression) ...........

calc.NNIR <- function(COV, z, pos, Dim) {
  ## Description: Nearest Neighbor Inverse Regression
  ## Input:
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ##   pos - the position of SPP events
  ##   Dim - the CS dimension
  ## Output:
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  ss <- nrow(z)      # sample size
  # ncov <- ncol(z)  # predictor dimension
  jj <- nnwhich(pos)              # identify nearest neighbour of each point
  dir <- z - z[jj, , drop=FALSE]  # empirical direction
  IM <- sumouter(dir)  # inverse of kernel matrix: sum of outer(dir[i,], dir[i,])
  M <- solve(IM/ss)    # invert kernel matrix
  SVD <- svd(M)
  B <- matrixinvsqrt(COV) %*% SVD$u[, 1:Dim, drop=FALSE]
  return(list(B=B, M=M))
}

## ........... SAVE (Sliced Average Variance Estimation) ...........

calc.SAVE <- function(COV, z, Dim){
  ## Description: Sliced Average Variance Estimation
  ## Input
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ##   Dim - the central space dimension
  ## Value
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  # ss <- nrow(z)
  ncov <- ncol(z)
  M <- diag(1,ncov) - cov(z)
  M <- M %*% M
  SVD <- svd(M)
  B <- SVD$u[,1:Dim]
  B <- matrixinvsqrt(COV) %*% B
  return(list(B=B, M=M))
}

##.......... SIR (Sliced Inverse Regression) ......................

calc.SIR <- function(COV, z){
  ## Description: Sliced Inverse Regression
  ## Input:
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ## Output:
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  covMean <- colMeans(z)
  B <- matrixinvsqrt(COV) %*% covMean  # do SIR estimation
  B <- B/sqrt(sum(B^2))                # normalise to unit length
  M <- covMean %*% t(covMean)          # create kernel matrix
  return(list(B=B, M=M))
}

## ............. TSE (Two-Step Estimation) ....................
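## In brief: step 1 estimates S1 from the DR kernel M1; step 2 projects
## the NNIR kernel M2 onto the orthogonal complement of the estimated S1
## before extracting S2.  With B1 the estimated basis of S1, the code
## below uses the projector
##     Q = I - B1 (B1' B1)^{-1} B1'
## and replaces M2 by Q M2 Q.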
calc.TSE <- function(COV, z, pos, Dim1, Dim2) {
  ## Description: A Two-Step Method
  ## Input:
  ##   COV  - cov{X(s)}
  ##   z    - standardized X(s) on SPP locations
  ##   Dim1 - the S1 dimension
  ##   Dim2 - the S2 dimension
  ## Output:
  ##   B - the estimated CS basis. Its first Dim1 columns
  ##       are estimating S1 and the remaining Dim2 columns are
  ##       estimating S2. In case of null space, a zero vector is reported.
  ##   M1 - the kernel matrix of DR
  ##   M2 - the kernel matrix of NNIR, which might be subject
  ##        to some change, depending on the results of M1.
  # ss <- nrow(z)   # sample size
  ncov <- ncol(z)   # predictor dimension
  est1 <- calc.DR(COV, z, ncov)         # do DR estimation
  est2 <- calc.NNIR(COV, z, pos, ncov)  # do NNIR estimation
  M1 <- est1$M
  M2 <- est2$M
  if(Dim1 > 0) {
    U <- svd(M1)$u
    B1 <- U[ , 1:Dim1, drop=FALSE]      # get S1 estimate
    ## Q <- diag(1, ncov) - B1 %*% solve(t(B1) %*% B1) %*% t(B1)
    Q <- diag(1, ncov) - B1 %*% solve(crossprod(B1)) %*% t(B1)
    # contract orthogonal basis
    M2 <- Q %*% M2 %*% Q                # do constrained NNIR
  } else {
    B1 <- matrix(0, ncov, 1)
  }
  if(Dim2 > 0) {
    U <- svd(M2)$u                      # do SVD for possibly updated M2
    B2 <- U[ , 1:Dim2, drop=FALSE]      # get basis estimator
  } else {
    B2 <- matrix(0, ncov, 1)
  }
  B <- matrixinvsqrt(COV) %*% cbind(B1,B2)
  return(list(B=B, M1=M1, M2=M2))
}

## ////////////////// ADDITIONAL FUNCTIONS /////////////////////

subspaceDistance <- function(B0,B1) {
  ## ======================================================== #
  ## Evaluate the distance between the two linear spaces S(B0) and S(B1).
  ## The measure used is the one proposed by Li et al. (2004).
  ## ======================================================== #
  stopifnot(is.matrix(B0))
  stopifnot(is.matrix(B1))
  ## Proj0 <- B0 %*% solve((t(B0) %*% B0)) %*% t(B0) # Proj matrix on S(B0)
  Proj0 <- B0 %*% solve(crossprod(B0)) %*% t(B0)   # Proj matrix on S(B0)
  lam <- svd(B1)          # check whether B1 is singular
  U <- lam$u
  D <- lam$d
  # V <- lam$v
  B2 <- U[, D > 1e-09]    # keep non-singular directions
  Proj1 <- B2 %*% solve((t(B2) %*% B2)) %*% t(B2)  # Proj matrix on S(B.hat)
  Svd <- svd(Proj0 - Proj1)  # Do svd for P0-P1
  dist <- max(abs(Svd$d))    # Get the maximum absolute svd value
  return(dist)
}

dimhat <- function(M){
  #' Description: Maximum Descent Estimator for CS Dim
  #' Input:
  #'   M - the estimated kernel matrix
  #' Output:
  #'   dimhat - the estimated CS dim (assume dim>0)
  stopifnot(is.matrix(M))
  ncov <- ncol(M)           # predictor dimension
  maxdim <- min(ncov-1, 5)  # maximum structure dimension
                            # (capped at 5, and must not exceed
                            #  length(dif) = ncov-1 below)
  SVD <- svd(M)             # svd of kernel matrix
  lam <- SVD$d
  eps <- 1e-06
  lam <- lam + rep(eps,ncov)  # add ridge effect
  lam1 <- lam[-ncov]
  lam2 <- lam[-1]
  dif <- lam1/lam2
  dif <- dif[1 : maxdim]    # the magnitude of drop
  retval <- which.max(dif)  # find Maximum Descent estimator
  return(retval)
}
spatstat.core/R/sigtrace.R0000644000176200001440000001430414144333464015201 0ustar liggesusers#
#  sigtrace.R
#
#  $Revision: 1.10 $  $Date: 2016/02/11 09:36:11 $
#
#  Significance traces
#

dclf.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=2)

mad.sigtrace <- function(X, ...)
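## MAD test trace: deviation measured in the supremum norm (exponent = Inf)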
mctest.sigtrace(X, ..., exponent=Inf) mctest.sigtrace <- function(X, fun=Lest, ..., exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, c("envelope", "hasenvelope"))) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim result <- mctestSigtraceEngine(R, devdata, devsim, interpolate=interpolate, confint=confint, alpha=alpha, exponent=exponent, unitname=unitname(X)) result <- hasenvelope(result, Z$envelope) # envelope may be NULL return(result) } mctestSigtraceEngine <- local({ mctestSigtraceEngine <- function(R, devdata, devsim, ..., interpolate=FALSE, confint=TRUE, alpha=0.05, exponent=2, unitname=NULL) { nsim <- ncol(devsim) if(!interpolate) { #' Monte Carlo p-value datarank <- apply(devdata < devsim, 1, sum) + apply(devdata == devsim, 1, sum)/2 + 1 pvalue <- datarank/(nsim+1) } else { #' interpolated p-value devs <- cbind(devdata, devsim) pvalue <- apply(devs, 1, rowwise.interp.tailprob) } if(!confint) { #' create fv object without confidence interval p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha), argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R, desc = c("Interval endpoint R", "calculated p-value %s", "threshold for significance"), labl=c("R", "%s(R)", paste(alpha)), unitname = unitname, fname = "p") fvnames(p, ".") <- c("pest", "alpha") } else { # confidence interval if(!interpolate) { #' Agresti-Coull confidence interval successes <- datarank - 1 trials <- nsim z <- qnorm(1 - (1-0.95)/2) nplus <- trials + z^2 pplus <- (successes + z^2/2)/nplus sigmaplus <- sqrt(pplus * (1-pplus)/nplus) lo <- pplus - z * sigmaplus hi <- pplus + z * sigmaplus } else { #' confidence interval by delta method pSE <- apply(devs, 1, rowwise.se) z <- qnorm(1 - (1-0.95)/2) lo <- pmax(0, pvalue - z * pSE) hi <- pmin(1, pvalue + z * pSE) } #' create fv object with confidence interval p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha, lo=lo, hi=hi), argu="R", ylab = quote(p(R)), valu="pest", fmla = . 
~ R, desc = c("Interval endpoint R", "calculated p-value %s", "threshold for significance", "lower 95%% limit for p-value", "upper 95%% limit for p-value"), labl=c("R", "%s(R)", paste(alpha), "lo(R)", "hi(R)"), unitname = unitname, fname = "p") fvnames(p, ".") <- c("pest", "alpha", "lo", "hi") fvnames(p, ".s") <- c("lo", "hi") } return(p) } ## interpolated p-value interpol.tailprob <- function(x, q) { sigma <- bw.nrd0(x) mean(pnorm(q, mean=x, sd=sigma, lower.tail=FALSE)) } rowwise.interp.tailprob <- function(x) { interpol.tailprob(x[-1], x[1]) } ## estimated SE of p-value interpol.se <- function(x, q) { sigma <- bw.nrd0(x) z <- density(x, sigma) v <- mean(z$y * pnorm(q, mean=z$x, sd=sigma, lower.tail=FALSE)^2) * diff(range(z$x)) sqrt(v)/length(x) } rowwise.se <- function(x) { interpol.se(x[-1], x[1]) } mctestSigtraceEngine }) dg.sigtrace <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate=FALSE, confint=TRUE, alpha=0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) { alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## generate or extract simulated patterns and functions if(verbose) cat("Generating first-level data...") E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=verbose, envir.simul=env.here) ## get first level MC test significance trace if(verbose) cat("Computing significance trace...") T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, rmin=rmin, alternative=alternative, leaveout=leaveout, interpolate=interpolate, confint=FALSE, verbose=verbose, ...) R <- T1$R phat <- T1$pest ## second level traces if(verbose) cat(" Done.\nGenerating second-level data... [silently] ..") Pat <- attr(E, "simpatterns") T2list <- lapply(Pat, mctest.sigtrace, fun=fun, nsim=nsimsub, exponent=exponent, rmin=rmin, alternative=alternative, leaveout=leaveout, interpolate=interpolate, confint=FALSE, verbose=FALSE, ...) phati <- sapply(T2list, getElement, name="pest") ## Dao-Genton p-value if(verbose) cat(" Computing significance trace...") result <- mctestSigtraceEngine(R, -phat, -phati, interpolate=FALSE, confint=confint, exponent=exponent, alpha=alpha, unitname=unitname(X)) if(verbose) cat(" Done.\n") if(savefuns || savepatterns) result <- hasenvelope(result, E) return(result) } spatstat.core/R/lurking.R0000644000176200001440000005472514144333462015064 0ustar liggesusers# Lurking variable plot for arbitrary covariate. # # # $Revision: 1.71 $ $Date: 2021/10/30 01:42:37 $ # lurking <- function(object, ...) 
{ UseMethod("lurking") } lurking.ppp <- lurking.ppm <- function(object, covariate, type="eem", cumulative=TRUE, ..., plot.it=TRUE, plot.sd=is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) { cl <- match.call() clenv <- parent.frame() ## validate object if(is.ppp(object)) { X <- object object <- ppm(X ~1, forcefit=TRUE) dont.complain.about(X) } else verifyclass(object, "ppm") ## default name for covariate if(missing(covname) || is.null(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else NULL } Xsim <- NULL if(!identical(envelope, FALSE)) { ## compute simulation envelope if(!identical(envelope, TRUE)) { ## some kind of object Y <- envelope if(is.list(Y) && all(sapply(Y, is.ppp))) { Xsim <- Y envelope <- TRUE } else if(inherits(Y, "envelope")) { Xsim <- attr(Y, "simpatterns") if(is.null(Xsim)) stop("envelope does not contain simulated point patterns") envelope <- TRUE } else stop("Unrecognised format of argument: envelope") nXsim <- length(Xsim) if(missing(nsim) && (nXsim < nsim)) { warning(paste("Only", nXsim, "simulated patterns available")) nsim <- nXsim } } } ## may need to refit the model if(plot.sd && is.null(getglmfit(object))) object <- update(object, forcefit=TRUE, use.internal=TRUE) ## match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") ## extract spatial locations Q <- quad.ppm(object) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) ## subset of quadrature points used to fit model subQset <- getglmsubset(object) if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q)) ################################################################# ## compute the covariate if(is.im(covariate)) { covvalues <- covariate[quadpoints, drop=FALSE] covrange <- internal$covrange %orifnull% range(covariate, finite=TRUE) } else if(is.vector(covariate) && is.numeric(covariate)) { covvalues <- covariate covrange <- internal$covrange %orifnull% range(covariate, finite=TRUE) if(length(covvalues) != quadpoints$n) stop("Length of covariate vector,", length(covvalues), "!=", quadpoints$n, ", number of quadrature points") } else if(is.expression(covariate)) { ## Expression involving covariates in the model glmdata <- getglmdata(object) if(is.null(glmdata)) { ## default glmdata <- data.frame(x=quadpoints$x, y=quadpoints$y) if(is.marked(quadpoints)) glmdata$marks <- marks(quadpoints) } else if(is.data.frame(glmdata)) { ## validate if(nrow(glmdata) != npoints(quadpoints)) stop("Internal error: nrow(glmdata) =", nrow(glmdata), "!=", npoints(quadpoints), "= npoints(quadpoints)") } else stop("Internal error: format of glmdata is not understood") ## ensure x and y are in data frame if(!all(c("x","y") %in% names(glmdata))) { glmdata$x <- quadpoints$x glmdata$y <- quadpoints$y } if(!is.null(object$covariates)) { ## Expression may involve an external covariate that's not used in model neednames <- all.vars(covariate) if(!all(neednames %in% colnames(glmdata))) { moredata <- mpl.get.covariates(object$covariates, quadpoints, covfunargs=object$covfunargs) use <- !(names(moredata) %in% colnames(glmdata)) glmdata <- 
cbind(glmdata, moredata[,use,drop=FALSE]) } } ## Evaluate expression sp <- parent.frame() covvalues <- eval(covariate, envir= glmdata, enclos=sp) covrange <- internal$covrange %orifnull% range(covvalues, finite=TRUE) if(!is.numeric(covvalues)) stop("The evaluated covariate is not numeric") } else stop(paste("The", sQuote("covariate"), "should be either", "a pixel image, an expression or a numeric vector")) ################################################################# ## Secret exit if(identical(internal$getrange, TRUE)) return(covrange) ################################################################ ## Residuals/marks attached to appropriate locations. ## Stoyan-Grabarnik weights are attached to the data points only. ## Others (residuals) are attached to all quadrature points. resvalues <- if(!is.null(rv)) rv else if(type=="eem") eem(object, check=check) else residuals(object, type=type, check=check) if(inherits(resvalues, "msr")) { ## signed or vector-valued measure; extract increment masses resvalues <- resvalues$val if(ncol(as.matrix(resvalues)) > 1) stop("Not implemented for vector measures; use [.msr to split into separate components") } ## NAMES OF THINGS ## name of the covariate if(is.null(covname)) covname <- if(is.expression(covariate)) covariate else "covariate" ## type of residual/mark if(missing(typename)) typename <- if(!is.null(rv)) "rv" else "" clip <- (!is.poisson.ppm(object) || !missing(clipwindow)) && !is.null(clipwindow) ## CALCULATE stuff <- LurkEngine(object=object, type=type, cumulative=cumulative, plot.sd=plot.sd, quadpoints=quadpoints, wts=wts, Z=Z, subQset=subQset, covvalues=covvalues, resvalues=resvalues, clip=clip, clipwindow=clipwindow, cov.is.im=is.im(covariate), covrange=covrange, typename=typename, covname=covname, cl=cl, clenv=clenv, oldstyle=oldstyle, check=check, verbose=verbose, nx=nx, splineargs=splineargs, envelope=envelope, nsim=nsim, nrank=nrank, Xsim=Xsim, internal=internal) ## --------------- PLOT ---------------------------------- if(plot.it && inherits(stuff, "lurk")) { plot(stuff, ...) return(invisible(stuff)) } else { return(stuff) } } # ........... calculations common to all methods ......................... 
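## Before the engine, a minimal usage sketch for lurking() (illustrative
## only; guarded by if(FALSE) so it is never run at load time).
## 'cells' is a standard dataset from spatstat.data; the covariate here
## is the y coordinate, supplied as an expression.
if(FALSE) {
  fit <- ppm(cells ~ x)
  lurking(fit, expression(y), type="raw", cumulative=TRUE)
}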
LurkEngine <- function(object, type, cumulative=TRUE, plot.sd=TRUE, quadpoints, wts, Z, subQset, covvalues, resvalues, clip, clipwindow, cov.is.im=FALSE, covrange, typename, covname, cl, clenv, oldstyle=FALSE, check=TRUE, verbose=FALSE, nx, splineargs, envelope=FALSE, nsim=39, nrank=1, Xsim=list(), internal=list(), checklength=TRUE) { stopifnot(is.ppm(object) || is.slrm(object)) ## validate covariate values covvalues <- as.numeric(covvalues) resvalues <- as.numeric(resvalues) if(checklength) { nqu <- npoints(quadpoints) nco <- length(covvalues) nre <- length(resvalues) nwt <- length(wts) nZ <- length(Z) should <- if(type == "eem") c(nco, nwt, nZ) else c(nco, nwt, nZ, nre) if(!all(should == nqu)) { typeblurb <- paste("type =", sQuote(type)) typeblurb <- paren(typeblurb, "[") gripe1 <- paste("Failed initial data check", paste0(typeblurb, ":")) gripe2 <- paste("!=", nqu, "= npoints(quadpoints)") if(nco != nqu) stop(paste(gripe1, "length(covvalues) =", nco, gripe2)) if(nwt != nqu) stop(paste(gripe1, "length(wts) =", nwt, gripe2)) if(nZ != nqu) stop(paste(gripe1, "length(Z) =", nZ, gripe2)) } if(type == "eem" && nre != sum(Z)) stop(paste("Failed initial data check [type='eem']: ", "length(resvalues) =", nre, "!=", sum(Z), "= sum(Z)")) } ## nbg <- is.na(covvalues) if(any(offending <- nbg & subQset)) { if(cov.is.im) { warning(paste(sum(offending), "out of", length(offending), "quadrature points discarded because", ngettext(sum(offending), "it lies", "they lie"), "outside the domain of the covariate image")) } else { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is NA", "they are NA"))) } } ## remove data with invalid covariate values ok <- !nbg & subQset if(!(allok <- all(ok))) { quadpoints <- quadpoints[ok] covvalues <- covvalues[ok] okdata <- ok[Z] # which original data points are retained Zok <- Z & ok # which original quadrature pts are retained as data pts Z <- Z[ok] # which of the retained quad pts are data pts wts <- wts[ok] resvalues <- resvalues[if(type == "eem") okdata else ok] } if(any(is.infinite(covvalues) | is.nan(covvalues))) stop("covariate contains Inf or NaN values") ## now determine the data points datapoints <- quadpoints[Z] ## Quadrature points marked by covariate value covq <- quadpoints %mark% as.numeric(covvalues) if(type == "eem") { ## data points marked by residuals and covariate res <- datapoints %mark% as.numeric(resvalues) covres <- datapoints %mark% (as.numeric(covvalues)[Z]) } else { ## quadrature points marked by residuals and covariate res <- quadpoints %mark% as.numeric(resvalues) covres <- quadpoints %mark% as.numeric(covvalues) } ## Clip to subwindow if needed if(clip) { covq <- covq[clipwindow] res <- res[clipwindow] covres <- covres[clipwindow] clipquad <- inside.owin(quadpoints, w=clipwindow) wts <- wts[ clipquad ] Z <- Z[ clipquad ] } ## handle internal data saveworking <- isTRUE(internal$saveworking) Fisher <- internal$Fisher # possibly from a larger model covrange <- internal$covrange ## >>>>>>>>>>>> START ANALYSIS <<<<<<<<<<<<<<<<<<<<<<<< ## ----------------------------------------------------------------------- ## (A) EMPIRICAL CUMULATIVE FUNCTION ## based on data points if type="eem", otherwise on quadrature points ## Reorder the data/quad points in order of increasing covariate value ## and then compute the cumulative sum of their residuals/marks markscovres <- marks(covres) o <- fave.order(markscovres) covsort <- markscovres[o] marksort <- marks(res)[o] cummark <- 
cumsum(ifelse(is.na(marksort), 0, marksort)) if(anyDuplicated(covsort)) { right <- !duplicated(covsort, fromLast=TRUE) covsort <- covsort[right] cummark <- cummark[right] } ## we'll plot(covsort, cummark) in the cumulative case ## (B) THEORETICAL MEAN CUMULATIVE FUNCTION ## based on all quadrature points ## Range of covariate values covqmarks <- marks(covq) covrange <- covrange %orifnull% range(covqmarks, na.rm=TRUE) if(diff(covrange) > 0) { ## Suitable breakpoints cvalues <- seq(from=covrange[1L], to=covrange[2L], length.out=nx) csmall <- cvalues[1L] - diff(cvalues[1:2]) cbreaks <- c(csmall, cvalues) ## cumulative area as function of covariate values covclass <- cut(covqmarks, breaks=cbreaks) increm <- tapply(wts, covclass, sum) cumarea <- cumsum(ifelse(is.na(increm), 0, increm)) } else { ## Covariate is constant cvalues <- covrange[1L] covclass <- factor(rep(1, length(wts))) cumarea <- increm <- sum(wts) } ## compute theoretical mean (when model is true) mean0 <- if(type == "eem") cumarea else numeric(length(cumarea)) ## we'll plot(cvalues, mean0) in the cumulative case ## (A'),(B') DERIVATIVES OF (A) AND (B) ## Required if cumulative=FALSE ## Estimated by spline smoothing (with x values jittered) if(!cumulative) { ## fit smoothing spline to (A) ss <- do.call(smooth.spline, append(list(covsort, cummark), splineargs) ) ## estimate derivative of (A) derivmark <- predict(ss, covsort, deriv=1)$y ## similarly for (B) ss <- do.call(smooth.spline, append(list(cvalues, mean0), splineargs) ) derivmean <- predict(ss, cvalues, deriv=1)$y } ## ----------------------------------------------------------------------- ## Store what will be plotted if(cumulative) { empirical <- data.frame(covariate=covsort, value=cummark) theoretical <- data.frame(covariate=cvalues, mean=mean0) } else { empirical <- data.frame(covariate=covsort, value=derivmark) theoretical <- data.frame(covariate=cvalues, mean=derivmean) } ## ------------------------------------------------------------------------ ## (C) STANDARD DEVIATION if desired ## (currently implemented only for Poisson) ## (currently implemented only for cumulative case) if(plot.sd && !is.poisson(object)) warning(paste("standard deviation is calculated for Poisson model;", "not valid for this model")) if(plot.sd && cumulative) { if(is.ppm(object)) { ## Fitted intensity at quadrature points lambda <- fitted(object, type="trend", check=check) if(!allok) lambda <- lambda[ok] ## Fisher information for coefficients asymp <- vcov(object,what="internals") Fisher <- Fisher %orifnull% asymp$fisher ## Local sufficient statistic at quadrature points suff <- asymp$suff if(!allok) suff <- suff[ok, , drop=FALSE] } else if(is.slrm(object)) { ## Fitted intensity at quadrature points lambda <- predict(object, type="intensity")[quadpoints, drop=FALSE] ## Fisher information for coefficients Fisher <- Fisher %orifnull% vcov(object, what="Fisher") ## Sufficient statistic at quadrature points suff <- model.matrix(object) if(!allok) suff <- suff[ok, , drop=FALSE] } else stop("object should be a ppm or slrm") ## Clip if required if(clip) { lambda <- lambda[clipquad] suff <- suff[clipquad, , drop=FALSE] ## suff is a matrix } ## First term: integral of lambda^(2p+1) switch(type, pearson={ varI <- cumarea }, raw={ ## Compute sum of w*lambda for quadrature points in each interval dvar <- tapply(wts * lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }, inverse=, ## same as eem eem={ ## Compute sum of w/lambda for 
quadrature points in each interval dvar <- tapply(wts / lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }) ## variance-covariance matrix of coefficients V <- try(solve(Fisher), silent=TRUE) if(inherits(V, "try-error")) { warning("Fisher information is singular; reverting to oldstyle=TRUE") oldstyle <- TRUE } if(any(dim(V) != ncol(suff))) { #' drop rows and columns nama <- colnames(suff) V <- V[nama, nama, drop=FALSE] } working <- NULL ## Second term: B' V B if(oldstyle) { varII <- 0 if(saveworking) working <- data.frame(varI=varI) } else { ## lamp = lambda^(p + 1) lamp <- switch(type, raw = lambda, pearson = sqrt(lambda), inverse =, eem = as.integer(lambda > 0)) ## Compute sum of w * lamp * suff for quad points in intervals Bcontrib <- as.vector(wts * lamp) * suff dB <- matrix(, nrow=length(cumarea), ncol=ncol(Bcontrib), dimnames=list(NULL, colnames(suff))) for(j in seq_len(ncol(dB))) dB[,j] <- tapply(Bcontrib[,j], covclass, sum, na.rm=TRUE) ## tapply() returns NA when the table is empty dB[is.na(dB)] <- 0 ## Cumulate columns B <- apply(dB, 2, cumsum) if(!is.matrix(B)) B <- matrix(B, nrow=1) ## compute B' V B for each i varII <- quadform(B, V) ## was: varII <- diag(B %*% V %*% t(B)) if(saveworking) working <- cbind(data.frame(varI=varI, varII=varII), as.data.frame(B)) } ## ## variance of residuals varR <- varI - varII ## trap numerical errors nbg <- (varR < 0) if(any(nbg)) { ran <- range(varR) varR[nbg] <- 0 relerr <- abs(ran[1L]/ran[2L]) nerr <- sum(nbg) if(relerr > 1e-6) { warning(paste(nerr, "negative", ngettext(nerr, "value (", "values (min="), signif(ran[1L], 4), ")", "of residual variance reset to zero", "(out of", length(varR), "values)")) } } theoretical$sd <- sqrt(varR) } ## if(envelope) { ## compute envelopes by simulation cl$plot.it <- FALSE cl$envelope <- FALSE cl$rv <- NULL if(is.null(Xsim)) Xsim <- simulate(object, nsim=nsim, progress=verbose) values <- NULL if(verbose) { cat("Processing.. 
") state <- list() } for(i in seq_len(nsim)) { ## evaluate lurking variable plot for simulated pattern cl$object <- update(object, Xsim[[i]]) result.i <- eval(cl, clenv) ## interpolate empirical values onto common sequence f.i <- with(result.i$empirical, approxfun(covariate, value, rule=2)) val.i <- f.i(theoretical$covariate) values <- cbind(values, val.i) if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") hilo <- if(nrank == 1) apply(values, 1, range) else apply(values, 1, orderstats, k=c(nrank, nsim-nrank+1)) theoretical$upper <- hilo[1L,] theoretical$lower <- hilo[2L,] } ## ---------------- RETURN COORDINATES ---------------------------- stuff <- list(empirical=empirical, theoretical=theoretical) attr(stuff, "info") <- list(typename=typename, cumulative=cumulative, covrange=covrange, covname=covname, oldstyle=oldstyle) if(saveworking) attr(stuff, "working") <- working class(stuff) <- "lurk" return(stuff) } # plot a lurk object plot.lurk <- function(x, ..., shade="grey") { xplus <- append(x, attr(x, "info")) with(xplus, { ## work out plot range mr <- range(0, empirical$value, theoretical$mean, na.rm=TRUE) if(!is.null(theoretical$sd)) mr <- range(mr, theoretical$mean + 2 * theoretical$sd, theoretical$mean - 2 * theoretical$sd, na.rm=TRUE) if(!is.null(theoretical$upper)) mr <- range(mr, theoretical$upper, theoretical$lower, na.rm=TRUE) ## start plot vname <- paste(if(cumulative)"cumulative" else "marginal", typename) do.call(plot, resolve.defaults( list(covrange, mr), list(type="n"), list(...), list(xlab=covname, ylab=vname))) ## Envelopes if(!is.null(theoretical$upper)) { Upper <- theoretical$upper Lower <- theoretical$lower } else if(!is.null(theoretical$sd)) { Upper <- with(theoretical, mean+2*sd) Lower <- with(theoretical, mean-2*sd) } else Upper <- Lower <- NULL if(!is.null(Upper) && !is.null(Lower)) { xx <- theoretical$covariate if(!is.null(shade)) { ## shaded envelope region shadecol <- if(is.colour(shade)) shade else "grey" xx <- c(xx, rev(xx)) yy <- c(Upper, rev(Lower)) dont.complain.about(yy) do.call.matched(polygon, resolve.defaults(list(x=quote(xx), y=quote(yy)), list(...), list(border=shadecol, col=shadecol))) } else { do.call(lines, resolve.defaults( list(x = quote(xx), y=quote(Upper)), list(...), list(lty=3))) do.call(lines, resolve.defaults( list(x = quote(xx), y = quote(Lower)), list(...), list(lty=3))) } } ## Empirical lines(value ~ covariate, empirical, ...) ## Theoretical mean do.call(lines, resolve.defaults( list(mean ~ covariate, quote(theoretical)), list(...), list(lty=2))) }) return(invisible(NULL)) } #' print a lurk object print.lurk <- function(x, ...) 
{ splat("Lurking variable plot (object of class 'lurk')") info <- attr(x, "info") with(info, { splat("Residual type: ", typename) splat("Covariate on horizontal axis: ", covname) splat("Range of covariate values: ", prange(covrange)) splat(if(cumulative) "Cumulative" else "Non-cumulative", "plot") }) has.bands <- !is.null(x$theoretical$upper) has.sd <- !is.null(x$theoretical$sd) if(!has.bands && !has.sd) { splat("No confidence bands computed") } else { splat("Includes", if(has.sd) "standard deviation for" else NULL, "confidence bands") if(!is.null(info$oldstyle)) splat("Variance calculation:", if(info$oldstyle) "old" else "new", "style") } return(invisible(NULL)) } spatstat.core/R/plot.fasp.R0000644000176200001440000001333014144333463015303 0ustar liggesusers# # plot.fasp.R # # $Revision: 1.30 $ $Date: 2020/11/17 03:47:24 $ # plot.fasp <- function(x, formule=NULL, ..., subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) { # plot dimensions which <- x$which if(transpose) which <- t(which) nrows <- nrow(which) ncols <- ncol(which) # Determine the overall title of the plot if(banner) { if(!is.null(title)) overall <- title else if(!is.null(x$title)) overall <- x$title else { if(prod(dim(which)) > 1) overall <- "Array of diagnostic functions" else overall <- "Diagnostic function" if(is.null(x$dataname)) overall <- paste(overall,".",sep="") else overall <- paste(overall," for ",x$dataname,".",sep="") } if(length(overall) > 1) overall <- paste(overall, collapse="\n") nlines <- if(!is.character(overall)) 1 else length(unlist(strsplit(overall, "\n"))) } # If no formula is given, look for a default formula in x: defaultplot <- is.null(formule) if(defaultplot && !is.null(x$default.formula)) formule <- x$default.formula if(!is.null(formule)) { # ensure formulae are given as character strings. formule <- FormatFaspFormulae(formule, "formule") # Number of formulae should match number of functions. nf <- length(formule) nfun <- length(x$fns) if(nf == 1 && nfun > 1) formule <- rep.int(formule, nfun) else if(nf != nfun) stop(paste("Wrong number of entries in", sQuote("formule"))) } # Check on the length of the subset argument. ns <- length(subset) if(ns > 1) { if(ns != length(x$fns)) stop("Wrong number of entries in subset argument.\n") msub <- TRUE } else msub <- FALSE # compute common x, y axis limits for all plots ? 
xlim <- ylim <- NULL if(samex || samey) { cat("Computing limits\n") # call plot.fv to determine plot limits of each panel for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(!is.na(k)) { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset lims <- plot(fun, fmla, subset=sub, limitsonly=TRUE) # update the limits if(samex) xlim <- range(xlim, lims$xlim) if(samey) ylim <- range(ylim, lims$ylim) } } } } ############################################################# # Set up the plot layout n <- nrows * ncols # panels 1..n = plot panels codes <- matrix(seq_len(n), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) # annotation as chosen if(outerlabels) { # column headings colhead.codes <- max(codes) + (1:ncols) colhead.height <- 0.2 codes <- rbind(colhead.codes, codes) heights <- c(colhead.height, heights) # row headings rowhead.codes <- max(codes) + (1:nrows) rowhead.width <- 0.2 codes <- cbind(c(0,rowhead.codes), codes) widths <- c(rowhead.width, widths) } if(banner) { # overall banner top.code <- max(codes) + 1 top.height <- 0.1 * (1+nlines) codes <- rbind(top.code, codes) heights <- c(top.height, heights) } # declare layout layout(codes, widths=widths, heights=heights) ############################################################ # Plot the function panels # # determine annotation colNames <- colnames(which) rowNames <- rownames(which) nrc <- max(nrows, ncols) ann.def <- par("ann") && (nrc <= 3) # determine margin around each panel if(is.null(mar.panel)) mar.panel <- if(nrc > 3 && outerlabels) rep.int(1/nrc, 4) else par("mar") opa <- par(mar=mar.panel, xpd=TRUE) # # plot each function for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(is.na(k)) { plot(0,0,type='n',xlim=c(0,1), ylim=c(0,1),axes=FALSE,xlab='',ylab='', ...) } else { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset main <- if(outerlabels) "" else if(nrows == 1) colNames[j] else if(ncols == 1) rowNames[i] else paren(paste(rowNames[i], colNames[j], sep=",")) do.call(plot, resolve.defaults(list(x=quote(fun), fmla=quote(fmla), subset=quote(sub)), list(...), list(xlim=xlim, ylim=ylim, main=main, legend=legend), list(ann=ann.def, axes=ann.def, frame.plot=TRUE))) } } } ############################################################ # # Annotation as selected if(outerlabels) { par(mar=rep.int(0,4), xpd=TRUE) # Plot the column headers for(j in 1:ncols) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,colNames[j], cex=cex.outerlabels) } # Plot the row labels for(i in 1:nrows) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,rowNames[i], srt=90, cex=cex.outerlabels) } } if(banner) { par(mar=rep.int(0,4), xpd=TRUE) # plot the banner plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0, overall, cex=cex) } # revert layout(1) par(opa) return(invisible(NULL)) } spatstat.core/R/morisita.R0000644000176200001440000000240714144333462015226 0ustar liggesusers# # morisita.R # # $Revision: 1.3 $ $Date: 2020/11/17 01:30:18 $ # miplot <- function(X, ...) 
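# For each grid of nquad x nquad quadrats, the Morisita index computed
# below is
#     M = Q * sum_i n_i (n_i - 1) / (N (N - 1))
# where Q is the number of quadrats, n_i the count in quadrat i, and
# N the total number of points; M is 1 on average under complete
# spatial randomness (hence the reference line at height 1).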
{ Xname <- short.deparse(substitute(X)) X <- as.ppp(X) W <- X$window N <- X$n if(W$type != "rectangle") stop("Window of X is not a rectangle - Morisita index undefined") a <- min(diff(W$xrange), diff(W$yrange)) maxnquad <- floor(a/mean(nndist(X))) if(maxnquad <= 1) stop("Not enough points for a Morisita plot") mindex <- numeric(maxnquad) for(nquad in 1:maxnquad) { qq <- quadratcount(X, nquad, nquad) tt <- as.vector(as.table(qq)) mindex[nquad] <- length(tt) * sum(tt * (tt-1))/(N*(N-1)) } quadsize <- diameter(W)/(1:maxnquad) ok <- (quadsize <= a) quadsize <- quadsize[ok] mindex <- mindex[ok] unitinfo <- summary(unitname(W))$axis do.call(plot.default, resolve.defaults(list(quote(quadsize), quote(mindex)), list(...), list(xlim=c(0,max(quadsize)), ylim=c(0,max(1, mindex)), xlab=paste("Diameter of quadrat", unitinfo), ylab="Morisita index", main=paste("Morisita plot for", Xname)))) abline(h=1, lty=2) return(invisible(NULL)) } spatstat.core/R/evalcovar.R0000644000176200001440000002155114144333462015362 0ustar liggesusers#' #' evalcovar.R #' #' evaluate covariate values at data points and at pixels #' #' $Revision: 1.36 $ $Date: 2021/04/08 03:42:40 $ #' evalCovar <- function(model, covariate, ...) { UseMethod("evalCovar") } evalCovar.ppm <- local({ evalCovar.ppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), dimyx=NULL, eps=NULL, interpolate=TRUE, jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels ispois <- is.poisson(model) csr <- ispois && is.stationary(model) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Qname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=ispois, spacename="two dimensions") X <- data.ppm(model) W <- as.owin(model) #' explicit control of pixel resolution if(!is.null(dimyx) || !is.null(eps)) W <- as.mask(W, dimyx=dimyx, eps=eps) if(!is.null(subset)) { #' restrict to subset if required X <- X[subset] W <- W[subset, drop=FALSE] } #' evaluate covariate if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(model)) { #' ................... unmarked ....................... 
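      #' Two things are needed whatever the format of 'covariate':
      #' ZX, the covariate values at the data points, and Z, a pixellated
      #' version of the covariate on the window W.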
      if(is.im(covariate)) {
        type <- "im"
        if(!interpolate) {
          #' look up covariate values
          ZX <- safelookup(covariate, X)
        } else {
          #' evaluate at data points by interpolation
          ZX <- interp.im(covariate, X$x, X$y)
          #' fix boundary glitches
          if(any(uhoh <- is.na(ZX)))
            ZX[uhoh] <- safelookup(covariate, X[uhoh])
        }
        #' covariate values for pixels inside window
        Z <- covariate[W, drop=FALSE]
        #' corresponding mask
        W <- as.owin(Z)
      } else if(is.function(covariate)) {
        type <- "function"
        #' evaluate exactly at data points
        ZX <- covariate(X$x, X$y)
        if(!all(is.finite(ZX)))
          warning("covariate function returned NA or Inf values")
        #' window
        W <- as.mask(W)
        #' covariate in window
        Z <- as.im(covariate, W=W)
        #' collapse function body to single string
        covname <- singlestring(covname)
      } else if(is.null(covariate)) {
        stop("The covariate is NULL", call.=FALSE)
      } else stop(paste("The covariate should be",
                        "an image, a function(x,y)",
                        "or one of the characters",
                        sQuote("x"), "or", sQuote("y")),
                  call.=FALSE)
      #' values of covariate in window
      Zvalues <- as.vector(Z[W, drop=TRUE])
      #' corresponding fitted [conditional] intensity values
      lambda <- as.vector(predict(model, locations=W,
                                  type=lambdatype)[W, drop=TRUE])
      #' pixel area (constant)
      pixelarea <- with(Z, xstep * ystep)
    } else {
      #' ................... marked .......................
      if(!is.multitype(model))
        stop("Only implemented for multitype models (factor marks)")
      marx <- marks(X, dfok=FALSE)
      possmarks <- levels(marx)
      npts <- npoints(X)
      #' single image: replicate
      if(is.im(covariate)) {
        covariate <- rep(list(covariate), times=length(possmarks))
        names(covariate) <- as.character(possmarks)
      }
      #'
      if(is.list(covariate) && all(sapply(covariate, is.im))) {
        #' list of images
        type <- "im"
        if(length(covariate) != length(possmarks))
          stop("Number of images does not match number of possible marks")
        #' evaluate covariate at each data point
        ZX <- numeric(npts)
        for(k in seq_along(possmarks)) {
          ii <- (marx == possmarks[k])
          covariate.k <- covariate[[k]]
          if(!interpolate) {
            #' look up covariate values (for the image and data points
            #' of the current mark)
            values <- safelookup(covariate.k, X[ii])
          } else {
            #' interpolate
            values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii])
            #' fix boundary glitches
            if(any(uhoh <- is.na(values)))
              values[uhoh] <- safelookup(covariate.k, X[ii][uhoh])
          }
          ZX[ii] <- values
        }
        #' restrict covariate images to window
        Z <- solapply(covariate, "[", i=W, drop=FALSE)
        #' extract pixel locations and pixel values
        Zframes <- lapply(Z, as.data.frame)
        #' covariate values at each pixel inside window
        Zvalues <- unlist(lapply(Zframes, getElement, name="value"))
        #' pixel locations
        locn <- lapply(Zframes, getxy)
        #' tack on mark values
        for(k in seq_along(possmarks))
          locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k]))
        loc <- do.call(rbind, locn)
        #' corresponding fitted [conditional] intensity values
        lambda <- predict(model, locations=loc, type=lambdatype)
        #' pixel areas
        pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined))
      } else if(is.function(covariate)) {
        type <- "function"
        #' evaluate exactly at data points
        ZX <- functioncaller(x=X$x, y=X$y, m=marx, f=covariate, ...)
        #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) }
        #' same window
        W <- as.mask(W)
        #' covariate in window
        Z <- list()
        for(k in seq_along(possmarks))
          Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W, ...)
Zvalues <- unlist(lapply(Z, pixelvalues)) #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=W, type=lambdatype) lambda <- unlist(lapply(lambda, pixelvalues)) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' collapse function body to single string covname <- singlestring(covname) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... #' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' lambda image(s) lambdaimage <- predict(model, locations=W, type=lambdatype) #' wrap up values <- list(Zimage = Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info)) } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} pixarea <- function(z) { z$xstep * z$ystep } npixdefined <- function(z) { sum(!is.na(z$v)) } pixelvalues <- function(z) { as.data.frame(z)[,3L] } getxy <- function(z) { z[,c("x","y")] } functioncaller <- function(x,y,m,f,...) { nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") value <- if(nf == 2) f(x,y) else if(nf == 3) f(x,y,m) else f(x,y,m,...) return(value) } evalCovar.ppm }) spatstat.core/R/mppm.R0000644000176200001440000006326114144333464014357 0ustar liggesusers# # mppm.R # # $Revision: 1.102 $ $Date: 2021/03/29 08:36:08 $ # mppm <- local({ mppm <- function(formula, data, interaction=Poisson(), ..., iformula=NULL, random=NULL, weights=NULL, use.gam=FALSE, reltol.pql=1e-3, gcontrol=list() ) { ## remember call cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") ## Validate arguments if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) stopifnot(is.hyperframe(data)) data.sumry <- summary(data, brief=TRUE) npat <- data.sumry$ncases if(npat == 0) stop(paste("Hyperframe", sQuote("data"), "has zero rows")) if(!is.null(iformula) && !inherits(iformula, "formula")) stop(paste("Argument", sQuote("iformula"), "should be a formula or NULL")) if(has.random <- !is.null(random)) { if(!inherits(random, "formula")) stop(paste(sQuote("random"), "should be a formula or NULL")) if(use.gam) stop("Sorry, random effects are not available in GAMs") } if(! 
(is.interact(interaction) || is.hyperframe(interaction))) stop(paste("The argument", sQuote("interaction"), "should be a point process interaction object (class", dQuote("interact"), "), or a hyperframe containing such objects", sep="")) if(is.null(weights)) { weights <- rep(1, npat) } else { check.nvector(weights, npat, things="rows of data", oneok=TRUE) if(length(weights) == 1L) weights <- rep(weights, npat) } backdoor <- list(...)$backdoor if(is.null(backdoor) || !is.logical(backdoor)) backdoor <- FALSE ############## HANDLE FORMULAS ############################ ##------ Trend Formula ------------------ ## check all variables in trend formula are recognised checkvars(formula, data.sumry$col.names, extra=c("x","y","id","marks"), bname="data") ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") allvars <- variablesinformula(trend) ## --- Interaction formula ----- ## names of interactions as they may appear in formulae itags <- if(is.hyperframe(interaction)) names(interaction) else "Interaction" ninteract <- length(itags) ## ensure `iformula' is a formula without a LHS ## and determine which columns of `interaction' are actually used if(is.null(iformula)) { if(ninteract > 1) stop(paste("interaction hyperframe has more than 1 column;", "you must specify the choice of interaction", "using argument", sQuote("iformula"))) iused <- TRUE iformula <- as.formula(paste("~", itags)) } else { if(length(iformula) > 2) stop(paste("The interaction formula", sQuote("iformula"), "should not have a left hand side")) ## valid variables in `iformula' are interactions and data frame columns permitted <- paste(sQuote("interaction"), "or permitted name in", sQuote("data")) checkvars(iformula, itags, extra=c(data.sumry$dfnames, "id"), bname=permitted) ivars <- variablesinformula(iformula) ## check which columns of `interaction' are actually used iused <- itags %in% ivars if(sum(iused) == 0) stop("No interaction specified in iformula") ## OK allvars <- c(allvars, ivars) } ## --- Random effects formula ---- if(!is.null(random)) { if(length(random) > 2) stop(paste("The random effects formula", sQuote("random"), "should not have a left hand side")) checkvars(random, itags, extra=c(data.sumry$col.names, "x", "y", "id"), bname="either data or interaction") allvars <- c(allvars, variablesinformula(random)) } ## ---- variables required (on RHS of one of the above formulae) ----- allvars <- unique(allvars) ######## EXTRACT DATA ##################################### ## Insert extra variable 'id' data <- cbind.hyperframe(data, id=factor(1:npat)) data.sumry <- summary(data, brief=TRUE) allvars <- unique(c(allvars, "id")) ## Extract the list of responses (point pattern/quadscheme) Y <- data[, Yname, drop=TRUE] if(npat == 1) Y <- solist(Y) Yclass <- data.sumry$classes[Yname] if(Yclass == "ppp") { ## convert to quadrature schemes, for efficiency's sake Y <- solapply(Y, quadscheme, ...) 
## Ydescrip <- "point patterns" ## not used } else if(Yclass == "quad") { Y <- as.solist(Y) ## Ydescrip <- "quadrature schemes" ## not used } else { stop(paste("Column", dQuote(Yname), "of data", "does not consist of point patterns (class ppp)", "nor of quadrature schemes (class quad)"), call.=FALSE) } ## Extract sub-hyperframe of data named in formulae datanames <- names(data) used.cov.names <- allvars[allvars %in% datanames] has.covar <- (length(used.cov.names) > 0) if(has.covar) { dfvar <- used.cov.names %in% data.sumry$dfnames imvar <- data.sumry$types[used.cov.names] == "im" if(any(nbg <- !(dfvar | imvar))) stop(paste("Inappropriate format for", ngettext(sum(nbg), "covariate", "covariates"), paste(sQuote(used.cov.names[nbg]), collapse=", "), ": should contain image objects or vector/factor")) covariates.hf <- data[, used.cov.names, drop=FALSE] has.design <- any(dfvar) dfvarnames <- used.cov.names[dfvar] datadf <- if(has.design) as.data.frame(covariates.hf, discard=TRUE, warn=FALSE) else NULL if(has.design) { ## check for NA's in design covariates # if(any(nbg <- apply(is.na(datadf), 2, any))) if(any(nbg <- matcolany(is.na(datadf)))) stop(paste("There are NA's in the", ngettext(sum(nbg), "covariate", "covariates"), commasep(dQuote(names(datadf)[nbg])))) } } else { has.design <- FALSE datadf <- NULL } ############### INTERACTION ################################### ## ensure `interaction' is a hyperframe of `interact' objects ## with the right number of rows. ## All entries in a column must represent the same process ## (possibly with different values of the irregular parameters). ## Extract the names of the point processes. if(is.interact(interaction)) { ninteract <- 1 processes <- list(Interaction=interaction$name) interaction <- hyperframe(Interaction=interaction, id=1:npat)[,1] constant <- c(Interaction=TRUE) } else if(is.hyperframe(interaction)) { inter.sumry <- summary(interaction) ninteract <- inter.sumry$nvars ## ensure it has the same number of rows as 'data' nr <- inter.sumry$ncases if(nr == 1 && npat > 1) { interaction <- cbind.hyperframe(id=1:npat, interaction)[,-1] inter.sumry <- summary(interaction) } else if(nr != npat) stop(paste("Number of rows in", sQuote("interaction"), "=", nr, "!=", npat, "=", "number of rows in", sQuote("data"))) ## check all columns contain interaction objects ok <- (inter.sumry$classes == "interact") if(!all(ok)) { nbg <- names(interaction)[!ok] nn <- sum(!ok) stop(paste(ngettext(nn, "Column", "Columns"), paste(sQuote(nbg), collapse=", "), ngettext(nn, "does", "do"), "not consist of interaction objects")) } ## all entries in a column must represent the same process type ## (with possibly different values of the irregular parameters) ok <- unlist(lapply(as.list(interaction), consistentname)) if(!all(ok)) { nbg <- names(interaction)[!ok] stop(paste("Different interactions may not appear in a single column.", "Violated by", paste(sQuote(nbg), collapse=", "))) } processes <- lapply(as.list(interaction), firstname) ## determine whether all entries in a column are EXACTLY the same ## (=> have the same parameters) constant <- (inter.sumry$storage == "hyperatom") if(any(!constant)) { others <- interaction[,!constant] constant[!constant] <- sapply(lapply(as.list(others), unique), length) == 1 } } ## check for trivial (Poisson) interactions trivial <- unlist(lapply(as.list(interaction), allpoisson)) ## check that iformula does not combine two interactions on one row nondfnames <- datanames[!(datanames %in% data.sumry$dfnames)] ip <- impliedpresence(itags, 
                          iformula, datadf, nondfnames)
  if(any(rowSums(ip) > 1))
    stop("iformula invokes more than one interaction on a single row")
  ##
  #################### BERMAN-TURNER DEVICE #########################
  ##
  ## set up list to contain the glm variable names for each interaction.
  Vnamelist <- rep(list(character(0)), ninteract)
  names(Vnamelist) <- itags
  ## set up list to contain 'IsOffset'
  Isoffsetlist <- rep(list(logical(0)), ninteract)
  names(Isoffsetlist) <- itags
  ####
  ## ---------------- L O O P ---------------------------------
  for(i in 1:npat) {
    ## extract responses and covariates for presentation to ppm()
    Yi <- Y[[i]]
    covariates <-
      if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL
    if(has.design) {
      ## convert each data frame value to an image
      covariates[dfvarnames] <-
        lapply(as.list(as.data.frame(covariates[dfvarnames])),
               as.im, W=Yi$data$window)
    }
    ## Generate data frame and glm info for this point pattern
    ## First the trend covariates
    prep0 <- bt.frame(Yi, trend, Poisson(), ..., covariates=covariates,
                      allcovar=TRUE, use.gam=use.gam)
    glmdat <- prep0$glmdata
    ## now the nontrivial interaction terms
    for(j in (1:ninteract)[iused & !trivial]) {
      inter <- interaction[i,j,drop=TRUE]
      if(!is.null(ss <- inter$selfstart))
        interaction[i,j] <- inter <- ss(Yi$data, inter)
      prepj <- bt.frame(Yi, ~1, inter, ..., covariates=covariates,
                        allcovar=TRUE, use.gam=use.gam,
                        vnamebase=itags[j], vnameprefix=itags[j])
      ## store GLM variable names & check consistency
      vnameij <- prepj$Vnames
      if(i == 1)
        Vnamelist[[j]] <- vnameij
      else if(!identical(vnameij, Vnamelist[[j]]))
        stop("Internal error: Unexpected conflict in glm variable names")
      ## store offset indicator vectors
      isoffset.ij <- prepj$IsOffset
      if(i == 1)
        Isoffsetlist[[j]] <- isoffset.ij
      else if(!identical(isoffset.ij, Isoffsetlist[[j]]))
        stop("Internal error: Unexpected conflict in offset indicators")
      ## GLM data frame for this interaction
      glmdatj <- prepj$glmdata
      if(nrow(glmdatj) != nrow(glmdat))
        stop("Internal error: differing numbers of rows in glm data frame")
      iterms.ij <- glmdatj[vnameij]
      subset.ij <- glmdatj$.mpl.SUBSET
      ## tack on columns of interaction terms
      glmdat <- cbind(glmdat, iterms.ij)
      ## update subset (quadrature points where cif is positive)
      glmdat$.mpl.SUBSET <- glmdat$.mpl.SUBSET & subset.ij
    }
    ## assemble the Mother Of All Data Frames
    if(i == 1) {
      moadf <- glmdat
    } else {
      ## There may be new or missing columns
      recognised <- names(glmdat) %in% names(moadf)
      if(any(!recognised)) {
        newnames <- names(glmdat)[!recognised]
        zeroes <- as.data.frame(matrix(0, nrow(moadf), length(newnames)))
        names(zeroes) <- newnames
        moadf <- cbind(moadf, zeroes)
      }
      provided <- names(moadf) %in% names(glmdat)
      if(any(!provided)) {
        absentnames <- names(moadf)[!provided]
        zeroes <- as.data.frame(matrix(0, nrow(glmdat), length(absentnames)))
        names(zeroes) <- absentnames
        glmdat <- cbind(glmdat, zeroes)
      }
      ## Ensure factor columns are consistent
      ## (compare the accumulated frame 'moadf' with the new rows 'glmdat')
      m.isfac <- sapply(as.list(moadf), is.factor)
      g.isfac <- sapply(as.list(glmdat), is.factor)
      if(any(uhoh <- (m.isfac != g.isfac)))
        errorInconsistentRows("values (factor and non-factor)",
                              colnames(moadf)[uhoh])
      if(any(m.isfac)) {
        m.levels <- lapply(as.list(moadf)[m.isfac], levels)
        g.levels <- lapply(as.list(glmdat)[g.isfac], levels)
        clash <- !mapply(identical, x=m.levels, y=g.levels)
        if(any(clash))
          errorInconsistentRows("factor levels",
                                (colnames(moadf)[m.isfac])[clash])
      }
      ## Finally they are compatible
      moadf <- rbind(moadf, glmdat)
    }
  }
  ## ---------------- E N D  o f  L O O P  --------------------------
  ##
  ## add case weights
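  ## (each quadrature row inherits the weight of the point pattern,
  ##  i.e. the hyperframe row, from which it came)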
moadf$caseweight <- weights[moadf$id] ## ## backdoor exit - Berman-Turner frame only - used by predict.mppm if(backdoor) return(moadf) ## ## ## -------------------------------------------------------------------- ## ## Construct the glm formula for the Berman-Turner device ## ## Get trend part from the last-computed prep0 fmla <- prep0$trendfmla ## Tack on the RHS of the interaction formula if(!all(trivial)) fmla <- paste(fmla, "+", as.character(iformula)[[2]]) ## Make it a formula fmla <- as.formula(fmla) ## Ensure that each interaction name is recognised. ## ## To the user, an interaction is identified by its `tag' name ## (default tag: "Interaction") ## ## Internally, an interaction is fitted using its sufficient statistic ## which may be 0, 1 or k-dimensional. ## The column names of the sufficient statistic are the Vnames ## returned from ppm. ## The Poisson process is a special case: it is 0-dimensional (no Vnames). ## ## For k-dimensional sufficient statistics, we modify the formulae, ## replacing the interaction name by (vname1 + vname2 + .... + vnamek) ## for(j in (1:ninteract)[iused]) { vnames <- Vnamelist[[j]] tag <- itags[j] isoffset <- Isoffsetlist[[j]] if(any(isoffset)) { ## enclose names of offset variables in 'offset()' vnames[isoffset] <- paste("offset(", vnames[isoffset], ")", sep="") } if(trivial[j]) ## Poisson case: add a column of zeroes moadf[[tag]] <- 0 else if(!identical(vnames, tag)) { if(length(vnames) == 1) ## tag to be replaced by vname vn <- paste("~", vnames[1]) else ## tag to be replaced by (vname1 + vname2 + .... + vnamek) vn <- paste("~(", paste(vnames, collapse=" + "), ")") ## pull out formula representation of RHS vnr <- as.formula(vn)[[2]] ## make substitution rule: list(=) vnsub <- list(vnr) names(vnsub) <- tag ## perform substitution in trend formula fmla <- eval(substitute(substitute(fom, vnsub), list(fom=fmla))) ## perform substitution in random effects formula if(has.random && tag %in% variablesinformula(random)) random <- eval(substitute(substitute(fom, vnsub), list(fom=random))) } } fmla <- as.formula(fmla) ## Fix scoping problem assign("glmmsubset", moadf$.mpl.SUBSET, envir=environment(fmla)) for(nama in colnames(moadf)) assign(nama, moadf[[nama]], envir=environment(fmla)) ## Satisfy package checker glmmsubset <- .mpl.SUBSET <- moadf$.mpl.SUBSET .mpl.W <- moadf$.mpl.W caseweight <- moadf$caseweight ## ---------------- FIT THE MODEL ------------------------------------ want.trend <- prep0$info$want.trend if(want.trend && use.gam) { fitter <- "gam" ctrl <- do.call(gam.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W * caseweight, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) } else if(!is.null(random)) { fitter <- "glmmPQL" ctrl <- do.call(lmeControl, resolve.defaults(gcontrol, list(maxIter=50))) attr(fmla, "ctrl") <- ctrl # very strange way to pass argument fixed <- 42 # to satisfy package checker FIT <- hackglmmPQL(fmla, random=random, family=quasi(link=log, variance=mu), weights=.mpl.W * caseweight, data=moadf, subset=glmmsubset, control=attr(fixed, "ctrl"), reltol=reltol.pql) deviants <- -2 * logLik(FIT) } else { fitter <- "glm" ctrl <- do.call(glm.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W * caseweight, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) } env <- list2env(moadf, parent=sys.frame(sys.nframe())) 
  environment(FIT$terms) <- env
  ## maximised log-pseudolikelihood
  W <- with(moadf, .mpl.W * caseweight)
  SUBSET <- moadf$.mpl.SUBSET
  Z <- (moadf$.mpl.Y != 0)
  maxlogpl <- -(deviants/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET))
  ##
  ## ---------------- PACK UP THE RESULT --------------------------------
  ##
  result <- list(Call = list(callstring=callstring, cl=cl),
                 Info = list(has.random=has.random,
                             has.covar=has.covar,
                             has.design=has.design,
                             Yname=Yname,
                             used.cov.names=used.cov.names,
                             allvars=allvars,
                             names.data=names(data),
                             is.df.column=(data.sumry$storage == "dfcolumn"),
                             rownames=row.names(data),
                             correction=prep0$info$correction,
                             rbord=prep0$info$rbord),
                 Fit = list(fitter=fitter,
                            use.gam=use.gam,
                            fmla=fmla,
                            FIT=FIT,
                            moadf=moadf,
                            Vnamelist=Vnamelist,
                            Isoffsetlist=Isoffsetlist),
                 Inter = list(ninteract=ninteract,
                              interaction=interaction,
                              iformula=iformula,
                              iused=iused,
                              itags=itags,
                              processes=processes,
                              trivial=trivial,
                              constant=constant),
                 formula=formula,
                 trend=trend,
                 iformula=iformula,
                 random=random,
                 npat=npat,
                 data=data,
                 Y=Y,
                 maxlogpl=maxlogpl,
                 datadf=datadf)
  class(result) <- c("mppm", class(result))
  return(result)
}

# helper functions

checkvars <- function(f, b, extra=NULL, bname=short.deparse(substitute(b))) {
  fname <- short.deparse(substitute(f))
  fvars <- variablesinformula(f)
  bvars <- if(is.character(b)) b else names(b)
  bvars <- c(bvars, extra)
  nbg <- !(fvars %in% bvars)
  if(any(nbg)) {
    nn <- sum(nbg)
    stop(paste(ngettext(nn, "Variable", "Variables"),
               commasep(dQuote(fvars[nbg])),
               "in", fname,
               ngettext(nn, "is not one of the", "are not"),
               "names in", bname))
  }
  return(NULL)
}

consistentname <- function(x) {
  xnames <- unlist(lapply(x, getElement, name="name"))
  return(length(unique(xnames)) == 1)
}

firstname <- function(z) { z[[1]]$name }

allpoisson <- function(x) all(sapply(x, is.poisson.interact))

marklevels <- function(x) { levels(marks(x)) }

errorInconsistentRows <- function(what, offending) {
  stop(paste("There are inconsistent", what,
             "for the",
             ngettext(length(offending), "variable", "variables"),
             commasep(sQuote(offending)),
             "between different rows of the hyperframe 'data'"),
       call.=FALSE)
}

mppm
})

is.mppm <- function(x) { inherits(x, "mppm") }

coef.mppm <- function(object, ...) { coef(object$Fit$FIT) }

fixef.mppm <- function(object, ...) {
  if(object$Fit$fitter == "glmmPQL") fixef(object$Fit$FIT) else
    coef(object$Fit$FIT)
}

ranef.mppm <- function(object, ...) {
  if(object$Fit$fitter == "glmmPQL") ranef(object$Fit$FIT) else
    as.data.frame(matrix(, nrow=object$npat, ncol=0))
}

print.mppm <- function(x, ...) { print(summary(x, ..., brief=TRUE)) }

is.poisson.mppm <- function(x) {
  trivial <- x$Inter$trivial
  iused <- x$Inter$iused
  all(trivial[iused])
}

quad.mppm <- function(x) { as.solist(x$Y) }

data.mppm <- function(x) { solapply(x$Y, getElement, name="data") }

is.marked.mppm <- function(X, ...) { any(sapply(data.mppm(X), is.marked)) }
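## Example (a minimal sketch, assuming the 'waterstriders' dataset from
## spatstat.data): fit a common trend to several point patterns and query
## the result with the methods defined above.
##     H <- hyperframe(Y = waterstriders)
##     fit <- mppm(Y ~ 1, data = H)
##     is.poisson.mppm(fit)   # TRUE: no interpoint interaction specified
##     coef(fit)              # a single intercept, the log intensity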
is.multitype.mppm <- function(X, ...) {
  any(sapply(data.mppm(X), is.multitype))
}

windows.mppm <- function(x) { solapply(data.mppm(x), Window) }

logLik.mppm <- function(object, ..., warn=TRUE) {
  if(warn && !is.poisson.mppm(object))
    warning(paste("log likelihood is not available for non-Poisson model;",
                  "log-pseudolikelihood returned"))
  ll <- object$maxlogpl
  attr(ll, "df") <- length(fixef(object))
  class(ll) <- "logLik"
  return(ll)
}

AIC.mppm <- function(object, ..., k=2, takeuchi=TRUE) {
  ll <- logLik(object, warn=FALSE)
  pen <- attr(ll, "df")
  if(takeuchi && !is.poisson(object)) {
    vv <- vcov(object, what="all")
    J <- vv$fisher
    H <- vv$internals$A1
    ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J
    JiH <- try(solve(H, J), silent=TRUE)
    if(!inherits(JiH, "try-error")) pen <- sum(diag(JiH))
  }
  return(- 2 * as.numeric(ll) + k * pen)
}

extractAIC.mppm <- function(fit, scale = 0, k = 2, ..., takeuchi = TRUE) {
  edf <- length(coef(fit))
  aic <- AIC(fit, k = k, takeuchi = takeuchi)
  c(edf, aic)
}

getCall.mppm <- function(x, ...) { x$Call$cl }

terms.mppm <- function(x, ...) { terms(formula(x)) }

nobs.mppm <- function(object, ...) { sum(sapply(data.mppm(object), npoints)) }

simulate.mppm <- function(object, nsim=1, ..., verbose=TRUE) {
  subs <- subfits(object)
  nr <- length(subs)
  sims <- list()
  if(verbose) {
    splat("Generating simulated realisations of", nr, "models..")
    state <- list()
  }
  for(irow in seq_len(nr)) {
    model.i <- subs[[irow]]
    dont.complain.about(model.i)
    sims[[irow]] <- do.call(simulate,
                            resolve.defaults(list(object=quote(model.i),
                                                  nsim=nsim, drop=FALSE),
                                             list(...),
                                             list(progress=FALSE)))
    if(verbose) state <- progressreport(irow, nr, state=state)
  }
  sim1list <- lapply(sims, "[[", i=1)
  h <- hyperframe("Sim1"=sim1list)
  if(nsim > 1) {
    for(j in 2:nsim) {
      simjlist <- lapply(sims, "[[", i=j)
      hj <- hyperframe(Sim=simjlist)
      names(hj) <- paste0("Sim", j)
      h <- cbind(h, hj)
    }
  }
  return(h)
}
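## Example (a sketch, continuing the hypothetical 'fit' above):
## simulate.mppm fits each row separately via subfits() and returns a
## hyperframe with one row per original pattern and one column "Sim<j>"
## per realisation.
##     sims <- simulate(fit, nsim = 2)
##     dim(sims)             # c(npat, 2)
##     plot(sims$Sim1[[1]])  # first realisation for the first pattern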
model.matrix.mppm <- function(object, ..., keepNA=TRUE, separate=FALSE) {
  df <- object$Fit$moadf
  FIT <- object$Fit$FIT
  environment(FIT) <- list2env(df)
  #' track subset
  ok <- complete.cases(df) & df$.mpl.SUBSET
  nok <- sum(ok)
  nfull <- nrow(df)
  #'
  if(keepNA) {
    mm <- model.matrix(FIT, ..., subset=NULL, na.action=NULL)
    nr <- nrow(mm)
    if(nr != nfull) {
      if(nr == nok) {
        ## some methods for model.matrix ignore 'subset=NULL'
        mmfull <- matrix(NA, nfull, ncol(mm),
                         dimnames=list(NULL, colnames(mm)))
        mmfull[ok,] <- mm
        mm <- mmfull
      } else {
        stop(paste("Internal error: model matrix has wrong number of rows:",
                   nr, "should be", nfull, "or", nok),
             call.=FALSE)
      }
    }
  } else {
    mm <- model.matrix(FIT, ...)
    if(nrow(mm) != nok)
      stop("Internal error: model matrix has wrong number of rows",
           call.=FALSE)
    df <- df[ok, , drop=FALSE]
  }
  ## get standard attributes of model matrix
  ctr <- attr(mm, "contrasts")
  if(is.null(ctr) && !is.null(ctr <- FIT[["contrasts"]]))
    attr(mm, "contrasts") <- ctr
  ass <- attr(mm, "assign")
  if(is.null(ass)) {
    ass <- FIT[["assign"]]
    if(length(ass) == ncol(mm)) {
      attr(mm, "assign") <- ass
    } else {
      ass <- NULL
      warning("Unable to determine 'assign' in model.matrix.mppm",
              call.=FALSE)
    }
  }
  ## split if required
  if(separate) {
    id <- df$id
    mm <- split.data.frame(mm, id) # see help(split)
    if(!is.null(ass))
      mm <- lapply(mm, "attr<-", which="assign", value=ass)
    if(!is.null(ctr))
      mm <- lapply(mm, "attr<-", which="contrasts", value=ctr)
  }
  return(mm)
}
spatstat.core/R/ord.R0000644000176200001440000000231014144333463014155 0ustar liggesusers#
#
#    ord.S
#
#    $Revision: 1.9 $    $Date: 2020/01/26 04:32:04 $
#
#    Ord process with user-supplied potential
#
#    Ord()    create an instance of the Ord process
#             [an object of class 'interact']
#             with user-supplied potential
#
#
# -------------------------------------------------------------------
#

Ord <- local({

  BlankOrd <- list(
    name     = "Ord process with user-defined potential",
    creator  = "Ord",
    family   = "ord.family",
    pot      = NULL,
    par      = NULL,
    parnames = NULL,
    hasInf   = NA,
    init     = NULL,
    update   = function(self, ...) {
      do.call(Ord, resolve.defaults(list(...),
                                    list(pot=self$pot, name=self$name)))
    },
    print = function(self) {
      cat("Potential function:\n")
      print(self$pot)
      invisible()
    },
    version = NULL
  )
  class(BlankOrd) <- "interact"

  Ord <- function(pot, name) {
    out <- instantiate.interact(BlankOrd)
    out$pot <- pot
    if(!missing(name)) out$name <- name
    return(out)
  }

  Ord <- intermaker(Ord, BlankOrd)
})
spatstat.core/R/bw.CvL.R0000644000176200001440000000202314144333461014463 0ustar liggesusers#'
#'   bandwidth selection method of Cronie and Van Lieshout
#'
#'   $Revision: 1.1 $ $Date: 2019/09/30 08:01:20 $

bw.CvL <- function(X, ..., srange=NULL, ns=16, sigma=NULL, warn=TRUE) {
  stopifnot(is.ppp(X))
  W <- Window(X)
  areaW <- area.owin(W)
  if(!is.null(sigma)) {
    stopifnot(is.numeric(sigma) && is.vector(sigma))
    ns <- length(sigma)
  } else {
    if(!is.null(srange)) check.range(srange) else {
      nnd <- nndist(X)
      srange <- c(min(nnd[nnd > 0]), diameter(W)/2)
    }
    sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns)
  }
  cv <- numeric(ns)
  for(i in 1:ns) {
    si <- sigma[i]
    lamx <- density(X, sigma = si, at = "points",
                    leaveoneout = FALSE, edge = FALSE)
    cv[i] <- ( sum(1/lamx) - areaW )^2
  }
  result <- bw.optim(cv, sigma, iopt=which.min(cv),
                     creator="bw.CvL",
                     criterion="Cronie and van Lieshout",
                     warnextreme=warn, hargnames="srange",
                     unitname=unitname(X))
  return(result)
}
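## Example (a sketch, assuming the 'redwood' dataset from spatstat.data):
## the criterion above exploits the Campbell identity
## E[ sum_i 1/lambda(x_i) ] = |W|, so bw.CvL selects the sigma whose
## kernel intensity estimate best matches the window area in this sense.
##     b <- bw.CvL(redwood)
##     plot(b)                           # criterion as a function of sigma
##     D <- density(redwood, sigma = b)  # estimate at the selected bandwidth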
ÚÞf¬>dÄ.iÙjàÞkŒbø“mIJMg IšmœöiÂ)'Ú&?¨É¡Êe¿´Ž«rÒécÿ â©"Ý‚!;KƒJ ½’¼K‰ÇŒù/@ôfpŒ’+¨aRG”`úbökÇ;‡8â˜äÓ^Àè‡þQö 4FŒÜm7ji‚e0hikâ¯eÌÈ^à+—üÊ/Aß'P| Þ>!X³²né ¸Y•îúøïbp_º_®E匿$$õýÀD,mTN®A´»?{α µÑ,߀H.„~êúQªŒâÕ<@ERs©kaô:áô*L›X²íX<§¥ßj´Hóö¿I Mʙʡ[ñDÜOõAÝ/5à1˜Évîr¨¶Zº®,›—Ñ]®=5a'—q‚ ÚKý`Ð8gøyìMÓ!®o0]Mkp øš4òyò‡R?%˜§)Ñ‚¼?OÞÆ±î’h&rïôZáÄ$`Z³?0Àa÷ÃGÓò¬®Ü¢"AŽìðI‚¾¦¸¢"ñ¯ü4Uz6§Z‡oCU9.Gk|¸çN½GŸÇ±dÓ=¾ìéu¼z…„¼(†FÄQ£ðÝZ/§ò` â…£n(3ÝY¢„úšAÅзÀF÷zE’ØÍz`| w©I âä!ö³·N•F_riLX\Pê*muÏ r!`<¥H£1 —QA‘Kt4öJìKàˆ±ZÆŸ=îkÔ»ÉX=ÔÎÝxG2•½r/î­ÌÙ%“¾‰<Êÿ_ÔYœ³ZÓ)^)Í)Å%Ô;È/ê#Ì0¨‰;Å‹ÙA…hˆÁiãû…”ZÙl—žl1ª•¬[Lr¾Ó´Ù$;ö2FûA°~Dë¹H:’ß‹-,;pÆw¿*ñy‘ýî¦ô[ g-?Õ¸õÀ›0‹tå¯l؆d?$¨»æl’ÚÈÔVÐ1\ú‚ØÔ.¸l9Xkï}ÕíáÍÑìˆ10ã­-M¨ºšÓ$‰ù+ÝÁ0wÁ‹Å¡ïÐCcB–®CÓWDwÜüLpЃDó슲ŸðT&G?|E¦ûà2õŽÖjî{1rùˆ1ú6žAœíšÊM [®yî¶íô8¬‹v…/ªR7‰ "XážÅr˜ÈcÅDÓ/Æ–øónëC„ÄÇÁ[mÈ6¦Ýo qø£DÞ_½ME×Âww‘¼k®<‹>qã[8Úš*pWJ¹I‰UöÃnË:—šK.%Ÿ“¾V“ý^ù!®½+pT.bÒÀ±è3mwMñ‡’üÎÕb:R{å¥ïî®f›–ÖÀFí.Hš… Éã û²c*@±6Ü&MüLV¨çÄ_zó;K CŒGºeí›…r0÷øÕ×Dœr†AÔáôÄóÚ¬ÚÏð1.ЮÍá)S6„Íú: m)y¯ Õ@ÊQÁ¨áyCã—FX[ ²r ·úf*–«g– mœ+Ʋ(÷Ò ÎŒ“3.Kʵ 8&.êjUßáÓ+Ó èPÔ‹ˆC¡Gt霠˜â=E¸i‘«"i÷¡ Žõ=¥Ó$]0¸e£MnÐÞ^SÎ1ç¤4›åzÅÔ/½ØhYÑHÈæ*Q_5œºElõzß§€-έøþ/L.f½i(x…å½Î:¯™»à()³ô3gì¶DþÃg·a$×öl2Мj.C0pw½ñ¡ãíµ¨¨w!ss_¦f‡·KX1rœlZ*ä*IeŽþ¾¥~¦dˆ÷©ýú5_2Û_--ªéçH-aVÂNjRYx±}ux!æ5Xž´#CçŠÂ Ïˆ¿@ÓTbLx.Ù“¥QÍRFËOKøåÆ›ATí=:†X´—ªµü”Å"Oˆ¶,‚â:¤CLpÔ$J)k‰_crI御ª¶õ4´Ê9czk.¯–†>ó`ÙÊdz·§ùÀP›:‚Ë0·Âm=‹3âòŠ™|'¥P øZkõÄ$÷²›.y`Œñ6aO¨ †¨€v<FÕ+Þ¨¨ÑxÄbZT*1ÅÓ×_×3ÌeNSÚ¨‰^¸R¼ëúÖ'tMa¿p#¢êMý‚g½Ñ– ËØÇ–^øÔ¬f¶ì Ô ‡¢Ýý;ô-UXv÷д œ·¿ó[uKh1b$4ßö÷k [pôÕ11äžVfÝÓ½ˆé§+DwÀtxÂ}J'ù{þé}¬¹bjnKoÆ9ìue€®…ƒ/üÆÔ) ¾©´YM>>UPÓÃuÚa§2]2D½¿ÂÛYö"µÓ2ÿ'OWÕAù9uű¦“‰n ®•[0‚/u…¾:Û2 #{‘³Ò{¡øÔ íÂo[³:ËÕu‹r øwÞÍebã±¶P¬!°¹>âÔ_+Öpì×5­´¯D -âå²S€CÁ2¤Zƒ‘!©Éœ®Ë¯nzädI³u=•WËÝXà4™%¥ƒìÏ—šŽ„¿ƒã„]ÕÆŒ¹Æzó5Vñˆ”ÚBYSà7E;|Ò>_J\ÞÖïÆ=R:gR‡Œþ“l© ~r´ `K¸K›póBä+Äæ uÿä먃ªÉ)Èp˜T2ãYÍ¥Ôâ„B…ÐLªû|`ûq¸Vð鬰“®caj“Ó™¥X÷aOs¢sXG‡÷ý Æw´T‡jh/ŠéBp}×T¾þ¬ ’†°R ‡ ß„N˜üØU£@:®š")eeô ~òm´OÒ¡ ÁÝ›´Í(³ŽÕ¢ub:w€ÆÞÅ*¾dyÐy•B‡ÿð†Â(-dæGšßn` ö:*ç«aÝ“±GðŠLÆb;/©ÔàÜZ:wd7¾ˆ!¢ÓeFEpzt $oªç„݆֭njǶöWåès3§ƒeÓмS¹a•«È|Û˜/xáá«ÖG‘pÏ\üÈ¡ û_iô±}x‰ŒM *œ2yÆdä#˜L 8Y(/®È)pèØ›¯FÈ“A'V¶ ÑŒJ½„I[b§*LÜ]É툲ÌÌŸ2rDk·k˜[Ž@Õ¹œŸY“‰÷D¤n©ê&>œ,÷òãÜË©÷ÑŸÿ¢”»È‹ <¥¸f·é`{ZAÞsQÜÂ]ð:™kÚ ÊP}¸ÎžðûõJ““Ú¯)=Mé;î€éŽ4|¾VHØôꢞQš‘6­üj\ÅzßètS$&?;Yvò XÈž¨ì,°†s=£™|¹”gUëØY Ð+>Û#Ú5»ñÃ*»ž2–l¤|w A8” O¾Ýß|sC7þM0ÇöAÜ)ëðC˜:L/÷¤{ý¦ä4$®à1 Ç€+ž•cÔ-÷D"ƒ¯tÚ*Ú-Lq†ó3»£ç,€&Žõ c›‹"8èÂ:YÛ4f< ¥r‰R ¾£Y”\6/í2Úüda"þcÿ4ž2ø=øÕʆª 6r&¾tÁJìçŠ×¤~wùVËÒˬ©v¨Øé‚›*^(Ëœ”‰#õ•ý؆`Ÿs3{;(µçL£‘LÒòEÉ7¯¤¥Ÿ–Ç~:µýÁsð"€Œ-k'­3Œ²LÖk• "Ñ vcæ»Hœþ±êzÿ£1u6÷ùÿÀù‹}Ÿz4ClW^MÐ Ñõ%¬PPò‚˜Ç©é% ן]ÎÂû¹—(¶TKbàö>™ü‰‰]Æ£úgo¡Q)ü(>‹º­^˜-C½å^fŽ»xóbiéíN´1 >Ÿ½üb)ÕžÀçߺùÁv"£ pgi_úN±]ýéBÿÐȵb3¡Æ+ÔPÇ ®’£Ñѯ¡ëmQþfÏž—îÔѧD˜ D›•FRäžÇúcr…{®ãõ—ÍÈ+%…OD¼ Ó•Êœn-2¼ùø­ëkt,p@ÿx(mtT%^ ´Ò96PI#øÕû4ÅX\Ú»§QDÓH\ø:Ò±µKÿ'2Ö[¡U|0-⛨›ûãK‰ç)wb¢‘DfR®áñí›;ذ¼¢Êâý«Ÿ›Its9Zö•Ré=þ-%-ºÄ½½…öWOÛ~¥}¨Ñ]ŽÄÚùléZ[¬˜t­¢ÈQôŽP|¡wI¨@\´*y O áÍ4ﺤâÕveá ÙÃ;Zƒ­o‘v²ì «Â¢ÒÃ]VxüÔ-¾úÚ}< DX ÎT>0~±»œ7®jz¢î4éh¿÷ Ïø;=·Q(x <ùF±‹¦Ü‚bÆeÁ·p)‡f§[ÅÁ}{¡¸˜~„¢asµ×šaÐÖµr0QË«´dY\¯\Š^DFŸ ð'ðq’†_†»}¤âÃNÎR×]ÿdj Mî‡ß5oŒ~ŠØó¢) ’93öŒJÊ’¥¸´ûµ¢#ÄlôvÊWÛ¾Œ*‰©)¬­õùïD鎊ã?›ÚçÜ Z_?¸®jí¢Ø‚ì—w €Ië["nBh^7›À•Å„e»|߉M@ ‘¾q$Z-2'3fø YÇ öÿS9(r[´ïÅ1§etÈt†¢^fÿïzB”zm5µ!Ÿu#FCásì.s{ß—^‘xÁ7˜.›D+Q>#çp#U Pk®ð‚©¨ùqÉÁʺŽÓ%ÕmqüGâ·ØÎU &E«ÄOáþ>Ìýþf¨{0(s !—;Í¿ç€ÈÞ§®ih¯»'·í^;uI2|ŠâýZÙ±=„ "–£ˆáûÓúýEŸþ'ÁOL]Ss j®v3‚96ì¬áÉ6OÕr«ezx¦ØÚ°½kèü¿10m«÷–á^ƒcéëQËa>ö„’b<€îÕi>ÊÊÜE¬÷xÌêÖ¤åÍz®9bëBÞ¨ÝÊÕíð[C/„N4Tâ°.C5Ü/ $9=Õ†gBÓª@³ºøgiX‡Š1TO÷¥­Í6â ¼˜j¿™ôµ:=_#33ø½ëí¬ð}Åè´Ñ*#d¼âîŠÊ©Ka…=Ÿ9žk&@Å£‹µë, ™Îò˜FUÿƒ}ÜŒPÆýG¨½ SS”¾ƒ 1I1³M‘4+”52Vl?â ÍÃÁ_BÕZŽèÛ¢6€ýIyöW‹7Å&Ê_šN”¼Ýz›%ÍÔ¶Õž®ËàgÖsÓj'3¹º®ˆ4ö o²Ç*!`ÿŠOªã3‰.‡Ç¹‡&@4ÑÕ¤ì¯~˜{}šç$Ûó™à€¼À`•%(ÄejF§^µ‰㞈%m6¸åWó«è‡ê¿ŽÒÚ(X*QºFd‹òSâ—&”_¤?Ä\r§åA|âH~ Q!èô Œpíþ\Æ›L¥W:ˆ¦„{b@ÓH &J/¤TÛm~•C–)ÒÿÅvºÔkFöÄ£UÍ{Oz 
f™kÙs2ºópI_}ùèU~’`›Dapœ«ËŽómšÂ×Ê©†î0oaûú$•©5Ïúï~6J^  ¹û¥d¤¨ à€ €J³Œñu¨¾GxÛL™ù6‹áY¨'G¾‡ž›úò_ Ä‘öû_™'u -¾#®v¡ÃÇ¢Æü ͼC":tŠ1=£$oܤÖjàñíO¶FÁ0£æཀ»¹›‘*äÄm¼Ø‹Ö¶ˆbÑ¥7Ôqˆ["°‚Ä‚’Rl7úà±áT4˜—ÂŒî<­!ð ;£C;ò‡˜]°L–¿ïET10örDeIñgæ"ºüb˜¤×Yjf¦®¸8òÐyÐ{ ¨ÖX]`tÞí#%D?Ã\Å™+.¤º¢È öP˜Ç´M[†Ð‡Ì†ˆ'gç:x& Ï“Ù#¶nxµ-%ºŒ ùÅQ"5þÍIÃV÷¬AÔÅmÏS[ö¬R3zâJû:©\{Œõ6ìŠô”ƒhœŠ’*²laëz ú¦Dd¡¥h©­ÿ<†"eáò4‡§°‘Èáù0Áˆ”Tûæ‘e’ÎF]SYY¬ }¹šMIƒfÚ VÕCaðÒuÑÎXb¼°9,¥}ãéiéÇB)Õ´¬7<òÜ:vxÞ4²ì Zw)»©Oy‹ÃÚô8¦XÝÝ^Yj0ø Ìë_¢ÔÍ 5qÊ‚¶?6¾Q‚ì}ŠuhÄœèÌMÚšu²‡çá>‡16Æs»›åX‘Ô©Zd|Bªz‚åqpO(j¹ñ‡¹›µˆEDZ…x/ÒJ)¡+‰4^'¢8³Ÿr]öH‚Þ‰%¶íBIŠ´Ê ²3–7Ð0rüö«a‹B™ý’™²\âó4H¼)67s¥á†ôjøgÔÀbãôè'õœÇàÛ>ø€FŠò>j馚uù„æñ´ç”ÚÈ~+ ¹–#ýØXžgÍÅ*{KHî_ ¹”y ¯ ‚þÿ/?f o2VýÃÝ„ ‹lí ˜Û½¡|+*æ]CÞ#wÚÇO;ë§?([P@‡/–C‚Ñ{‹kÀí&xw¶B¦E¼„kÁ.9å¶Ä›ƒö³¦|™ãIÌ…#2¶Šg"?âÝ›´L+}û!ÐLð¢m¯}iµinÚïK±¾ã‡Ï—qÍ</GnïLY€kÍüŸ½—pá +a‡†º÷ñܰŽäoÒ·uÄÝ»ZRî¬få§Ï~]ªdðÎjn.UÔÚ‚ 5·g¬åÉ+~õuç¬ðÂÅÞfŒ ú>ŸBôuBkªöÎÈ»{¥¦ø›Ý2úáÚ0ðüÕ…cÓT.ªpÉPþ£Ï÷åÏHNW²7ý³ÝÙˆ¯³kã;¤-à›Q_>Û™ÅlqÿQÈ®÷áer³¥ $fY|yfGB܈X˜ãާx!ö\ÝK¹ôóæº™Ï†Ï|ËqE†¾Þ™xÖƒ u1„Gvî‘á%cølׯw«Bî“9ö‘êȪÉe¦Bá ck’»¶7^Ø“½¦«³öÛ=RˆK‡Iô(b ¤Ë¡KôÕ%4¿$5Qöÿ7[-¡~ÌVXa9ߘcØÿ±Žl+ýC×Ô‚4Mã<Éš$ävd}!8ðBº³QRø«¾ì5Y³=Z™³TY‡•p§%}xYlP¬•m”±×ˆ T¥RÕ "ɤHâÓë²% ØGŽ‹ÇIú¡×šPÒ¯µÄ$óYÇÅ€Åñ‡#`lf·jÞõ n{NpH)uíûKe'à¡·¶,ÀŠó*÷F42óI´C‚±NüBC‘±¼÷j§‘ï%B;î%åæ;ÏF‰Tþ0¹‰B1<]¥`È(MW[=…&,ç´¶¶·‰=¨ý±œ ôØ(€ä*¢Ó{•X4'%v´¾„˜¡ôãh PWúvt‰¦_“±O¥þB±°æÉ!¼'œ_e; çÖ;+$í·4nìHãì¸Qœ—–‹d¾5-Â-oòÂ[‡+¤'C'!- óÌ¡2Äšö8Óne·ÀÝfË@T›Ê-ôÌX.súÃ…¯û¹ÎŠ… œ›9e‡IîðXú9'ŸlEMï)»w©W'ò•1§5@[‹´… K‚æåS¤Ïâñ±l\bC é_'^„s…=)*aôÑ6–\8à•èÔìÕ«*'"]óø¥Q]çW¢l9³'šŠ• m¹Ú‡ëS¥®O3`f‚g8«¤Ýä{L(ËJ µ†ã"GJkTÆûPl¿ˆTËHa_Cz¹#)_I°='9Ï““‹J+ë$—M8ÎÖ™7Rñ¸ï¢åèHx¿gÞE3£`èûQl¡ Í…¤w«Nþ"M<¿bW|Ä¢—ª «£vÛl‰@pÖKÐ6±ÁšE­øVàmÕã'*¼+ޏ¸NQöÐl1ŠêÀˆQ(Fu2zú+évMì¼ESJ™à[â HÎ_ >®J?“ÒP3óHå¡[ß¶ xð%yÊ{C(Ë#5â¸_å¨ÀvR^ !û |˜ãEûËt„¿XízYynæÙ3iM{ÄËTí´¥}ÑwÖ!‚Ž8­üU¦UóbRm{6dáJáá¢Bì¤&évÎd:£áµ3”Ò&½-ŽQ–åŽf6ãoKÛ³€eD{7<»Õ#{LâÕtEFø %UNJMõrnðùšŒ®`s9óè¹²´Ù¸°¿§h«E^íXM Ð_Àš·p]853œ ¨…Q¢¶ráë¿wª©xô'­ál^•ƒ?¦ìµ<çuŠ—ážd—6-‰·Í-º§þ|8Fì_b‹c¼QÅÒôÒôz¼óû÷¯wvX[Ð7`/€QÎ×ô"Å-û4›=ìŽdÜÖÛIíër[ˆø|-Œã*èòìŽÝôp¬Ùy…›R{D/ Ež ï;[ÅõÊõؤ…^n¾§£*LÏO‘ÿ©ˆ­<“#–d§RyÍEkÊ>þ[—÷\N×u‡(Фkø0¨üÛzøÄ;¾Óc- ë'ìø´¸Z]58Ö{mI#·¥µªè'£• ýú±ì1¶©ëvk3è,ÔùÙBÇã½jaä˜Y†ÉŒŸž ÍtŠ1í`üs¸ñGRßðq–(D€ÿClý’|žqϘñD¥Ð%$€zÖi¬ïәɦQ(L²Œ›Ä•B.9›êsïmYy@bp>¤cùx_\¸]³$Ef}3‡*»-¡µ–'‘Ê™qÜSÓ†Ædÿ9¾ò³³àÐè„e‘Ï>ö½§FÉ〃Ðà|ÎIqøÒ…ÆêÍd#àÁ» Ì»ù•öncòI+R·]òDÿ°š‚E2$} 5_€UjfÏÜ/¢%Ǧ(â(—\FœŒŽèp^ÓXTbš-¥e aèÅD#t—”V5ý©aí@¦ê*€hÅBœŒouÀ¹‰ÉCJ|&c– ¸1  [ùPùJ½ÁÆÓX‘é?Õ&Aþ%¢"fkÐ K \娢‡Š¡óÄPëKxªi`eJâxëžØÂ4¸òÇ*7«‹EÍïô!mëSÿøg¥ÆãÃ3¢ùMS.{}]!>Çý,²HËÞLp'%¸"^$&úáëÂÄH3¢±P0§HP’œ‘&U"ÉlÈ)4åæ+ßO‰¾XvÇGY+ÜÆ‘,f¸Â©ÕþVß^4bï–„ÝíRH,à’!4aì$/ô¿¸]c&Kñ4 »GAŸ©Åb>˜eýh„Åž-„cˆ,aê=¶Áü!Ff¾Óíp\ øËÇÝ{Íás-dR½Š'µno…”À^¤b·ìÕ¼ãh¼,P×=ŠSô…?äƒà»Ce~5jó0,,@’溎åj˜Ì…x¶óÿäuízP[oä]ÌLSñVhêø¶mØ EÖŠ|%¹Wž³Ñ•„²‰Þü7‰kƒ‰ë¾"êFn/WúÕ„_xÙ™S‰µÕËòã½;­7|±ÅIÌ5û(¨Äå^1åçêí厺K>oMi sτିÒFìíÆ®Í–¸Ù<ÙÞÇ:¾*ä\†×Lu¨o;Ës–Ua†Ý>«êõ¤ dR-Žª­ô]¤OYcCy0c'‡H¡œt¡6"ÂDß] ¸5ž4¥ÚŽQfdÅ|q1h›HÁ‘«k«à…ÙVŸ››ÕQàÞÀU7µ>hw!gXzlå^)ˆJclbˆ>Ñã§Ì;”÷û‚ñJ6dà}xZ™ס.ܵ-lTçƒJH"xQâ O9w+¸ÁáRÍ”– À®ž¨Ö¼n™X} òÔCwñ4)ÊeZ»ëÂ,•ÿ¨¥ÒÈP¾]¨¬'µ÷©â<øP‘Zï‹C”Wߩ›c%®‰ïq‘¨,b½òSòän„¹Üò¬ESùĤ_Ma1022¢¢@eìœÔù0ï_Ê'R-1Š'”\Å£]¹6péb×ØÑ¤]T}øÝ÷£…í·cƒÁË®çEo¹¨ÎUP­6Iœx·B| »»ò?G'züToi¢%6wš˜FùïXz7º$= @ƒàÜi•La`i]ìÛ㟠1z\‡T¢øp¼OÕzÏå“z§oxTÆ·¼(ÞW#-#f$yÞæ<’=TÄ&’û_RXJ{} ƒ6B³¸™ŸÍ9¸öÀÔ¹úït‡Â‚®¢ÆZ¸œL„G€+F£þÐó·<`ÖSh—¬0ø ½¢üÝú~·€âÌtFY«Ûyòʲĵ› ýtÐÄtËfqŠÔf¥ks_Ì·J'ÿ©°*“k ƒ¿Š;’ŽOÚ6:âsk»É-¢>_¨Ý²Áq“ˆê5–ôÏËۙˮ¹mÉpç£Øl@ÉpH-G.áß•ßLM‹™>Ñ€üOü¯;'- Qa…T[ÙïwÀ(æGX¼½êüäÇt7«¯ÁðíÄ>N¹/ØŠ*qŽ„î•m¹9ƒ÷/vÈñ$A©±8¸­Ü9ö/J9(ßT‘“Ï•<ÈC=·3æÂްœß&DMYâ6š|Pyž ®oÝ|Mddq‹„y®Öð«÷z*/«¥;ÿ™1ð¥ay1jsæ (ßC{s =Põm°±KJ›ñ¬Ô(Rž·M¶ýÀC ²²1Ú? 
~Šø[lFg|Ãg©öʪ˜„úâÝ÷¼®…Yháb8ޤ7“ïÃî­~ÙäÈ`g»Ó“\tA[ÓçDNé,/|G%MÞw¼rS郱”&dS6¿ÍŒÅ+É©R52·|WGq»=6«ãNNÓöÒÂÆ£ŒÅ£óôo½!êáyHkÌö¶¤§ÅMü¿—ß¶ÃÓn »¢urªF{U±ƒ;Fÿy5´&9 BðùÔmÅÕD ªÉ0oÂÔÈp( {ˆh#zÊ+k@A›_ Wò6×ÈVÆ¢î}¼È99ºÇÑŒØ[ñ…DDäp£¨–Ö¤ JàÈL&mŠç¹ê ¬èP>2?†KG·n5ÛU¢Y –)½=–¨íû¹Ë ‹[kp5Ï$ Õõ~éHÌOp$Ç9êˆ-3Š8³4óèV± ŒJÓHA}w-Ä29­)®3¿L ¼&R¬{bBßû=+ÈÎleRgÑN­f2Oƒiª]‚³Áø=µ à°ú¤×ßZÏ/8ZWÕÀú|øSaN+X³†#w»(ÊŠ–ú`/Õ“šÆÐÞ¼ïqˆ½q|àëÇò5¦ K^z¬vˆ‘U–Á:c×ù±µbU‰òŒ”— Ž©nxælⵞE£€ØMu„ÜMݸÏá ûdôÍj av,G¸E+æzÇÀ½{ƒ5²}…î¸Zçë”ÊK&üÿ“g‰™Ôd£’þ‹¿³h”»ŠŽcï¨ Öq-S1ŒÇÝ»ùp` ý„½*§ÿb®dòo” œØK]th*m=¨7¿ëH±rýPÄ™TAc¥ÿX-’álB"Žk¼ •øê_ {>K÷²^å¾ü…eò‡`Xk/9c{•©V¡]âÒÎDŽ~‹lžÔX0«Æ‡L>…YуßBm²çÕƒQõrç3“¡À•‘8å¤JeÝ劫d° ¥¨-Zúˆì‹¼[¹)ÉÏ8ñ—>¡C|äª*&o=~“B¢]]„V’š ÊëÐrÈ6ÏOèe$°>Qª#t©ÌN¾‰t\–‡‹F[ï³ä‰h‡4O±p_V‡ì7b¿3 ¿-ëeÀ|káµL1›ó}â÷ûþ<ÅÛNj ‘Œ>¦úÝu†•Z«¢ gŽ©#Ôt V–a]p¡n–vGM¡5óøš‘]+ã¸c(î3 ÃTåC·"c@'áažñ¸÷©«Ìñìvñÿ£^ñ÷b£1¼ŸÏT€–¿ïu§Ø2MÒ¾Ži¨§It*Vd •!õQ[ûý‘NùŠí*×v“ûT`~ÂjÚ³ÊY×EÏŽ)W½êw¤ÛR}¦Ì‡zÞϾµÍXgþQ._P³¾²üÎ{Ñ÷øVËàq æÐ„‹¶òrÚ9Â].ÅW0ÐŒ;¬˜>N½-269”㘘LSµÏZÍ!Måì‘¶ƒ ?ë†#w€%HoßrÎ •¥o¿Âé3;‚êgØ'n'æêWãÃQTCä™eXÊ^.¿±hì!ž²M²¯¶EpÄ®7¦!«ÑYV’s¥jaÈ`ßÕu„).m“ÝûYߊUñßoØñ¨ø[x¨‡¿5]:%½àΆ¹`LËD¬Rü¡˜qá¤jÝpëýo”ÓNI‘ #Iñ&|?.°ùOƒ²HÉ"µ¿v³:‚|%¦n× k7,$›Z|ÞïÊxø&e5Í$DJEˆËÆÓ"¯ŽXs‰Ž„ —씹^I0ªT(pÐ`ÕèÓÒý¡–¿µðyM¨?¡ •Íܶ‚ï&!FU ëî–TöfÂðÁôבž_ΔŒÖÑ)fk¦×ÀaMwcByÇyàž”€„™Ý"ÎæüTâ(.íQ¾˜k¨†LŽ^½ôt92å ùr±wsv¦fÊÙí{š8bÅtÊ'×\£¾aSv(¤sAuÒÿ=¸ä€Žkp~²üÀÿ_ý5΋ xɆ“ñ–\r?^Hp°Ö`ÿ@F'ý=Ý,ŒONT…·ª;•—ƒ¿­€uÞä8q,—NQ@Ñx „WN· €;Ùr;°ø+ne‚$Nû-ÓÝÿÝDe‘ôYˆxáüdS§!ìT¼rúþW[ê´%ìKé6â¶ÂÓ }¬KØõQl j•§À¥Äô8<ž®q]bÔì`Ö4~éfb²‹ŽÔ<²6RE Ã(S‘°ÐUSõSª›‚ûmíÔX3ÎXž5b‚2šg”\ús^ŠäXˆŽV‹7~˜œeßÖkv}éæv€ï£ÆÌù{â´á㯇;«¾jýxƒêÚÚB8sÛY%­}ÌÓŽdâ ì­°¨é ÌoÞ;òüüqF"5Þ¾º̲ëÚ¦›Úc€¥¸Pè Ã+W1ô{Ú®áѳ˜x›kð5ð¢Éll:f¯–GR_Xò a fS NÁ"Ôgš̸vqoŠtï6zý¾NJ'Ð;èeÔršõ<œot‡\cwöñ/Ÿp&BÇ¿Jõ/ËÓ“-¢,Kè pÕÐÒȉI¼æSGPZ«gœ£ÂÞwZRNp’ $‚ݳaÜ©úÀêa‹¦Uî2ÞùÍøqÎ!«—ÏaIQû”É+t°%Pñ„ ¤ÂDšJ¸y^°ymÄ´úeÜã« ƒ!ð–š—)líd¬jLÓj7t;è—>N 1fÜÎ߈ _ ?ržn§)Î'&¯9n[òÍuå¿%>ÿ¨£ÞÎMÅt<‚4Í(Мô=Ù<®¨\{‰H QÎßÚíçÀ¤>ƒFkØ5L©uVˆy€‹ç$ó”‡"r owŽ„€‡ï½Ø.wPÝÌQ àX&RžÒ}'û€À­Ù8¬=š$Ag/àifÖú-ëð”{´°Ár¸orÎþɶèc†/woO¨Ñ¾<¿‰„¸Ô˶¶cÇòX£ë¶8›9U”4|Dÿ™vF9¥? 
EÄ©.87¿%\p‘Ø?v4Xµo]4t¢<À­~”ë ”+ÿµ#’ÎIÚ†È7U22ÀHgH¶1œ~ÌTö'è¯@ÁZW©ÑÁà (Ï­†Ñíû«ýU4‘‚,H\g= º¼ à=Üœ€îÈ•Çôrt«’Ô 5‘?&™…ZÄíN°]«â¦a™9xí·TPï¤'ÜuêÞ5×8ë¨ó¬¨Ä¥Z ž»~µ.‰9û#‡ˆ9Gq¶¦7^úÐ\%ôé,%Ò°|3ç°â1¾ƒfx5—aCcq–níÖýä]•}Aú7I7@Åå©’þq ä‹„-¾¨¦›Z@;" ƒmb-<6~BCÕƒõd+-h‹Ém‰Xæ  @khèbâ½-¾Ò7kMËf{â”'Aë`»x®LËô1óvo a“^ËÏ®$û‚½2‰î…ëšÜmñš„V8Ú(mô)¢bHÝ>iñv0$¯æÝbXÏõpvw vq±Êgd@\Æ1knö¾6ïµçÄÉw*A_OsõÜP¦#’·âª<ÌAÖȑה‘§Ì&ÓK|;…Ûì=È: ÇÔ蜴§t<"çï#Óß²â Jæוlq–OB#b5[ dAŽ@j¡Ábë)%ì”’Ñ|kë (€â¶\û4piß]tµ ­¥y²hV¡NƒÍÓí“1ÿÈPkI4Ô¬ï‰hóûDå+oÛe¤ià6m$= ç­‰“ll7„‡ìP¡á´$å\ÿ¾·.—cÁYŠý›CƒÍ:¹,BRý©ñÑTÃUŒ¦Í_z½ŒÄšºŠ^5Ò¡¯L‹ÎÓ îÌá_Ö§E__²a¸ÞÅ犢é|iì÷åÀÉíîƒ}&‚âk.Áø­fÝËú~Õ¢²…Ãa¤vóþ¥Å{Ë;ÿô¾"b•ÛAÇõÄÞ ßׯjO¦†a芴2êyX.V{ókþ¯F-b.TSÈ6øeb‹±û4ân«¯ ¨ÿŸ} }ZYÖàícøÙ¾×\ŒT FÙsÜ6ˆÖé!ÎsõÚÔÂænƒ$ÙØ²Ø‡†=¨þ ýK—½ù ™»T‘êå{Ä“ö²î%çõ¼=´ß,mÔµ¯õ®¼TÂ÷e~¸@Û4ƺsÉÑöÆÿÛ<ò&mø¿ñ=D(Þ& Ñé*“0â!æav2.¤¸K*¦‰ÀœóÆr8B8{±‰¦²DX{î:8c{QAkÓߢ$Ìc·Dâ~~W~ (³…já?o£&y£Ï(Í›(…}•˜óú/,/EZÚÄQ¼.r)ÊñÑÒÜdºü$ì+χÕá÷}иüTl¯‡QWÚd±”µÝž¿Ú·há‘A©©x¼s°o€7gaær‚þp"!ò‚¤á Ñjá1ªŸ¦âCrzOy||»á¦žKû«#ù ·ÚRíd˜÷pÙ›Æ`.¨W{%|~Aá’0Ó~”Â$ó_|gï.-}ɦWàÖŽžÅ½â!ñÏyý<.û÷âTµNdY×P± /z!l5¬ø÷\ëoô›ÍW›éHlu€í}¡+€ þß*Á$Æ8†Û¦²ø0ŽR ç ï€Ù[,<ÒR@:s,K¼!¥Ëª¹Rtäó Ȥ°ZŸñLr¹q=±÷Þ«™t¾EL¼¤Æ–’sk-[;|_¶…BßAž; ƒ¤jšd¹ú[!OTmª«@1ÙÃËŸC,Ó´i.Z® Ø55’|fÙd4ô ±ù·è£VЍáºJ…DbÛ[Î×Ãe'e´$‹#y@¯ $S½,#ÐD ´+Œ’`âÑ•lS./Þàrñ–Fš˜]6¨-Ã_£N[#ÑÄý±Wô¦å^!Æ´Ô‡ü2z~¼Á&h¶×ËÏnÝN?lžûkÿ¨>”cáþ­œ˜ì‘f `e ùCa0qžrDKõæÝþêuëñ±úñ$ÕðƒjPO×”­*gçGÏ£Ög›G©Š…Ùí»^ùŽ·PúÁd‘®Vó0¸âx°7u´8e®€RGq¸ éçù3Taú•¨dãû¹¢öÏõõpT¯ÃL›¢!özë’r©Sã‹ÏÌ/ æçöþy0ÿñD¿°OZigO›´=ŸûÑÜu3+~] íDÇÍÌtÚ›”¡YÜ3]]™!J" mYÚÄ]N$-+¾—Os]UçÉG¢õ‹ä ÂÔ1~®H4b¿Š"–†hP…ý»pÓ€±)†@>üÅÉ kWD09rá£a¹[öà…‡U‘0öÅ‚ݹãá–³ׂ#RsMàFH€-TÅP IÓ‰ÐyígvA˜¯h"Üø3],O•Åôù†1ÿع¡n$²‹ óõø  ZýþEuð¾O £ýYx6ÝcI315ëBUø‹ &Y„ãüM|ÓòMú"e`F§\v£v;~ÿçË'·¸a-¯ÞDT¢Å*Â6ZyÓ—ÇIy¤›ŠLN¿»c}ä^b½¶½§†€«9[Ò‡*ÃóP\J÷]¡ªÛ»VÂâ¾Ã¶ƒÐ\³JdÇ1Ì ÷A²lqy»Žñ„»Ët)!§ÌVßmeiÒÕ»E³Réê§ó´®%3^I¹fúÂÓCô±ÛèRu':[²6a”5;!OOÔaXê}&ª¶ïã,öê<}DÒ¯«·"'ì?Îc· T’ë/a‘n2&ihÐ8ûÀ àП ¾ÚÊuô§FÄÁgÇ¿{9J²34~ÇB­• Ït]a92B&ž°«z;KúÎ"ª0óv¦g FbŒœe†¢q?£èa‹õ²¹dBDCŠQd1Jîñ:Ô"m…Ö&ÿUcæ*sÚ£{Åÿ:ÂyÖw M·ŸýWè•Q±W­x­³*üKî" ƒ¸»®ûóÑ” ;¥(cg4Q¤Ý€é’{<­Ëˆî!Tl‰9ù•»ìŠRðƒ fVóu>Y¹¢ŒÉÚd‘áE5|—Ü­š¿Â.wÅM.¤’'ÕÜÒ²ÒŽäöŠZ½}¯DHÀx¯3 QX2ðNókÇQß¡Ëk"}²¾¡ÏM”¢éw½[R‚|M%Õ«@¬{2ÐEo ÕRh’ºóðÚ¾äeÂZN›/¯W2YL¿˜ä`8SÝÝqžƒ´ ovã@ÁÐidBv ‹>¥ß"=ªÝþ”RøMÌÀ1ФʈóŽ<»–vX¸UPÍ}(¤â¾—,o|ªÌ¿LT(îŒÑ°‰ïH©Pé"6ѼRÿ1Šu@ü½¼½— °L:öG"‹ìlùn”o)þyõrç‰xÝÄJü¸À=×e¹«ž‚žÖ­r| 5|ÕVº°«Ó£Š¼1—ÞÔ¼“ ï”§Lÿû“3ÁnD%Ë?µ¢ôÓë`ÏÃÌ­@Iü-êõÍtã±伜²c¦v,“ر±Å>ƒL–½Þ©,Ì{úu*e¿*#©Š20 GçKìGZNˆucÞ§SäÈ”ü#CÊg°Ë‘íçÛ=ˆë7uáÌTX'=ö2{?ÙÏuR×2 R_$FNcä-0Ée‚žÒBi œ¶YhÌ•%Ö*j7‘ã-¡²§4_²L`(}kÉ90£Î‘˜±aRYih cŠæ³3|ø~ÏŽ]‘WD’sb-±ßÞ šö /³h ɸ°h ´¹#¬TËz/Ü– WçD‹1G•ý– à£aN•ÝoæºbqÅ'´t빂Xeâ¾ïJU¶W…ü;."ÂÖ~€®)Qß’ Ö¦ì×;·]7Û#íè÷´E\Óã•VÍ‘50ñÏxÆãÆL\‘ËÇw& ÆÝ„žGm+;&Ž.f|æ—v54"ÈŠ÷0¡ßéÜ.Î!¬Ý©¹…ϨG׌øÜ7,à4”þéáé¾}+AÒîFèNÙcaשj”t¢î·Î×Ioͺ>EUÇð°xâÖóÿ‹³ÕŒÕŒý´Í^,.óvÖ´¿úYÎgÓÆØ„­Û[Ô1"ÛÜ€e|~™QX;{À™«1OA0ó1übT™òn͹ÂÒû eè³EéÀ¼韼³<öÄ~;-ª÷µàÔxhC ª¹»ÿgÌEÖ÷ç³ÆSÀ—ñ#˜ð.ú[Ö]/åVD,×vLžB÷ÿÃðvºûdÓ/¸?â&¸¿Š±À2’Tÿº_ R­§€?³P3&ŽƒÞSD£fu–%üHÅà™xÌ;Ä5—ƒŸ&þg3BN-²Í"§:+¯é”(¨SMŒ=&±æa{W{Êu·rŽ;€{àú£ÆÒnpà“+cÜ_` ßqPο«€+3H RÌq`JCñœFcxÙž–s(1K¢s³%|Ü@{”k–õLÛ)IÛÅ2DüyJ ņ€ÞÝzA<#(Žõ¤m e Zb6_ìgLÁÂŽâ’4•µQ£RÀƒ%ñ0Z•]$ÜžÚ ʱo5ŸÚ§©}˜´ì(zõuxG¿·1ÏQ24ZÅ;IÉÇ6ñ'Ÿ·¥æOWƒŽÌ¸1GX… ‘1èiÅc™µµ›rB,;¿^ºã©æùs´ö„ðÃEFk{Uí\PªkF%•©ås½ÙX$Ô„õÈÕáI+Сiº ésîkË¡ªíÈ„z wu|(ŸMÁ¨o÷+ZÛ…›ã;‰)^ÂDÑ3T›™O®ñâ$Pf°Âäjw¾ÜY„¨ßkž¿¾ŠÎUti6bÀö¹9IßIn¯é¦}S¾]Wé9ŽòÏ &3p9bÀØ ‚Á¿í1Ñ&±öôØHQl.!–_S®ÝéV„.êt-”>[BÛ™^¥î(Â1½½*чY©³KðÌ6g?i(mÚAÅaÛ“˜KY}ðÝûÊjkà*§yL'è«™ì¥ê¿\àl(µÇúCú Sy&®ãÝm€àYÙœ*0ªšñXkB>,…¤ª활T“sWNRcÙtê6ÈwB ±Í»~ÁMèÔRD“¡¤èkµÂIC¿–ü7ðPŠPÊì¹ÿÈxöƒÄÀr1Œ½º‘\ìK«±3bN¤f`"lÉySZûKŒhW<÷Š-&dÆx¹j3ÍØGeb–K=7Ç—÷f}xjÞ‚røF‚× 20”3…×£\«˜°`ŒÂ¸pÓß6ºËpúÕ÷•µ[ŠaÁó©Æ;ËŸjQ¥æEØðDR{X|yÀZ؈ÂJ=©’wYÁaÁ£„Û]«@:’ÅA½ÑÁJG™8Py DLŠ•ý$-EUðÿ‚ìíhï/Œ>0 
## -------------------------------------------------------------------
## spatstat.core/R/scriptUtils.R
## -------------------------------------------------------------------

## scriptUtils.R
##       $Revision: 1.6 $ $Date: 2017/11/14 06:42:02 $

## slick way to use precomputed data
##    If the named file exists, it is loaded, giving access to the data.
##    Otherwise, 'expr' is evaluated, and all objects created
##    are saved in the designated file, for loading next time.

reload.or.compute <- function(filename, expr, objects=NULL,
                              destination=parent.frame(),
                              force=FALSE, verbose=TRUE) {
  stopifnot(is.character(filename) && length(filename) == 1)
  if(force || !file.exists(filename)) {
    if(verbose) splat("Recomputing...")
    ## evaluate 'expr' in a fresh environment
    ee <- as.expression(substitute(expr))
    en <- new.env()
    local(eval(ee), envir=en)
    ## default is to save all objects that were created
    if(is.null(objects))
      objects <- ls(envir=en)
    ## save them in the designated file
    evalq(save(list=objects, file=filename, compress=TRUE), envir=en)
    ## assign them into the parent frame
    for(i in seq_along(objects))
      assign(objects[i], get(objects[i], envir=en), envir=destination)
    result <- objects
  } else {
    if(verbose) splat("Reloading from", sQuote(filename),
                      "saved at", file.mtime(filename))
    result <- load(filename, envir=destination)
    if(!all(ok <- (objects %in% result))) {
      nbad <- sum(!ok)
      warning(paste(ngettext(nbad, "object", "objects"),
                    commasep(sQuote(objects[!ok])),
                    ngettext(nbad, "was", "were"),
                    "not present in data file", dQuote(filename)),
              call.=FALSE)
    }
  }
  return(invisible(result))
}

## -------------------------------------------------------------------
## spatstat.core/R/vblogistic.R
## -------------------------------------------------------------------

#' Variational Bayesian Logistic regression
#'
#' author: Tuomas Rajala < tuomas.rajala a iki.fi >
#'
#' Copyright (C) Tuomas Rajala 2014
#' GNU Public License GPL 2.0 | 3.0
#'
#' Special version for 'spatstat'
#'
#' $Revision: 1.6 $ $Date: 2019/04/12 03:34:48 $
#'
####################################################
#' Used inside ppm

vblogit.fmla <- function(formula, offset, data, subset, weights,
                         verbose=FALSE, epsilon=0.01, ...) {
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data", "subset", "offset"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1L]] <- quote(stats::model.frame)
  mf <- eval(mf, parent.frame())
  mt <- attr(mf, "terms")
  offset <- model.offset(mf)
  y <- model.response(mf, "any")
  X <- model.matrix(mt, mf)
  colnames(X)[1] <- "(Intercept)"
  Vnames <- colnames(X)
  #' then we fit:
  fit <- vblogit(y=y, X=X, offset=offset, verb=verbose, eps=epsilon, ...)
  #'
  names(fit$coefficients) <- names(fit$coef) <- Vnames
  #' add some variables to conform to summary.ppm
  fit$se <- sqrt(diag(as.matrix(fit$S)))
  fit$call <- match.call(expand.dots=FALSE)
  fit$formula <- formula
  fit$method <- "vblogit"
  fit$model <- mf
  fit$terms <- mt
  fit$offset <- offset
  fit$data <- data
  fit$xlevels <- .getXlevels(mt, mf)
  fit
}

###################################################
# the fitting function:

vblogit <- local({
  ## helper functions needed:
  lambda <- function(x) { -tanh(x/2)/(4*x) }
  mygamma <- function(x) { x/2 - log(1+exp(x)) + x*tanh(x/2)/4 }

  vblogit <- function(y, X, offset, eps=1e-2,
                      m0, S0, S0i, xi0, verb=FALSE, maxiter=1000, ...) {
    ## Logistic regression using JJ96 idea. Ormeron00 notation.
    ## p(y, w, t) = p(y | w) p(w | t) p(t)
    ##
    ## Y ~ Bern(logit(Xw + offset))
    ## w ~ N(m0, S0) iid
    ##
    ## "*0" are fixed priors.
    ##
    cat2 <- if(verb) cat else function(...)
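    ## (The Jaakkola-Jordan bound behind the helpers above: for any xi,
    ##    log sigmoid(z) >= log sigmoid(xi) + (z - xi)/2 - la(xi)*(z^2 - xi^2)
    ##  where la(xi) = tanh(xi/2)/(4*xi). Thus lambda() above returns
    ##  -la(x), and mygamma(xi) equals the constant term
    ##  log sigmoid(xi) - xi/2 + la(xi)*xi^2.)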
NULL varnames <- colnames(data.frame(as.matrix(X[1:2,]))) ## Write N <- length(y) K <- ncol(X) #' #' #' offset if(missing('offset')) offset <- 0 if(length(offset) eps & (iter<-iter+1) <= maxiter le_hist <- c(le_hist, le) cat2("diff:", devi, " \r") } if(iter == maxiter) warning("Maximum iteration limit reached.") cat2("\n") ## done. Compile: est <- list(m=m, S=S, Si=Si, xi=xi, lambda_xi=la) #' Marginal evidence est$logLik <- le #' Compute max logLik with the Bernoulli model; #' this should be what glm gives: est$logLik_ML <- as.numeric( t(y)%*%(X%*%m+offset) - sum( log( 1 + exp(X%*%m+offset)) ) ) #' Max loglik with the approximation est$logLik_ML2 <- as.numeric( t(y)%*%(X%*%m + offset) + t(m)%*%t(X*la)%*%X%*%m - 0.5*sum(X%*%m) + sum(mygamma(xi)) + 2*t(offset*la)%*%X%*%m + t(offset*la)%*%offset - 0.5 * sum(offset) ) #' some additional parts, like in glm output est$coefficients <- est$m[,1] names(est$coefficients) <- varnames est$call <- sys.call() est$converged <- !(maxiter==iter) #' more additional stuff est$logp_hist <- le_hist est$parameters <- list(eps=eps, maxiter=maxiter) est$priors <- list(m=m0, S=S0) est$iterations <- iter class(est) <- "vblogit" ## return est } vblogit }) ################################################### #' Predict method predict.vblogit <- local({ sigmoid <- function(e) 1/(1+exp(-e)) predict.vblogit <- function(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, ...) { type <- match.arg(type) if(type != "response") stop("type not supported.") if(missing(newdata)) { stop("not implemented.") } else{ # newdata #' build the new covariate matrix, inspired by predict.lm tt <- terms(object) Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) offset <- rep(0, nrow(X)) if (!is.null(off.num <- attr(tt, "offset"))) for (i in off.num) offset <- offset + eval(attr(tt, "variables")[[i + 1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) #' predict using probit approximation to logit-function mu <- object$m S <- object$S mua <- as.numeric(X%*%mu)+offset #' was: s2a <- diag(X%*%S%*%t(X) ) s2a <- quadform(X, S) predictor <- sigmoid( as.numeric( mua/sqrt(1+pi*s2a/8) ) ) names(predictor) <- rownames(X) } predictor } predict.vblogit }) # ################################################### # print method print.vblogit <- function(x, ...) { splat("Variational Bayes logistic regression fit") cat("\nCall: ") print(x$call) cat("\nCoefficients:\n") print(x$coefficients) cat("\n") splat("Log-likelihood:", x$logLik) splat("Converged:", x$converged) splat("Convergence threshold:", x$parameters$eps) splat("Iterations / max:", x$iterations, "/", x$parameters$maxiter) splat("* Caution: the estimates are conditional on convergence.") invisible(NULL) } #################################################### # vblogit family method family.vblogit <- function(object, ...) binomial() #################################################### #' vblogit fit summary method summary.vblogit <- function(object, ...) 
{ splat("Variational Bayes logistic regression fit") cat("\nCall: ") print(object$call) splat("\nCoefficients and posterior 95% central regions:") vna <- names(object$coefficients) s <- sqrt(diag(object$S)) q0 <- qnorm(c(0.025, 0.975)) m <- as.numeric(object$m) df <- data.frame(estimate=m, "low 0.05"=m+s*q0[1], "high 97.5"=m+s*q0[2], "prior mean"=object$priors$m, "prior var"=diag(object$priors$S)) rownames(df) <- vna print(df) cat("\n") splat("Lower bound for log-likelihood:", object$logLik) invisible(NULL) } #################################################### # Coef coef.vblogit <- function(object, ...) object$coefficients #################################################### # Log-evidence logLik.vblogit <- function(object, ...) { object$logLik } spatstat.core/R/summary.kppm.R0000644000176200001440000001402714144333464016045 0ustar liggesusers#' #' summary.kppm.R #' #' $Revision: 1.29 $ $Date: 2021/11/03 02:37:57 $ #' summary.kppm <- function(object, ..., quick=FALSE) { nama <- names(object) result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))] ## handle old format if(is.null(result$isPCP)) result$isPCP <- TRUE ## extract 'optim' object Fit <- object$Fit opt <- switch(Fit$method, mincon = Fit$mcfit$opt, clik =, clik2 = Fit$clfit, palm = Fit$clfit, adapcl = Fit$cladapfit, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) if(Fit$method != "adapcl") { result$optim.converged <- optimConverged(opt) result$optim.status <- optimStatus(opt) result$optim.nsteps <- optimNsteps(opt) } ## summarise trend component result$trend <- summary(as.ppm(object), ..., quick=quick) if(isFALSE(quick)) { theta <- coef(object) if(length(theta) > 0) { vc <- vcov(object, matrix.action="warn") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } if(!is.null(se)) { two <- qnorm(0.975) lo <- theta - two * se hi <- theta + two * se zval <- theta/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) ## table of coefficient estimates with SE and 95% CI result$coefs.SE.CI <- data.frame(Estimate=theta, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } } #' sibling probability if(object$isPCP) result$psib <- mean(psib(object)) #' overdispersion index win <- as.owin(object, from="points") vac <- varcount(object, B=win) Lam <- integral(predict(object, window=win)) result$odi <- vac/Lam #' class(result) <- "summary.kppm" return(result) } coef.summary.kppm <- function(object, ...) { return(object$coefs.SE.CI) } print.summary.kppm <- function(x, ...) 
{ terselevel <- spatstat.options('terse') digits <- getOption('digits') isPCP <- x$isPCP splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isPCP) "cluster" else "Cox", "point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) Fit <- x$Fit if(waxlyrical('gory', terselevel)) { switch(Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", Fit$StatName) print(Fit$mcfit) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(x$optim.status) }, adapcl = { splat("Fitted by adaptive second order composite likelihood") splat("\tepsilon =", x$Fit$epsilon) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(x$optim.status) }, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) } # ............... trend ......................... parbreak() splat("----------- TREND MODEL -----") print(x$trend, ...) # ..................... clusters ................ tableentry <- spatstatClusterModelInfo(x$clusters) parbreak() splat("-----------", if(isPCP) "CLUSTER" else "COX", "MODEL", "-----------") splat("Model:", tableentry$printmodelname(x)) parbreak() cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } #' table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { parbreak() splat("Final standard error and CI") splat("(allowing for correlation of", if(isPCP) "cluster" else "Cox", "process):") print(cose) } #' Cluster strength indices psi <- x$psib odi <- x$odi if(!is.null(psi) || !is.null(odi)) { parbreak() splat("----------- cluster strength indices ---------- ") if(!is.null(psi)) { psi <- signif(psi, digits) if(isTRUE(x$stationary)) { splat("Sibling probability", psi) } else splat("Mean sibling probability", psi) } if(!is.null(odi)) splat("Count overdispersion index (on original window):", signif(odi, digits)) } #' invisible(NULL) } spatstat.core/R/pairsat.family.R0000644000176200001440000002071314144333463016323 0ustar liggesusers# # # pairsat.family.S # # $Revision: 1.44 $ $Date: 2016/02/11 09:36:11 $ # # The saturated pairwise interaction family of point process models # # (an extension of Geyer's saturation process to all pairwise interactions) # # pairsat.family: object of class 'isf' # defining saturated pairwise interaction # # # ------------------------------------------------------------------- # 
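## A usage sketch (hypothetical; assumes the 'cells' dataset from
## spatstat.data): Geyer's saturation process, which this family
## generalises, is the canonical interaction built on it.
if(FALSE) {
  fit <- ppm(cells ~ 1, Geyer(r=0.05, sat=2))  # illustrative values only
}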
pairsat.family <- list( name = "saturated pairwise", print = function(self) { cat("Saturated pairwise interaction family\n") }, eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, halfway=FALSE) { # # This is the eval function for the `pairsat' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairsat' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms # applying the saturation threshold. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ######################################################################## # # POTENTIAL: # The pair potential function 'pairpot' will be called as # pairpot(M, potpars) where M is a matrix of interpoint distances. # It must return a matrix with the same dimensions as M # or an array with its first two dimensions the same as the dimensions of M. # # NOTE: # Note the Geyer saturation threshold must be given in 'potpars$sat' ########################################################################## # coercion should be unnecessary, but this is useful for debugging X <- as.ppp(X) U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window # saturation parameter(s) saturate <- potpars$sat # interaction distance of corresponding pairwise interaction PairReach <- if(!is.null(Reach) && is.finite(Reach)) Reach/2 else NULL if(is.null(saturate)) { # pairwise interaction V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach, precomputed=precomputed, savecomputed=savecomputed) return(V) } # first ensure all data points are included in the quadrature points nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points originalrows <- seq_len(nU) nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=FALSE) # correspondingly augment the list of equal pairs newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # compute the pair potentials POT and the unsaturated potential sums V V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach) POT <- attr(V, "POT") computed <- attr(V, "computed") # could be NULL # # V is a matrix with rows = quadrature points, # columns = coordinates of potential # POT is an array with rows = data points # columns = quadrature points # planes = coordinates of potential ################################################################# ################## saturation part ############################## ################################################################# # check dimensions and ensure 'saturate' is a vector ns <- length(saturate) np <- ncol(V) if(ns == 1 && np > 1) saturate <- rep.int(saturate, np) else if(ns != np) stop("Length of vector of saturation parameters is incompatible with the pair potential", call.=FALSE) # replicate as a matrix and as an array saturate2 <- array(saturate[slice.index(V, 2)], dim=dim(V)) saturate3 <- array(saturate[slice.index(POT, 3)], dim=dim(POT)) # # (a) compute SATURATED potential sums V.sat <- pmin(V, saturate2) if(halfway) return(V.sat) # # (b) compute effect of addition/deletion of dummy/data point j # on the UNSATURATED potential sum of each data point i # # Identify data points is.data <- seq_len(npoints(U)) %in% EqualPairs[,2] # logical vector corresp. 
to rows of V # Extract potential sums for data points only V.data <- V[is.data, , drop=FALSE] # replicate them so that V.dat.rep[i,j,k] = V.data[i, k] V.dat.rep <- aperm(array(V.data, dim=c(dim(V.data), U$n)), c(1,3,2)) # make a logical array col.is.data[i,j,k] = is.data[j] col.is.data <- array(is.data[slice.index(POT, 2)], dim=dim(POT)) # compute value of unsaturated potential sum for each data point i # obtained after addition/deletion of each dummy/data point j if(!(correction %in% c("isotropic", "Ripley"))) { dV <- ifelseNegPos(col.is.data, POT) ## equivalent to ifelse(col.is.data, -POT, POT) } else { ## Weighted potential is not exactly symmetric dV <- POT dV[col.is.data] <- - aperm(POT[ , is.data, , drop=FALSE], c(2,1,3)) } V.after <- V.dat.rep + dV # # # (c) difference of SATURATED potential sums for each data point i # before & after increment/decrement of each dummy/data point j # # saturated values after increment/decrement V.after.sat <- array(pmin.int(saturate3, V.after), dim=dim(V.after)) # saturated values before V.dat.rep.sat <- array(pmin.int(saturate3, V.dat.rep), dim=dim(V.dat.rep)) # difference V.delta <- V.after.sat - V.dat.rep.sat V.delta <- ifelseNegPos(col.is.data, V.delta) # # (d) Sum (c) over all data points i V.delta.sum <- apply(V.delta, c(2,3), sum) # # (e) Result V <- V.sat + V.delta.sum ########################################## # remove rows corresponding to supplementary points if(somemissing) V <- V[originalrows, , drop=FALSE] ### tack on the saved computations from pairwise.family$eval if(savecomputed) attr(V, "computed") <- computed return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairsat.family$suffstat") { # for saturated pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"saturated pairwise")) stop("Model is not a saturated pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Empty <- X[integer(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat", halfway=TRUE) # halfway=TRUE is passed to pairsat.family$eval # and yields matrix of saturated potential sums # take only those terms that contribute to the pseudolikelihood mom <- mom[contribute, , drop=FALSE] result <- apply(mom, 2, sum) return(result) } ######### end of function $suffstat ) ######### end of list class(pairsat.family) <- "isf" spatstat.core/R/pairs.im.R0000644000176200001440000001312314144333463015117 0ustar liggesusers# # pairs.im.R # # $Revision: 1.22 $ $Date: 2021/06/29 03:43:35 $ # pairs.listof <- pairs.solist <- function(..., plot=TRUE) { argh <- expandSpecialLists(list(...), special=c("solist", "listof")) names(argh) <- good.names(names(argh), "V", seq_along(argh)) haslines <- any(sapply(argh, inherits, what="linim")) if(haslines) { if(!requireNamespace("spatstat.linnet")) { warning(paste("the pairs() plot for images on a linear network", "requires the package 'spatstat.linnet'"), call.=FALSE) return(NULL) } do.call(spatstat.linnet::pairs.linim, append(argh, list(plot=plot))) } else { do.call(pairs.im, append(argh, list(plot=plot))) } } pairs.im <- local({ allpixelvalues <- function(Z) { as.numeric(as.matrix(Z)) } pairs.im <- function(..., plot=TRUE, drop=TRUE) { argh <- list(...) 
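  ## (Usage sketch, hypothetical: with two pixel images Z1 and Z2,
  ##  pairs(Z1, Z2) produces a scatterplot matrix of their pixel
  ##  values; a single argument that is a list of images may be given
  ##  instead, and is unpacked just below.)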
cl <- match.call() ## unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1]] if(is.list(arg1) && all(unlist(lapply(arg1, is.im)))) argh <- arg1 } ## identify which arguments are images isim <- unlist(lapply(argh, is.im)) nim <- sum(isim) if(nim == 0) stop("No images provided") ## separate image arguments from others imlist <- argh[isim] rest <- argh[!isim] ## determine image names for plotting imnames <- argh$labels %orifnull% names(imlist) if(length(imnames) != nim || !all(nzchar(imnames))) { #' names not given explicitly callednames <- paste(cl)[c(FALSE, isim, FALSE)] backupnames <- paste0("V", seq_len(nim)) if(length(callednames) != nim) { callednames <- backupnames } else if(any(toolong <- (nchar(callednames) > 15))) { callednames[toolong] <- backupnames[toolong] } imnames <- good.names(imnames, good.names(callednames, backupnames)) } ## if(nim == 1) { ## one image: plot histogram Z <- imlist[[1L]] xname <- imnames[1L] do.call(hist, resolve.defaults(list(x=quote(Z), plot=plot), rest, list(xlab=xname, main=paste("Histogram of", xname)))) ## save pixel values pixvals <- list(allpixelvalues(Z)) names(pixvals) <- xname } else { ## extract pixel rasters and reconcile them imwins <- solapply(imlist, as.owin) names(imwins) <- NULL rasta <- do.call(intersect.owin, imwins) ## convert images to common raster imlist <- lapply(imlist, "[.im", i=rasta, raster=rasta, drop=FALSE) ## extract pixel values pixvals <- lapply(imlist, allpixelvalues) } ## combine into data frame pixdf <- do.call(data.frame, pixvals) ## remove NA's if(drop) pixdf <- pixdf[complete.cases(pixdf), , drop=FALSE] ## pairs plot if(plot && nim > 1) do.call(pairs, resolve.defaults(list(x=quote(pixdf)), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels class(pixdf) <- c("plotpairsim", class(pixdf)) return(invisible(pixdf)) } pairs.im }) plot.plotpairsim <- function(x, ...) { xname <- short.deparse(substitute(x)) x <- as.data.frame(x) if(ncol(x) == 1) { x <- x[,1L] do.call(hist.default, resolve.defaults(list(x=quote(x)), list(...), list(main=xname, xlab=xname))) } else { do.call(pairs.default, resolve.defaults(list(x=quote(x)), list(...), list(pch="."))) } return(invisible(NULL)) } print.plotpairsim <- function(x, ...) { cat("Object of class plotpairsim\n") cat(paste("contains pixel data for", commasep(sQuote(colnames(x))), "\n")) return(invisible(NULL)) } panel.image <- function(x, y, ..., sigma=NULL) { usr <- par("usr"); on.exit(par(usr)) par(usr = c(0, 1, 0, 1)) xx <- scaletointerval(x) yy <- scaletointerval(y) p <- ppp(xx, yy, window=square(1), check=FALSE) plot(density(p, sigma=sigma), add=TRUE, ...) } panel.contour <- function(x, y, ..., sigma=NULL) { usr <- par("usr"); on.exit(par(usr)) par(usr = c(0, 1, 0, 1)) xx <- scaletointerval(x) yy <- scaletointerval(y) p <- ppp(xx, yy, window=square(1), check=FALSE) Z <- density(p, sigma=sigma) dont.complain.about(Z) do.call(contour, resolve.defaults(list(x=quote(Z), add=TRUE), list(...), list(drawlabels=FALSE))) } panel.histogram <- function(x, ...) 
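## (Usage sketch, hypothetical: the panel functions here can be passed
##  through to pairs.default, e.g.
##     pairs(Z1, Z2, lower.panel=panel.contour, upper.panel=panel.image)
##  where Z1, Z2 are pixel images; each panel then shows a kernel
##  estimate of the scatterplot density instead of raw points.)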
{ usr <- par("usr"); on.exit(par(usr)) par(usr = c(usr[1:2], 0, 1.5) ) h <- hist(x, plot = FALSE) breaks <- h$breaks; nB <- length(breaks) y <- h$counts; y <- y/max(y) do.call(rect, resolve.defaults(list(xleft = breaks[-nB], ybottom = 0, xright = breaks[-1], ytop = y), list(...), list(col="grey"))) } ## pairwise things like correlations cov.im <- function(..., use = "everything", method = c("pearson", "kendall", "spearman")) { df <- pairs.im(..., plot=FALSE, drop=FALSE) V <- cov(df, use=use, method=method) return(V) } cor.im <- function(..., use = "everything", method = c("pearson", "kendall", "spearman")) { df <- pairs.im(..., plot=FALSE, drop=FALSE) R <- cor(df, use=use, method=method) return(R) } spatstat.core/R/bw.abram.R0000644000176200001440000000343214144333461015066 0ustar liggesusers#' #' bw.abram.R #' #' Abramson bandwidths #' #' $Revision: 1.8 $ $Date: 2019/04/25 05:34:49 $ #' bw.abram <- function(X, h0, ..., at=c("points", "pixels"), hp=h0, pilot=NULL, trim=5, smoother=density.ppp){ stopifnot(is.ppp(X)) at <- match.arg(at) if(missing(h0) || is.null(h0)) { h0 <- bw.ppl(X) } else { check.1.real(h0) stopifnot(h0 > 0) } check.1.real(trim) stopifnot(trim > 0) pilot.data <- X imwin <- as.im(Window(X), ...) if(is.im(pilot)){ if(!compatible.im(imwin,pilot)) stop("'X' and 'pilot' have incompatible spatial domains", call.=FALSE) #' clip the worst small values away pilot[pilot<=0] <- min(pilot[pilot>0]) } else if(is.ppp(pilot)){ if(!compatible.im(imwin,as.im(Window(pilot), ...))) stop("'X' and 'pilot' have incompatible spatial domains", call.=FALSE) pilot.data <- pilot } else if(!is.null(pilot)) stop("if supplied, 'pilot' must be a pixel image or a point pattern", call.=FALSE) if(!is.im(pilot)) { if(is.character(smoother)) { smoother <- get(smoother, mode="function") } else stopifnot(is.function(smoother)) pilot <- smoother(pilot.data,sigma=hp,positive=TRUE,...) } pilot <- pilot/integral(pilot) # scale to probability density pilotvalues <- safelookup(pilot, pilot.data, warn=FALSE) ## geometric mean re-scaler (Silverman, 1986; ch 5). gamma <- exp(mean(log(pilotvalues[pilotvalues > 0])))^(-0.5) switch(at, points = { pilot.X <- safelookup(pilot,X,warn=FALSE) bw <- h0 * pmin((pilot.X^(-0.5))/gamma,trim) }, pixels = { bw <- eval.im(h0 * pmin((pilot^(-0.5))/gamma, trim)) }) return(bw) } spatstat.core/R/anova.ppm.R0000644000176200001440000002745014144333461015302 0ustar liggesusers# # anova.ppm.R # # $Revision: 1.28 $ $Date: 2020/01/30 05:05:52 $ # anova.ppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL nquad <- function(x) { if(is.quad(x)) n.quad(x) else 0 } fmlaString <- function(z) { paste(as.expression(formula(z))) } interString <- function(z) { as.interact(z)$creator } anova.ppm <- function(object, ..., test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) { gripe <- if(warn) do.gripe else dont.gripe if(!is.null(test)) { test <- match.arg(test, c("Chisq", "LRT", "Rao", "score", "F", "Cp")) if(test == "score") test <- "Rao" if(!(test %in% c("Chisq", "LRT", "Rao"))) stop("test=", dQuote(test), "is not yet implemented") } ## trap outmoded usage argh <- list(...) if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) if(!all(sapply(objex, is.ppm))) stop(paste("Arguments must all be", sQuote("ppm"), "objects")) ## all models Poisson? 
pois <- all(sapply(objex, is.poisson.ppm)) gibbs <- !pois ## any models fitted by ippm? newton <- any(sapply(objex, inherits, what="ippm")) if(gibbs && !is.null(test) && test == "Rao") stop("Score test is only implemented for Poisson models", call.=FALSE) ## handle anova for a single object expandedfrom1 <- FALSE if(length(objex) == 1 && (gibbs || newton)) { ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) objex[[n]] <- update(object, fmla) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## all models fitted by same method? fitmethod <- unique(sapply(objex, getElement, name="method")) if(length(fitmethod) > 1) stop(paste("Models were fitted by different methods", commasep(sQuote(fitmethod)), "- comparison is not possible")) ## fitted by MPL or logistic? if(!(fitmethod %in% c("mpl", "logi"))) stop(paste("Not implemented for models fitted by method=", sQuote(fitmethod))) logi <- (fitmethod == "logi") refitargs <- list() fitz <- NULL ## fitted to same quadscheme using same edge correction? if(length(objex) > 1) { ## same data? datas <- lapply(objex, data.ppm) samedata <- all(sapply(datas[-1L], identical, y=datas[[1L]])) if(!samedata) stop("Models were fitted to different datasets") ## same dummy points? quads <- lapply(objex, quad.ppm) samequad <- all(sapply(quads[-1L], identical, y=quads[[1L]])) if(!samequad) { gripe("Models were re-fitted using a common quadrature scheme") sizes <- sapply(quads, nquad) imax <- which.max(sizes) bigQ <- quads[[imax]] refitargs$Q <- bigQ } ## same edge correction? corrxn <- unique(sapply(objex, getElement, name="correction")) if(length(corrxn) > 1) stop(paste("Models were fitting using different edge corrections", commasep(sQuote(corrxn)))) if(corrxn == "border") { rbord <- unique(sapply(objex, getElement, name="rbord")) if(length(rbord) > 1) { gripe("Models were re-fitted using a common value of 'rbord'") refitargs$rbord <- max(rbord) } } ## Extract glmfit objects fitz <- lapply(objex, getglmfit) ## Any trivial models? 
(uniform Poisson) trivial <- sapply(fitz, is.null) if(any(trivial)) refitargs$forcefit <- TRUE ## force all non-trivial models to be fitted using same method ## (all using GLM or all using GAM) isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") refitargs$use.gam <- TRUE refitargs$forcefit <- TRUE } ## finally refit models if(length(refitargs) > 0) { objex <- do.call(lapply, append(list(X=objex, FUN=update), refitargs)) fitz <- lapply(objex, getglmfit) } } ## Ensure GLM/GAM objects all use the same 'subset' subz <- lapply(objex, getglmsubset) if(length(unique(subz)) > 1) { subsub <- Reduce("&", subz) fitz <- lapply(fitz, refittosubset, sub=subsub) gripe("Models were re-fitted after discarding quadrature points", "that were illegal under some of the models") } ## If any models were fitted by ippm we need to correct the df if(newton) { nfree <- sapply(lapply(objex, logLik), attr, which="df") ncanonical <- lengths(lapply(objex, coef)) nextra <- nfree - ncanonical if(is.null(fitz)) fitz <- lapply(objex, getglmfit) for(i in seq_along(fitz)) if(nextra[i] != 0) fitz[[i]]$df.residual <- fitz[[i]]$df.residual - nextra[i] } ## Finally do the appropriate ANOVA if(is.null(fitz)) fitz <- lapply(objex, getglmfit) result <- do.call(anova, append(fitz, list(test=test, dispersion=1))) ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model obj1 <- objex[[1L]] ss <- getglmsubset(obj1) nq <- if(!is.null(ss)) sum(ss) else n.quad(quad.ppm(obj1)) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) 
## change names of models fmlae <- sapply(objex, fmlaString) intrx <- sapply(objex, interString) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, "\t", intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1L]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && gibbs) { fitz <- lapply(objex, getglmfit) usegam <- any(sapply(fitz, inherits, what="gam")) if(usegam) { gripe("Deviance adjustment is not available for gam fits;", "unadjusted composite deviance calculated.") } else { ## issue warning, if not already given if(warn) warn.once("anovaAdjust", "anova.ppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="internals", new.coef=thetaDot, fine=fine) J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) H <- if(!logi) JH$A1 else JH$Slog G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- residuals(bigger, type="score", new.coef=thetaDot, drop=TRUE) U <- integral.msr(Res) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ## ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo HooUo <- Hoo %*% Uo ScoreStat <- t(HooUo) %*% solve(Goo) %*% HooUo ## cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) cfac[i] <- ScoreStat/(t(HooUo) %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } if(newton) { ## calculation does not include 'covfunargs' cfa <- lapply(lapply(objex, getElement, name="covfunargs"), names) cfa <- unique(unlist(cfa)) action <- if(adjust && gibbs) "Adjustment to composite likelihood" else if(test == "Rao") "Score test calculation" else NULL if(!is.null(action)) gripe(action, "does not account for", "irregular trend parameters (covfunargs)", commasep(sQuote(cfa))) } } return(result) } refittosubset <- function(fut, sub) { etf <- environment(terms(fut)) gd <- get("glmdata", envir=etf) gd$.mpl.SUBSET <- sub assign("glmdata", gd, envir=etf) up <- update(fut, evaluate=FALSE) eval(up, 
envir=etf) } anova.ppm }) spatstat.core/R/softcore.R0000644000176200001440000000727414144333464015234 0ustar liggesusers# # # softcore.S # # $Revision: 2.16 $ $Date: 2018/03/15 07:37:41 $ # # Soft core processes. # # Softcore() create an instance of a soft core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Softcore <- local({ BlankSoftcore <- list( name = "Soft core process", creator = "Softcore", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { p <- -d^(-2/par$kappa) } else { # expand around sigma0 and set large negative numbers to -Inf drat <- d/sig0 p <- -drat^(-2/par$kappa) p[p < -25] <- -Inf } return(p) }, par = list(kappa = NULL, sigma0=NA), # filled in later parnames = c("Exponent kappa", "Initial approximation to sigma"), hasInf = TRUE, selfstart = function(X, self) { # self starter for Softcore if(npoints(X) < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Softcore model")) return(self) } kappa <- self$par$kappa if(!is.na(sigma0 <- self$par$sigma0)) { # value fixed by user or previous invocation # check it if((md/sigma0)^(-2/kappa) > 25) warning(paste("Initial approximation sigma0 is too large;", "some data points will have zero probability")) return(self) } # take sigma0 = minimum interpoint distance Softcore(kappa=kappa, sigma0=md) }, init = function(self) { kappa <- self$par$kappa if(!is.numeric(kappa) || length(kappa) != 1 || kappa <= 0 || kappa >= 1) stop(paste("Exponent kappa must be a", "positive number less than 1")) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) sigma <- theta^(self$par$kappa/2) if(!is.na(sig0 <- self$par$sigma0)) sigma <- sigma * sig0 return(list(param=list(sigma=sigma), inames="interaction parameter sigma", printable=signif(sigma))) }, valid = function(coeffs, self) { theta <- coeffs[1] return(is.finite(theta) && (theta >= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
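  ## (If conditional=TRUE, the statistic is computed conditionally on
  ##  the configuration in the border region: either by restricting
  ##  the data and window to the eroded window (restrict=TRUE), or by
  ##  using only quadrature points lying more than rbord from the
  ##  boundary.)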
{ # distance d beyond which log(interaction factor) <= epsilon if(anyNA(coeffs) || epsilon == 0) return(Inf) theta <- as.numeric(coeffs[1]) kappa <- self$par$kappa sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 return(sig0 * (theta/epsilon)^(kappa/2)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral kappa <- self$par$kappa sigma <- (self$interpret)(coeffs, self)$param$sigma return(pi * (sigma^2) * gamma(1 - kappa)) }, version=NULL # filled in later ) class(BlankSoftcore) <- "interact" Softcore <- function(kappa, sigma0=NA) { instantiate.interact(BlankSoftcore, list(kappa=kappa, sigma0=sigma0)) } Softcore <- intermaker(Softcore, BlankSoftcore) Softcore }) spatstat.core/R/Gcom.R0000644000176200001440000001534014144333461014263 0ustar liggesusers# # Gcom.R # # Model compensator of G # # $Revision: 1.9 $ $Date: 2018/10/19 03:29:05 $ # ################################################################################ # Gcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "Hanisch"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) restrict <- isTRUE(restrict) if(restrict && !conditional) { warning("restrict=TRUE ignored because conditional=FALSE", call.=FALSE) restrict <- FALSE } # rfixed <- !is.null(r) || !is.null(breaks) # selection of edge corrections # correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", Hanisch="Hanisch", hanisch="Hanisch", best="Hanisch"), multi=TRUE) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? 
if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # adjustments to account for restricted domain if(conditional && spatstat.options("eroded.intensity")) { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npts.used/area.used } else { npts.used <- npts area.used <- areaW lambda.used <- lambda } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", if(restrict) Wfree else Win, lambda) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree rescts <- rescts[retain] } # absolute weight for continuous integrals # wc <- -rescts # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) dIJ <- nn$dist I <- seq(U$n) # J <- nn$which DD <- Z <- (I <= X$n) # TRUE for data points wcIJ <- -rescts # determine whether a quadrature point will be used in integral okI <- USED[I] # initialise fv object r <- breaks$r df <- data.frame(r=r, pois=1 - exp(-pi * lambda.used * r^2)) G <- fv(df, "r", substitute(G(r), NULL), "pois", . ~ r, alim=c(0, rmax), labl=c("r","%s[pois](r)"), desc=c("distance argument r", "theoretical Poisson %s"), fname="G") # distance to boundary b <- bI <- bdist.points(U) dotnames <- character(0) # Border method if("border" %in% correction) { # reduced sample for G(r) of data only ZUSED <- Z & USED RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[ZUSED], breaks) Gb <- RSX$numerator/RSX$denom.count G <- bind.fv(G, data.frame(border=Gb), "hat(%s)[bord](r)", "border-corrected nonparametric estimate of %s", "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[ZUSED], rep.int(1, sum(ZUSED)), breaks, fatal=FALSE) Gbcom <- RSD$numerator/(1 + RSD$denominator) G <- bind.fv(G, data.frame(bcom=Gbcom), "bold(C)~hat(%s)[bord](r)", "model compensator of border-corrected %s", "bcom") dotnames <- c("border", "bcom", "pois") } # Hanisch correction for data if("Hanisch" %in% correction) { nnd <- dIJ[DD & okI] bdry <- bI[DD & okI] # weights ea <- eroded.areas(Win, rvals) if(algo == "reweighted") { # replace weight(r) by weight(max(rbord,r)) ea[rvals < rbord] <- eroded.areas(Win, rbord) } # compute x <- nnd[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val) H <- (1/lambda.used) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(han=H), "hat(%s)[han](r)", "Hanisch correction estimate of %s", "han") # Hanisch correction for adjustment integral nnd <- dIJ[okI] bdry <- bI[okI] wt <- wcIJ[okI] x <- nnd[nnd <= bdry] wt <- wt[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val, weights=wt[x <= rmax]) lambdaplus <- (npts.used + 1)/area.used Hint <- (1/lambdaplus) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(hcom=Hint), "bold(C)~hat(%s)[han](r)", "model compensator of Hanisch-corrected %s", "hcom") # pseudovariance for Hanisch residual Hvar <- (1/lambdaplus^2) * cumsum(h/ea^2) G <- bind.fv(G, 
data.frame(hvar=Hvar), "bold(C)^2~hat(%s)[han](r)", "Poincare variance for Hanisch corrected %s", "hcom") # default plot does not show all components dotnames <- c("han", "hcom", dotnames) } # compute sensible 'alim' endpoint <- function(y, r, f) { min(r[y >= f * max(y)]) } amax <- endpoint(G$pois, G$r, 0.99) if(length(dotnames) > 0) amax <- max(amax, unlist(lapply(as.data.frame(G)[,dotnames,drop=FALSE], endpoint, r=r, f=0.9))) attr(G, "alim") <- c(0, amax) # fvnames(G, ".") <- dotnames unitname(G) <- unitname(X) # secret tag used by 'Gres' attr(G, "maker") <- "Gcom" return(G) } spatstat.core/R/segtest.R0000644000176200001440000000351314144333464015056 0ustar liggesusers#' #' segtest.R #' #' Monte Carlo test of segregation for multitype patterns #' #' $Revision: 1.5 $ $Date: 2021/09/26 08:59:09 $ #' segregation.test <- function(X, ...) { UseMethod("segregation.test") } segregation.test.ppp <- function(X, ..., nsim=19, permute=TRUE, verbose=TRUE, Xname) { if(missing(Xname)) Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) verboten <- c("at", "relative", "se", "leaveoneout", "casecontrol", "case", "control") if(any(nyet <- (verboten %in% names(list(...))))) stop(paste(ngettext(sum(nyet), "Argument", "Arguments"), commasep(sQuote(verboten[nyet])), "cannot be used")) lam <- intensity(X) pbar <- lam/sum(lam) np <- npoints(X) nt <- length(pbar) pbar <- matrix(pbar, byrow=TRUE, nrow=np, ncol=nt) if(verbose) cat("Computing observed value... ") phat <- relrisk(X, at="points", ..., casecontrol=FALSE) obs <- sum((phat-pbar)^2) if(verbose) { cat(paste("Done.\nComputing", nsim, "simulated values... ")) pstate <- list() } sim <- numeric(nsim) for(i in 1:nsim) { Xsim <- rlabel(X, permute=permute) phatsim <- relrisk(Xsim, at="points", ..., casecontrol=FALSE) if(permute) pbarsim <- pbar else { lamsim <- intensity(Xsim) pbarsim <- lamsim/sum(lamsim) pbarsim <- matrix(pbarsim, byrow=TRUE, nrow=np, ncol=nt) } sim[i] <- sum((phatsim - pbarsim)^2) if(verbose) pstate <- progressreport(i, nsim, state=pstate) } if(verbose) cat("Done.\n") p.value <- (1+sum(sim >= obs))/(1+nsim) names(obs) <- "T" out <- list(statistic=obs, p.value=p.value, method="Monte Carlo test of spatial segregation of types", data.name=Xname) class(out) <- "htest" return(out) } spatstat.core/R/kernels.R0000644000176200001440000002215014144333462015037 0ustar liggesusers# # kernels.R # # rXXX, dXXX, pXXX and qXXX for kernels # # $Revision: 1.19 $ $Date: 2018/06/07 05:42:54 $ # match.kernel <- function(kernel) { kernel.map <- c(Gaussian ="gaussian", gaussian ="gaussian", Normal ="gaussian", normal ="gaussian", rectangular ="rectangular", triangular ="triangular", Epanechnikov="epanechnikov", epanechnikov="epanechnikov", biweight ="biweight", cosine ="cosine", optcosine ="optcosine" ) ker <- pickoption("kernel", kernel, kernel.map) return(ker) } kernel.factor <- function(kernel="gaussian") { # This function returns the factor c such that # h = c * sigma # where sigma is the standard deviation of the kernel, and # h is the corresponding bandwidth parameter as conventionally defined. # Conventionally h is defined as a scale factor # relative to the `standard form' of the kernel, namely the # form with support [-1,1], except in the Gaussian case where # the standard form is N(0,1). # Thus the standard form of the kernel (h=1) has standard deviation 1/c. # The kernel with standard deviation 1 has support [-c,c] # except for gaussian case. 
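# For example, for the Epanechnikov kernel c = sqrt(5): a kernel with
# standard deviation sd has bandwidth h = sqrt(5) * sd and support
# [-sqrt(5)*sd, sqrt(5)*sd]; conversely sd = h/sqrt(5).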
  kernel <- match.kernel(kernel)
  switch(kernel,
         gaussian     = 1,
         rectangular  = sqrt(3),
         triangular   = sqrt(6),
         epanechnikov = sqrt(5),
         biweight     = sqrt(7),
         cosine       = 1/sqrt(1/3 - 2/pi^2),
         optcosine    = 1/sqrt(1 - 8/pi^2))
}

rkernel <- function(n, kernel="gaussian", mean=0, sd=1) {
  kernel <- match.kernel(kernel)
  if(kernel == "gaussian")
    return(rnorm(n, mean=mean, sd=sd))
  # inverse cdf transformation
  u <- runif(n)
  qkernel(u, kernel, mean=mean, sd=sd)
}

dkernel <- function(x, kernel="gaussian", mean=0, sd=1) {
  kernel <- match.kernel(kernel)
  stopifnot(is.numeric(x))
  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
  a <- sd * kernel.factor(kernel)
  y <- abs(x-mean)/a
  dens <- switch(kernel,
                 gaussian = {
                   dnorm(y)
                 },
                 rectangular = {
                   ifelse(y < 1, 1/2, 0)
                 },
                 triangular = {
                   ifelse(y < 1, (1 - y), 0)
                 },
                 epanechnikov = {
                   ifelse(y < 1, (3/4) * (1 - y^2), 0)
                 },
                 biweight = {
                   ifelse(y < 1, (15/16) * (1 - y^2)^2, 0)
                 },
                 cosine = {
                   ifelse(y < 1, (1 + cos(pi * y))/2, 0)
                 },
                 optcosine = {
                   ifelse(y < 1, (pi/4) * cos(pi * y/2), 0)
                 })
  dens/a
}

pkernel <- function(q, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE) {
  kernel <- match.kernel(kernel)
  stopifnot(is.numeric(q))
  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
  a <- sd * kernel.factor(kernel)
  y <- (q-mean)/a
  switch(kernel,
         gaussian = {
           pnorm(y, lower.tail=lower.tail)
         },
         rectangular = {
           punif(y, min=-1, max=1, lower.tail=lower.tail)
         },
         triangular = {
           p <- ifelse(y < -1, 0,
                  ifelse(y > 1, 1,
                    ifelse(y < 0, y + y^2/2 + 1/2, y - y^2/2 + 1/2)))
           if(lower.tail) p else (1 - p)
         },
         epanechnikov = {
           p <- ifelse(y < -1, 0,
                  ifelse(y > 1, 1,
                    (2 + 3 * y - y^3)/4))
           if(lower.tail) p else (1 - p)
         },
         biweight = {
           p <- ifelse(y < -1, 0,
                  ifelse(y > 1, 1,
                    (15 * y - 10 * y^3 + 3 * y^5 + 8)/16))
           if(lower.tail) p else (1 - p)
         },
         cosine = {
           p <- ifelse(y < -1, 0,
                  ifelse(y > 1, 1,
                    (y + sin(pi * y)/pi + 1)/2))
           if(lower.tail) p else (1 - p)
         },
         optcosine = {
           p <- ifelse(y < -1, 0,
                  ifelse(y > 1, 1,
                    (sin(pi * y/2) + 1)/2))
           if(lower.tail) p else (1 - p)
         })
}

qkernel <- function(p, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE) {
  kernel <- match.kernel(kernel)
  stopifnot(is.numeric(p))
  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
  a <- sd * kernel.factor(kernel)
  if(!lower.tail)
    p <- 1 - p
  # 'p' is now a lower-tail probability in every case, so the quantile
  # functions below must be applied with their default lower.tail=TRUE;
  # passing lower.tail through again would undo the conversion above
  # and return the wrong tail.
  y <- switch(kernel,
              gaussian = {
                qnorm(p)
              },
              rectangular = {
                qunif(p, min=-1, max=1)
              },
              triangular = {
                ifelse(p < 1/2, sqrt(2 * p) - 1, 1 - sqrt(2 * (1-p)))
              },
              epanechnikov = {
                # solve using `polyroot'
                yy <- numeric(n <- length(p))
                yy[p == 0] <- -1
                yy[p == 1] <- 1
                inside <- (p != 0) & (p != 1)
                # coefficients of polynomial (2 + 3 y - y^3)/4
                z <- c(2, 3, 0, -1)/4
                for(i in seq(n)[inside]) {
                  sol <- polyroot(z - c(p[i], 0, 0, 0))
                  ok <- abs(Im(sol)) < 1e-6
                  realpart <- Re(sol)
                  ok <- ok & (abs(realpart) <= 1)
                  if(sum(ok) != 1)
                    stop(paste("Internal error:", sum(ok), "roots of polynomial"))
                  yy[i] <- realpart[ok]
                }
                yy
              },
              biweight = {
                # solve using `polyroot'
                yy <- numeric(n <- length(p))
                yy[p == 0] <- -1
                yy[p == 1] <- 1
                inside <- (p != 0) & (p != 1)
                # coefficients of polynomial (8 + 15 * y - 10 * y^3 + 3 * y^5)/16
                z <- c(8, 15, 0, -10, 0, 3)/16
                for(i in seq(n)[inside]) {
                  sol <- polyroot(z - c(p[i], 0, 0, 0, 0, 0))
                  ok <- abs(Im(sol)) < 1e-6
                  realpart <- Re(sol)
                  ok <- ok & (abs(realpart) <= 1)
                  if(sum(ok) != 1)
                    stop(paste("Internal error:", sum(ok), "roots of polynomial"))
                  yy[i] <- realpart[ok]
                }
                yy
              },
              cosine = {
                # solve using `uniroot'
                g <- function(y, pval) { (y + sin(pi * y)/pi + 1)/2 - pval }
                yy <- numeric(n <- length(p))
                yy[p == 0] <- -1
                yy[p == 1] <- 1
                inside
<- (p != 0) & (p != 1) for(i in seq(n)[inside]) yy[i] <- uniroot(g, c(-1,1), pval=p[i])$root yy }, optcosine = { (2/pi) * asin(2 * p - 1) }) return(mean + a * y) } #' integral of t^m k(t) dt from -Inf to r #' where k(t) is the standard kernel with support [-1,1] #' was: nukernel(r, m, kernel) kernel.moment <- local({ kernel.moment <- function(m, r, kernel="gaussian") { ker <- match.kernel(kernel) check.1.integer(m) #' restrict to support if(ker != "gaussian") { r <- pmin(r, 1) r <- pmax(r, -1) } if(!(m %in% c(0,1,2)) || (ker %in% c("cosine", "optcosine"))) { ## use generic integration neginf <- if(ker == "gaussian") -10 else -1 result <- numeric(length(r)) for(i in seq_along(r)) result[i] <- integralvalue(kintegrand, lower=neginf, upper=r[i], m=m, ker=ker) return(result) } switch(ker, gaussian={ if(m == 0) return(pnorm(r)) else if(m == 1) return(-dnorm(r)) else return(pnorm(r) - r * dnorm(r)) }, rectangular = { if(m == 0) return((r + 1)/2) else if(m == 1) return((r^2 - 1)/4) else return((r^3 + 1)/6) }, triangular={ m1 <- m+1 m2 <- m+2 const <- ((-1)^m1)/m1 + ((-1)^m2)/m2 answer <- (r^m1)/m1 + ifelse(r < 0, 1, -1) * (r^m2)/m2 - const return(answer) }, epanechnikov = { if(m == 0) return((2 + 3*r - r^3)/4) else if(m == 1) return((-3 + 6*r^2 - 3*r^4)/16) else return(( 2 + 5*r^3 - 3* r^5)/20) }, biweight = { if(m == 0) return((3*r^5 - 10*r^3 + 15*r + 8)/16) else if(m == 1) return((5*r^6 - 15*r^4 + 15*r^2 -5)/32) else return((15*r^7 - 42*r^5 + 35*r^3 + 8)/112) }, # never reached! cosine={stop("Sorry, not yet implemented for cosine kernel")}, optcosine={stop("Sorry, not yet implemented for optcosine kernel")} ) } integralvalue <- function(...) integrate(...)$value kintegrand <- function(x, m, ker) { (x^m) * dkernel(x, ker, mean=0, sd=1/kernel.factor(ker)) } kernel.moment }) kernel.squint <- function(kernel="gaussian", bw=1) { kernel <- match.kernel(kernel) check.1.real(bw) RK <- switch(kernel, gaussian = 1/(2 * sqrt(pi)), rectangular = sqrt(3)/6, triangular = sqrt(6)/9, epanechnikov = 3/(5 * sqrt(5)), biweight = 5 * sqrt(7)/49, cosine = 3/4 * sqrt(1/3 - 2/pi^2), optcosine = sqrt(1 - 8/pi^2) * pi^2/16) return(RK/bw) } spatstat.core/R/rmh.R0000644000176200001440000000010714144333463014161 0ustar liggesusers# # generic rmh rmh <- function(model, ...){ UseMethod("rmh") } spatstat.core/R/slrm.R0000644000176200001440000007061014144333464014357 0ustar liggesusers# # slrm.R # # Spatial Logistic Regression # # $Revision: 1.57 $ $Date: 2021/10/30 05:19:17 $ # slrm <- function(formula, ..., data=NULL, offset=TRUE, link="logit", dataAtPoints=NULL, splitby=NULL) { # remember call CallInfo <- list(callstring = short.deparse(sys.call()), cl = match.call(), formula = formula, offset=offset, link=link, splitby=splitby, dotargs=list(...)) if(!(link %in% c("logit", "cloglog"))) stop(paste("Unrecognised link", dQuote(link))) ########### INTERPRET FORMULA ############################## if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) # check formula has LHS and RHS. 
Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- rhs <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") varnames <- unique(variablesinformula(trend)) if(isTRUE(CallInfo$dotargs$save.all.vars)) varnames <- union(varnames, names(data)) specials <- c("x", "y", "logpixelarea") covnames <- varnames[!(varnames %in% specials)] # add 'splitby' to covariate names if(!is.null(splitby)) { if(!is.character(splitby) || length(splitby) != 1) stop("splitby should be a single character string") covnames <- unique(c(covnames, splitby)) } CallInfo$responsename <- Yname CallInfo$varnames <- varnames CallInfo$covnames <- covnames # Parent environment parenv <- environment(formula) ######## FIND DATA AND RESHAPE ####################### Data <- slr.prepare(CallInfo, parenv, data, dataAtPoints, splitby) # W <- Data$W df <- Data$df nY <- npoints(Data$response) ######## FIT MODEL ############################### dformula <- formula if(offset) { # insert offset term in formula rhs <- paste(as.character(rhs), collapse=" ") rhs <- paste(c(rhs, "offset(logpixelarea)"), collapse="+") dformula <- as.formula(paste(Yname, rhs)) } linkname <- link FIT <- glm(dformula, family=binomial(link=linkname), data=df, na.action=na.exclude) result <- list(call = CallInfo$cl, CallInfo = CallInfo, Data = Data, Fit = list(FIT=FIT, dformula=dformula), terms = terms(formula), nobs = nY) class(result) <- c("slrm", class(result)) return(result) } ################ UTILITY TO FIND AND RESHAPE DATA ################# slr.prepare <- local({ slr.prepare <- function(CallInfo, envir, data, dataAtPoints=NULL, splitby=NULL, clip=TRUE) { ## CallInfo is produced by slrm() ## envir is parent environment of model formula ## data is 'data' argument that takes precedence over 'envir' ## 'clip' is TRUE if the data should be clipped to the domain of Y Yname <- CallInfo$responsename ## varnames <- CallInfo$varnames covnames <- CallInfo$covnames dotargs <- CallInfo$dotargs ## ## Get the response point pattern Y Y <- getobj(Yname, envir, data) if(!is.ppp(Y)) stop(paste("The response", sQuote(Yname), "must be a point pattern")) ## if(!is.null(dataAtPoints)) { dataAtPoints <- as.data.frame(dataAtPoints) if(nrow(dataAtPoints) != npoints(Y)) stop(paste("dataAtPoints should have one row for each point in", dQuote(Yname))) } ## Find the covariates ncov <- length(covnames) covlist <- lapply(as.list(covnames), getobj, env = envir, dat=data) names(covlist) <- covnames ## Each covariate should be an image, a window, a function, or single number if(ncov == 0) { isim <- isowin <- ismask <- isfun <- isnum <- isspatial <- israster <- logical(0) } else { isim <- sapply(covlist, is.im) isowin <- sapply(covlist, is.owin) ismask <- sapply(covlist, is.mask) isfun <- sapply(covlist, is.function) isspatial <- isim | isowin | isfun israster <- isim | ismask isnum <- sapply(covlist, is.numeric) & (lengths(covlist) == 1) } if(!all(ok <- (isspatial | isnum))) { n <- sum(!ok) stop(paste(ngettext(n, "The argument", "Each of the arguments"), commasep(sQuote(covnames[!ok])), "should be either an image, a window, or a single number")) } ## 'splitby' if(!is.null(splitby)) { splitwin <- covlist[[splitby]] if(!is.owin(splitwin)) stop("The splitting covariate must be a window") ## ensure it is a polygonal window covlist[[splitby]] <- splitwin <- 
as.polygonal(splitwin) ## delete splitting covariate from lists to be processed issplit <- (covnames == splitby) isspatial[issplit] <- FALSE israster[issplit] <- FALSE } ## ## nnum <- sum(isnum) ## nspatial <- sum(isspatial) nraster <- sum(israster) ## numlist <- covlist[isnum] spatiallist <- covlist[isspatial] rasterlist <- covlist[israster] ## numnames <- names(numlist) spatialnames <- names(spatiallist) ## rasternames <- names(rasterlist) ## ######## CONVERT TO RASTER DATA ############################### ## determine spatial domain & common resolution: convert all data to it if(length(dotargs) > 0 || nraster == 0) { ## Pixel resolution is determined by explicit arguments if(clip) { ## Window extent is determined by response point pattern D <- as.owin(Y) } else { ## Window extent is union of domains of data domains <- lapply(append(spatiallist, list(Y)), as.owin) D <- do.call(union.owin, domains) } ## Create template mask W <- do.call.matched(as.mask, append(list(w=D), dotargs)) ## Convert all spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } else { ## Pixel resolution is determined implicitly by covariate data W <- do.call(commonGrid, rasterlist) if(clip) { ## Restrict data to spatial extent of response point pattern W <- intersect.owin(W, as.owin(Y)) } ## Adjust spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } ## images containing coordinate values xcoordim <- as.im(function(x,y){x}, W=W) ycoordim <- as.im(function(x,y){y}, W=W) ## ## create a list of covariate images, with names as in formula covimages <- append(list(x=xcoordim, y=ycoordim), spatiallist) basepixelarea <- W$xstep * W$ystep ######## ASSEMBLE DATA FRAME ############################### if(is.null(splitby)) { df <- slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, basepixelarea) sumYloga <- Y$n * log(basepixelarea) serial <- attr(df, "serial") Yserial <- attr(df, "Yserial") } else { ## fractional pixel areas pixsplit <- pixellate(splitwin, W) splitpixelarea <- as.vector(as.matrix(pixsplit)) ## determine which points of Y are inside/outside window ins <- inside.owin(Y$x, Y$y, splitwin) ## split processing dfIN <- slrAssemblePixelData(Y[ins], Yname, W, covimages, dataAtPoints[ins, ], splitpixelarea) serialIN <- attr(dfIN, "serial") YserialIN <- attr(dfIN, "Yserial") dfIN[[splitby]] <- TRUE dfOUT <- slrAssemblePixelData(Y[!ins], Yname, W, covimages, dataAtPoints[!ins, ], basepixelarea - splitpixelarea) serialOUT <- attr(dfOUT, "serial") YserialOUT <- attr(dfOUT, "Yserial") dfOUT[[splitby]] <- FALSE df <- rbind(dfIN, dfOUT) serial <- c(serialIN, serialOUT) Yserial <- c(YserialIN, YserialOUT) ## sum of log pixel areas associated with points Ysplit <- pixsplit[Y] sumYloga <- sum(log(ifelseXY(ins, Ysplit, basepixelarea - Ysplit))) } ## tack on any numeric values df <- do.call(cbind, append(list(df), numlist)) ### RETURN ALL Data <- list(response=Y, covariates=covlist, spatialnames=spatialnames, numnames=numnames, W=W, df=df, serial=serial, Yserial=Yserial, sumYloga=sumYloga, dataAtPoints=dataAtPoints) return(Data) } getobj <- function(nama, env, dat) { if(!is.null(dat) && !is.null(x <- dat[[nama]])) return(x) else return(get(nama, envir=env)) } convert <- function(x,W) { if(is.im(x) || is.function(x)) return(as.im(x,W)) if(is.owin(x)) return(as.im(x, W, value=TRUE, na.replace=FALSE)) return(NULL) } slr.prepare }) ## ............................................................. 
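## A minimal usage sketch of the fitting machinery above, kept inside
## if(FALSE) so it is never executed at build or load time. The names
## 'X', 'Zim' and 'fit' are illustrative only, not part of the package.
if(FALSE) {
  X <- rpoispp(100)                                    # a point pattern
  Zim <- as.im(function(x, y) { x - y }, W=Window(X))  # a covariate image
  fit <- slrm(X ~ Zim, dimyx=64)   # '...' arguments are passed to as.mask()
  print(fit)
  plot(predict(fit, type="intensity"))
}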
slrAssemblePixelData <- local({

  slrAssemblePixelData <- function(Y, Yname, W,
                                   covimages, dataAtPoints, pixelarea) {
    #' pixellate point pattern
    PY <- pixellate(Y, W=W, savemap=TRUE)
    IY <- eval.im(as.integer(PY > 0))
    #'
    if(!is.null(dataAtPoints)) {
      #' overwrite pixel entries for data points using exact values
      #' spatial coordinates
      covimages[["x"]][Y] <- Y$x
      covimages[["y"]][Y] <- Y$y
      #' other values provided
      enames <- colnames(dataAtPoints)
      relevant <- enames %in% names(covimages)
      for(v in enames[relevant]) {
        cova <- covimages[[v]]
        cova[Y] <- dataAtPoints[, v, drop=TRUE]
        covimages[[v]] <- cova
      }
    }
    #' assemble list of all images
    Ylist <- list(IY)
    names(Ylist) <- Yname
    allimages <- append(Ylist, covimages)
    #' extract pixel values of each image, convert to data frame
    pixdata <- lapply(allimages, pixelvalues)
    df <- as.data.frame(pixdata)
    serial <- seq_len(nrow(df))
    ## add log(pixel area) column
    if(length(pixelarea) == 1) {
      df <- cbind(df, logpixelarea=log(pixelarea))
    } else {
      ok <- (pixelarea > 0)
      df <- cbind(df[ok, ], logpixelarea=log(pixelarea[ok]))
      serial <- serial[ok]
    }
    attr(df, "serial") <- serial
    #' map original data points to pixels
    #' (column-major pixel serial number: row + (col - 1) * nrow)
    Yrowcol <- attr(PY, "map")
    attr(df, "Yserial") <- Yrowcol[,"row"] + (Yrowcol[,"col"] - 1L) * nrow(PY)
    return(df)
  }

  pixelvalues <- function(z) {
    v <- as.vector(as.matrix(z))
    if(z$type != "factor") return(v)
    lev <- levels(z)
    return(factor(v, levels=seq_along(lev), labels=lev))
  }

  slrAssemblePixelData
})

## ................. Methods ...................................

is.slrm <- function(x) {
  inherits(x, "slrm")
}

coef.slrm <- function(object, ...) {
  coef(object$Fit$FIT)
}

print.slrm <- function(x, ...) {
  lk <- x$CallInfo$link
  switch(lk,
         logit= {
           splat("Fitted spatial logistic regression model")
         },
         cloglog= {
           splat("Fitted spatial regression model (complementary log-log)")
         },
         {
           splat("Fitted spatial regression model")
           splat("Link =", dQuote(lk))
         })
  cat("Formula:\t")
  print(x$CallInfo$formula)
  splat("Fitted coefficients:")
  print(coef(x))
  return(invisible(NULL))
}

summary.slrm <- function(object, ...) {
  y <- object$CallInfo[c("link", "formula", "callstring")]
  co <- coef(object)
  se <- sqrt(diag(vcov(object)))
  two <- qnorm(0.975)
  lo <- co - two * se
  hi <- co + two * se
  zval <- co/se
  pval <- 2 * pnorm(abs(zval), lower.tail = FALSE)
  psig <- cut(pval, c(0, 0.001, 0.01, 0.05, 1),
              labels = c("***", "**", "*", " "),
              include.lowest = TRUE)
  y$coefs.SE.CI <- data.frame(Estimate = co,
                              S.E. = se,
                              CI95.lo = lo,
                              CI95.hi = hi,
                              Ztest = psig,
                              Zval = zval)
  class(y) <- c(class(y), "summary.slrm")
  return(y)
}

print.summary.slrm <- function(x, ...) {
  switch(x$link,
         logit= {
           splat("Fitted spatial logistic regression model")
         },
         cloglog= {
           splat("Fitted spatial regression model (complementary log-log)")
         },
         {
           splat("Fitted spatial regression model")
           splat("Link =", dQuote(x$link))
         })
  cat("Call:\t")
  print(x$callstring)
  cat("Formula:\t")
  print(x$formula)
  splat("Fitted coefficients:\t")
  print(x$coefs.SE.CI)
  return(invisible(NULL))
}

coef.summary.slrm <- function(object, ...) {
  object$coefs.SE.CI
}

logLik.slrm <- function(object, ..., adjust=TRUE) {
  FIT <- object$Fit$FIT
  ll <- -deviance(FIT)/2
  if(adjust) {
    sumYloga <- object$Data$sumYloga
    ll <- ll - sumYloga
  }
  attr(ll, "df") <- length(coef(object))
  class(ll) <- "logLik"
  return(ll)
}

fitted.slrm <- function(object, ...) {
  if(length(list(...)) > 0)
    warning("second argument (and any subsequent arguments) ignored")
  predict(object, type="probabilities")
}

intensity.slrm <- function(X, ...)
{ Z <- predict(X, type="intensity", ..., newdata=NULL, window=NULL) if(is.stationary(X)) Z <- mean(Z) return(Z) } predict.slrm <- function(object, ..., type="intensity", newdata=NULL, window=NULL) { type <- pickoption("type", type, c(probabilities="probabilities", link="link", intensity="intensity", lambda="intensity")) FIT <- object$Fit$FIT link <- object$CallInfo$link splitby <- object$CallInfo$splitby Yname <- object$CallInfo$responsename W <- object$Data$W df <- object$Data$df loga <- df$logpixelarea if(!is.null(window)) window <- as.owin(window) if(is.null(newdata) && is.null(window) && is.null(splitby)) { # fitted pixel values from existing fit switch(type, probabilities={ values <- fitted(FIT) }, link={ values <- predict(FIT, type="link") }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { linkvalues <- predict(FIT, type="link") values <- exp(linkvalues - loga) } else { probs <- fitted(FIT) values <- -log(1-probs)/exp(loga) } } ) out <- im(values, xcol=W$xcol, yrow=W$yrow, unitname=unitname(W)) return(out) } else { ## prediction from new data and/or at new locations if(is.null(newdata)) { ## prediction using existing covariates, at new locations newdata <- object$Data$covariates } else { ## prediction with completely new data stopifnot(is.list(newdata)) } ## ensure newdata includes response pattern to placate internal code if(!(Yname %in% names(newdata))) newdata[[Yname]] <- ppp(window=window %orifnull% W) ## Update arguments that may affect pixel resolution CallInfo <- object$CallInfo CallInfo$dotargs <- resolve.defaults(list(...), CallInfo$dotargs) ## prevent pixel splitting CallInfo$splitby <- NULL ## process new data newData <- slr.prepare(CallInfo, environment(CallInfo$formula), newdata, clip=!is.null(window)) newdf <- newData$df newW <- newData$W newloga <- newdf$logpixelarea ## avoid NA etc npixel <- nrow(newdf) ok <- complete.cases(newdf) if(!all(ok)) { newdf <- newdf[ok, , drop=FALSE] newloga <- newloga[ok] } ## compute link values linkvalues <- predict(FIT, newdata=newdf, type="link") ## transform to desired scale linkinv <- family(FIT)$linkinv switch(type, probabilities={ values <- linkinv(linkvalues) }, link={ values <- linkvalues }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { values <- exp(linkvalues - newloga) } else { probs <- linkinv(linkvalues) values <- -log(1-probs)/exp(newloga) } } ) ## form image v <- rep.int(NA_real_, npixel) v[ok] <- values out <- im(v, xcol=newW$xcol, yrow=newW$yrow, unitname=unitname(W)) return(out) } } plot.slrm <- function(x, ..., type="intensity") { xname <- short.deparse(substitute(x)) y <- predict(x, type=type) dont.complain.about(y) do.call(plot.im, resolve.defaults(list(x=quote(y)), list(...), list(main=xname))) } formula.slrm <- function(x, ...) { f <- x$CallInfo$formula return(f) } terms.slrm <- function(x, ...) { terms(formula(x), ...) } labels.slrm <- function(object, ...) { # extract fitted trend coefficients co <- coef(object) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } deviance.slrm <- function(object, ...) { deviance(object$Fit$FIT, ...) } extractAIC.slrm <- function (fit, scale = 0, k = 2, ...) 
{ edf <- length(coef(fit)) aic <- AIC(fit) c(edf, aic + (k - 2) * edf) } model.frame.slrm <- function(formula, ...) { FIT <- formula$Fit$FIT mf <- model.frame(FIT, ...) return(mf) } model.matrix.slrm <- function(object,..., keepNA=TRUE) { FIT <- object$Fit$FIT mm <- model.matrix(FIT, ...) if(!keepNA) return(mm) df <- object$Data$df comp <- complete.cases(df) if(all(comp)) return(mm) if(sum(comp) != nrow(mm)) stop("Internal error in patching NA's") mmplus <- matrix(NA, nrow(df), ncol(mm)) mmplus[comp, ] <- mm colnames(mmplus) <- colnames(mm) return(mmplus) } model.images.slrm <- function(object, ...) { mm <- model.matrix(object, ...) mm <- as.data.frame(mm) Data <- object$Data W <- Data$W serial <- Data$serial splitby <- object$CallInfo$splitby blank <- as.im(NA_real_, W) assignbyserial <- function(values, serial, template) { Z <- template Z$v[serial] <- values return(Z) } if(is.null(splitby)) { result <- lapply(as.list(mm), assignbyserial, serial=serial, template=blank) } else { df <- Data$df IN <- as.logical(df[[splitby]]) OUT <- !IN mmIN <- mm[IN, , drop=FALSE] mmOUT <- mm[OUT, , drop=FALSE] resultIN <- lapply(as.list(mmIN), assignbyserial, serial=serial[IN], template=blank) resultOUT <- lapply(as.list(mmOUT), assignbyserial, serial=serial[OUT], template=blank) names(resultIN) <- paste(names(resultIN), splitby, "TRUE", sep="") names(resultOUT) <- paste(names(resultOUT), splitby, "FALSE", sep="") result <- c(resultIN, resultOUT) } return(as.solist(result)) } update.slrm <- function(object, ..., evaluate=TRUE, env=parent.frame()) { e <- update.default(object, ..., evaluate=FALSE) if(evaluate) { if(!missing(env)) environment(e$formula) <- env e <- eval(e, envir=env) } return(e) } anova.slrm <- local({ anova.slrm <- function(object, ..., test=NULL) { objex <- append(list(object), list(...)) if(!all(unlist(lapply(objex, is.slrm)))) stop("Some arguments are not of class slrm") fitz <- lapply(objex, getFIT) do.call(anova, append(fitz, list(test=test))) } getFIT <- function(z) {z$Fit$FIT} anova.slrm }) vcov.slrm <- function(object, ..., what=c("vcov", "corr", "fisher", "Fisher")) { stopifnot(is.slrm(object)) what <- match.arg(what) vc <- vcov(object$Fit$FIT) result <- switch(what, vcov = vc, corr = { sd <- sqrt(diag(vc)) vc / outer(sd, sd, "*") }, fisher=, Fisher={ solve(vc) }) return(result) } unitname.slrm <- function(x) { return(unitname(x$Data$response)) } "unitname<-.slrm" <- function(x, value) { unitname(x$Data$response) <- value return(x) } domain.slrm <- Window.slrm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, ..., from=from) } as.owin.slrm <- function(W, ..., from=c("points", "covariates")) { from <- match.arg(from) U <- switch(from, points = W$Data$response, covariates = W$Data$W) V <- as.owin(U, ...) return(V) } is.stationary.slrm <- function(x) { trend <- rhs.of.formula(formula(x)) return(identical.formulae(trend, ~1)) } is.poisson.slrm <- function(x) { TRUE } is.marked.slrm <- is.multitype.slrm <- function(X, ...) { FALSE } reach.slrm <- function(x, ...) { 0 } ## pseudoR2.slrm is defined in ppmclass.R Kmodel.slrm <- function(model, ...) { function(r) { pi * r^2 } } pcfmodel.slrm <- function(model, ...) { function(r) { rep.int(1, length(r)) } } parameters.slrm <- function(model, ...) { list(trend=coef(model)) } ## ............ SIMULATION .............................. simulate.slrm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) { # .... copied from simulate.lm .... 
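  ## (standard RNG bookkeeping: if 'seed' is supplied, the current
  ##  .Random.seed is saved and restored on exit, so the simulation is
  ##  reproducible without disturbing the caller's random number stream;
  ##  the state actually used is attached to the result as attr(, "seed"))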
if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } starttime <- proc.time() # determine simulation window and compute intensity if(!is.null(window)) stopifnot(is.owin(window)) lambda <- predict(object, type="intensity", newdata=covariates, window=window) # max lambda (for efficiency) summ <- summary(lambda) lmax <- summ$max + 0.05 * diff(summ$range) # run out <- list() verbose <- verbose && (nsim > 1) if(verbose) { cat(paste("Generating", nsim, "simulations... ")) pstate <- list() } for(i in 1:nsim) { out[[i]] <- rpoispp(lambda, lmax=lmax) if(verbose) pstate <- progressreport(i, nsim, state=pstate) } #' pack up out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } ## ------------------ residuals -------------------------------- residuals.slrm <- function(object, type=c("raw", "deviance", "pearson", "working", "response", "partial", "score"), ...) { type <- match.arg(type) otype <- if(type %in% c("raw", "score")) "response" else type FIT <- object$Fit$FIT W <- object$Data$W res <- residuals(FIT, type=otype, ...) if(type == "score") { M <- model.matrix(object) res <- res * M colnames(res) <- colnames(M) } R <- wrangle2image(res, W) return(R) } ## ------------------ leverage and influence ------------------- leverage.slrm <- function(model, ...) { slrmInfluence(model, "leverage", ...)[["leverage"]] } influence.slrm <- function(model, ...) { slrmInfluence(model, "influence", ...)[["influence"]] } dfbetas.slrm <- function(model, ...) { slrmInfluence(model, "dfbetas", ...)[["dfbetas"]] } dffit.slrm <- function(object, ...) { slrmInfluence(object, "dffit", ...)[["dffit"]] } slrmInfluence <- function(model, what=c("all", "leverage", "influence", "dfbetas", "dffit"), ...) { stopifnot(is.slrm(model)) what <- match.arg(what, several.ok=TRUE) if("all" %in% what) what <- c("leverage", "influence", "dfbetas", "dffit") FIT <- model$Fit$FIT W <- model$Data$W nr <- nrow(W) nc <- ncol(W) result <- list() if("leverage" %in% what) { h <- hatvalues(FIT, ...) result$leverage <- wrangle2image(h, W) } if("influence" %in% what) { h <- hatvalues(FIT, ...) rP <- rstandard(FIT, type="pearson", ...) p <- length(coef(model)) s <- (1/p) * rP^2 * h/(1-h) result$influence <- wrangle2image(s, W) } if("dfbetas" %in% what) { dfb <- dfbetas(FIT, ...) result$dfbetas <- wrangle2image(dfb, W) } if("dffit" %in% what) { dfb <- dfbeta(FIT, ...) #sic X <- model.matrix(model) # sic if(is.null(dim(X)) || is.null(dim(dfb)) || !all(dim(X) == dim(dfb))) stop("Internal error: model.matrix dimensions incompatible with dfbeta") dff <- rowSums(X * dfb) result$dffit <- wrangle2image(dff, W) } return(result) } valid.slrm <- function(object, warn=TRUE, ...) { verifyclass(object, "slrm") coeffs <- coef(object) ok <- all(is.finite(coeffs)) return(ok) } emend.slrm <- local({ tracemessage <- function(depth, ...) 
{ if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") splat(paste0(spacer, marker, " ", paste(...))) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } emend.slrm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "slrm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.slrm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # Fitted coefficients coef.orig <- coeffs <- coef(object) coefnames <- names(coeffs) # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") istrend <- (coef2term > 0) # Identify non-finite trend coefficients bad <- !is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.slrm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- emend.slrm(object.i, fatal=fatal, trace=tdnext) # evaluate log likelihood logL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log likelihood = ", logL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } emend.slrm }) spatstat.core/R/psstA.R0000644000176200001440000001143314144333463014471 0ustar liggesusers# # psstA.R # # Pseudoscore residual for unnormalised F (area-interaction) # # $Revision: 1.7 $ $Date: 2014/11/11 02:31:44 $ # ################################################################################ # psstA <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", correction="all", truecoef=NULL, hi.res=NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) { if(is.ppm(object)) fit <- object else if(is.ppp(object) || is.quad(object)) { # convert to quadscheme if(is.ppp(object)) object <- quadscheme(object, ...) 
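    # (either update the supplied model to the new quadrature scheme, or
    #  fit afresh by maximum pseudolikelihood; forcefit=TRUE forces a
    #  genuine fit even for trivial models such as the uniform Poisson,
    #  so that the residual computations below have a fit to work from)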
# fit model if(!is.null(model)) fit <- update(model, Q=object, forcefit=TRUE) else if(ppmcorrection == "border") fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) else fit <- ppm(object, trend=trend, interaction=interaction, correction=ppmcorrection, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) bX <- bdist.points(X) USEDX <- (bX > rbord) } else { USED <- rep.int(TRUE, U$n) USEDX <- rep.int(TRUE, X$n) } # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # determine breakpoints for r values rmaxdefault <- rmax.rule("F", Win, lambda) if(rfixed) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) else { # create fairly coarse 'r' values r <- seq(0, rmaxdefault, length=nr) breaks <- breakpts.from.r(r) } rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw", drop=FALSE, new.coef=truecoef, quad=hi.res) # rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[A](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[A]") # # for efficiency, compute the largest value of distance transform Dmax <- 0 for(i in 1:npts) { Di <- distmap(X[-i]) Dimax <- summary(Di)$max Dmax <- max(Dmax, Dimax) } Rmax <- min(max(rvals), Dmax * 1.1) nontrivial <- (rvals <= Rmax) trivialzeroes <- numeric(sum(!nontrivial)) # pseudosum Ax <- areaLoss.grid(X, rvals[nontrivial], subset=USEDX, ngrid=ngrid) C1 <- apply(Ax, 2, sum) C1 <- c(C1, trivialzeroes) # pseudocompensator OK <- USED & !Z Au <- areaGain.grid(U[OK], X, rvals[nontrivial], W=Win, ngrid=ngrid) lamu <- matrix(wc[OK], nrow=nrow(Au), ncol=ncol(Au)) C2 <- apply(lamu * Au, 2, sum) C2 <- c(C2, trivialzeroes) # pseudoscore residual Ctot <- C1 - C2 # tack on ans <- bind.fv(ans, data.frame(dat=C1, com=C2, res=Ctot), c("Sigma~Delta~V[A](r)", "bold(C)~Delta~V[A](r)", "%s(r)"), c("data pseudosum (contribution to %s)", "model pseudocompensator (contribution to %s)", "pseudoscore residual %s"), "res") # # pseudovariance # (skipped if called by envelope() etc) # if(correction == "all") { lamX <- matrix(wc[USED & Z], nrow=nrow(Ax), ncol=ncol(Ax)) Var <- apply(lamu * Au^2, 2, sum) + apply(lamX * Ax^2, 2, sum) Var <- c(Var, trivialzeroes) # two-sigma limits TwoSig <- 2 * sqrt(Var) # tack on ans <- bind.fv(ans, data.frame(var=Var, up=TwoSig, lo=-TwoSig), c("bold(C)^2~Delta~V[A](r)", "%s[up](r)", "%s[lo](r)"), c("pseudovariance of %s", "upper 2sigma critical limit for %s", "lower 2sigma critical limit for %s"), "res") fvnames(ans, ".") <- c("res", "up", "lo", "theo") } unitname(ans) <- unitname(fit) # return(ans) } spatstat.core/R/lohboot.R0000644000176200001440000003041314144333462015043 0ustar liggesusers# # lohboot.R # # $Revision: 1.24 $ $Date: 2019/06/24 03:15:26 $ # # Loh's bootstrap CI's for local pcf, local K 
etc # spatstatLocalFunctionInfo <- function(key) { ## This table has to be built on the fly. TheTable <- list( pcf = list(Global=pcf, Local=localpcf, L=FALSE, inhom=FALSE, indices=0), Kest = list(Global=Kest, Local=localK, L=FALSE, inhom=FALSE, indices=0), Lest = list(Global=Lest, Local=localK, # stet! L=TRUE, inhom=FALSE, indices=0), pcfinhom = list(Global=pcfinhom, Local=localpcfinhom, L=FALSE, inhom=TRUE, indices=0), Kinhom = list(Global=Kinhom, Local=localKinhom, L=FALSE, inhom=TRUE, indices=0), Linhom = list(Global=Linhom, Local=localKinhom, # stet! L=TRUE, inhom=TRUE, indices=0), Kcross = list(Global=Kcross, Local=localKcross, L=FALSE, inhom=FALSE, indices=2), Lcross = list(Global=Lcross, Local=localKcross, # stet! L=TRUE, inhom=FALSE, indices=2), Kdot = list(Global=Kdot, Local=localKdot, L=FALSE, inhom=FALSE, indices=1), Ldot = list(Global=Ldot, Local=localKdot, # stet! L=TRUE, inhom=FALSE, indices=1), Kcross.inhom = list(Global=Kcross.inhom, Local=localKcross.inhom, L=FALSE, inhom=TRUE, indices=2), Lcross.inhom = list(Global=Lcross.inhom, Local=localKcross.inhom, # stet! L=TRUE, inhom=TRUE, indices=2) ) if(length(key) != 1) stop("Argument must be a single character string or function", call.=FALSE) nama <- names(TheTable) pos <- if(is.character(key)) { match(key, nama) } else if(is.function(key)) { match(list(key), lapply(TheTable, getElement, name="Global")) } else NULL if(is.na(pos)) return(NULL) out <- TheTable[[pos]] out$GlobalName <- nama[pos] return(out) } lohboot <- function(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom", "Kcross", "Lcross", "Kdot", "Ldot", "Kcross.inhom", "Lcross.inhom"), ..., block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) { stopifnot(is.ppp(X)) ## validate 'fun' fun.name <- short.deparse(substitute(fun)) if(is.character(fun)) fun <- match.arg(fun) info <- spatstatLocalFunctionInfo(fun) if(is.null(info)) stop(paste("Loh's bootstrap is not supported for the function", sQuote(fun.name)), call.=FALSE) fun <- info$GlobalName localfun <- info$Local # validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence if(!global) { probs <- c(alpha/2, 1-alpha/2) rank <- nsim * probs[2L] } else { probs <- 1-alpha rank <- nsim * probs } if(abs(rank - round(rank)) > 0.001) warning(paste("confidence level", confidence, "corresponds to a non-integer rank", paren(rank), "so quantiles will be interpolated")) ## compute local functions f <- localfun(X, ...) theo <- f$theo ## parse edge correction info correction <- attr(f, "correction") switch(correction, none = { ckey <- clab <- "un" cadj <- "uncorrected" }, border = { ckey <- "border" clab <- "bord" cadj <- "border-corrected" }, translate = { ckey <- clab <- "trans" cadj <- "translation-corrected" }, isotropic = { ckey <- clab <- "iso" cadj <- "Ripley isotropic corrected" }) ## determine indices for Kcross etc types <- levels(marks(X)) from <- resolve.1.default(list(from=types[1]), list(...)) to <- resolve.1.default(list(to=types[2]), list(...)) fromName <- make.parseable(paste(from)) toName <- make.parseable(paste(to)) ## TEMPORARY HACK for cross/dot functions. ## Uses a possibly temporary attribute to overwrite X with only "from" points. 
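  ## (local cross/dot statistics are defined only for points of the
  ##  'from' type, so the resampling below must be over those points;
  ##  the local function returns them in the attribute "Xfrom")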
if(info$indices > 0) { X <- attr(f, "Xfrom") } # first n columns are the local pcfs (etc) for the n points of X n <- npoints(X) y <- as.matrix(as.data.frame(f))[, 1:n] nr <- nrow(y) ## ---------- Modification by Christophe Biscio ----------------- ## (some re-coding by Adrian) if(!block) { ## Adrian's wrong code ## average local statistics ymean <- .rowMeans(y, na.rm=TRUE, nr, n) ## resample ystar <- matrix(, nrow=nr, ncol=nsim) for(i in 1:nsim) { ## resample n points with replacement ind <- sample(n, replace=TRUE) ## average their local statistics ystar[,i] <- .rowMeans(y[,ind], nr, n, na.rm=TRUE) } } else { ## Correct block bootstrap as described by Loh. W <- Window(X) GridTess <- quadrats(boundingbox(W), nx = nx, ny =ny) ## Classify points of X into grid tiles BlockIndex <- tileindex(X$x, X$y, GridTess) ## Use only 'full' blocks if(!is.rectangle(W)) { blocks <- tiles(GridTess) fullblocks <- sapply(blocks, is.subset.owin, B = W) if(sum(fullblocks)<2) stop("Not enough blocks are fully contained in the window", call.=FALSE) warning(paste("For non-rectangular windows,", "only blocks fully contained in the window are used:", paste(sum(fullblocks), "were used and", sum(!fullblocks), "were ignored.") ), call.=FALSE) ## blocks <- blocks[fullblocks] ## adjust classification of points of X indexmap <- cumsum(fullblocks) indexmap[!fullblocks] <- NA BlockIndex <- indexmap[BlockIndex] ## adjust total number of points n <- sum(!is.na(BlockIndex)) BlockFactor <- factor(BlockIndex, levels=unique(indexmap[!is.na(indexmap)])) } else BlockFactor <- factor(BlockIndex) nmarks <- length(levels(BlockFactor)) ## Average the local function values in each block ymarks <- by(t(y), BlockFactor, colSums, na.rm=TRUE, simplify=FALSE) ## Ensure empty data yield zero if(any(isempty <- sapply(ymarks, is.null))) ymarks[isempty] <- rep(list(numeric(nr)), sum(isempty)) ymarks <- as.matrix(do.call(cbind, ymarks)) * nmarks/n ## average all the marks ymean <- .rowMeans(ymarks, na.rm=TRUE, nr, nmarks) ## Average the marks in each block ystar <- matrix(, nrow=nr, ncol=nsim) for(i in 1:nsim) { ## resample nblocks blocks with replacement ind <- sample( nmarks , replace=TRUE) ## average their local function values ystar[,i] <- .rowMeans(ymarks[,ind], nr, nmarks, na.rm=TRUE) } } ## compute quantiles if(!global) { ## pointwise quantiles hilo <- apply(ystar, 1, quantile, probs=probs, na.rm=TRUE, type=type) ## Ripley's K function correction proposed by Loh if(Vcorrection && (fun=="Kest" || fun=="Kinhom")) { Vcov=sqrt(1+2*pi*n*(f$r)^2/area.owin(W)) hilo[1L,] <- ymean+(ymean-hilo[1L,]) / Vcov hilo[2L,] <- ymean+(ymean-hilo[2L,]) / Vcov hilo <- hilo[2:1,] # switch index so hilo[1,] is lower bound basicboot <- FALSE # The basic bootstrap interval is already used. 
Ensure that I do not modify hilo } ## So-called "basic bootstrap interval" proposed in Loh's paper; ## the intervals are asymptotically the same if(basicboot) { hilo[1L,] <- 2*ymean-hilo[1L,] hilo[2L,] <- 2*ymean-hilo[2L,] hilo <- hilo[c(2,1),] # switch index so hilo[1,] is lower bound } } else { ## quantiles of deviation ydif <- sweep(ystar, 1, ymean) ydev <- apply(abs(ydif), 2, max, na.rm=TRUE) crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type) hilo <- rbind(ymean - crit, ymean + crit) } ## ============= End Modification by Christophe Biscio =================== ## Transform to L function if required if(info$L) { theo <- sqrt(theo/pi) ymean <- sqrt(ymean/pi) hilo <- sqrt(hilo/pi) warn.once("lohbootLfun", "The calculation of confidence intervals for L functions", "in lohboot() has changed in spatstat 1.60-0 and later;", "they are now computed by transforming the confidence intervals", "for the corresponding K functions.") } ## create fv object df <- data.frame(r=f$r, theo=theo, ymean, lo=hilo[1L,], hi=hilo[2L,]) colnames(df)[3L] <- ckey CIlevel <- paste(100 * confidence, "%% confidence", sep="") desc <- c("distance argument r", "theoretical Poisson %s", paste(cadj, "estimate of %s"), paste("lower", CIlevel, "limit for %s"), paste("upper", CIlevel, "limit for %s")) switch(fun, pcf={ fname <- "g" yexp <- ylab <- quote(g(r)) }, Kest={ fname <- "K" yexp <- ylab <- quote(K(r)) }, Lest={ fname <- "L" yexp <- ylab <- quote(L(r)) }, pcfinhom={ fname <- c("g", "inhom") yexp <- ylab <- quote(g[inhom](r)) }, Kinhom={ fname <- c("K", "inhom") yexp <- ylab <- quote(K[inhom](r)) }, Linhom={ fname <- c("L", "inhom") yexp <- ylab <- quote(L[inhom](r)) }, Kcross={ fname <- c("K", paste0("list(", fromName, ",", toName, ")")) ylab <- substitute(K[fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(K[list(fra,til)](r), list(fra=fromName,til=toName)) }, Lcross={ fname <- c("L", paste0("list(", fromName, ",", toName, ")")) ylab <- substitute(L[fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(L[list(fra,til)](r), list(fra=fromName,til=toName)) }, Kdot={ fname <- c("K", paste0(fromName, "~ symbol(\"\\267\")")) ylab <- substitute(K[fra ~ dot](r), list(fra=fromName)) yexp <- substitute(K[fra ~ symbol("\267")](r), list(fra=fromName)) }, Ldot={ fname <- c("L", paste0(fromName, "~ symbol(\"\\267\")")) ylab <- substitute(L[fra ~ dot](r), list(fra=fromName)) yexp <- substitute(L[fra ~ symbol("\267")](r), list(fra=fromName)) }, Kcross.inhom={ fname <- c("K", paste0("list(inhom,", fromName, ",", toName, ")")) ylab <- substitute(K[inhom,fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(K[list(inhom,fra,til)](r), list(fra=fromName,til=toName)) }, Lcross.inhom={ fname <- c("L", paste0("list(inhom,", fromName, ",", toName, ")")) ylab <- substitute(L[inhom,fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(L[list(inhom,fra,til)](r), list(fra=fromName,til=toName)) }) labl <- c("r", makefvlabel(NULL, NULL, fname, "pois"), makefvlabel(NULL, "hat", fname, clab), makefvlabel(NULL, "hat", fname, "loCI"), makefvlabel(NULL, "hat", fname, "hiCI")) g <- fv(df, "r", ylab=ylab, ckey, , c(0, max(f$r)), labl, desc, fname=fname, yexp=yexp) formula(g) <- . 
~ r fvnames(g, ".") <- c(ckey, "theo", "hi", "lo") fvnames(g, ".s") <- c("hi", "lo") unitname(g) <- unitname(X) g } spatstat.core/R/Jinhom.R0000644000176200001440000003204114144333461014617 0ustar liggesusers# # Jinhom.R # # $Revision: 1.13 $ $Date: 2021/01/07 03:18:50 $ # Ginhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("G", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Ginhom.update", "The behaviour of Ginhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Ginhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) if(lmin >= min(lambdaX)) stop("lmin must be smaller than all values of lambda") } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, 0.95 * min(lambdaX)) } ## Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio if(warn.bias) { ra <- range(lratio) if(ra[1] < 1e-6 || ra[2] > 1 - 1e-6) warning(paste("Possible bias: range of values of lmin/lambdaX is", prange(signif(ra, 5))), call.=FALSE) } ## sort data points in order of increasing x coordinate xx <- X$x yy <- 
X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # compute local cumulative products z <- .C(SC_locprod, n = as.integer(npts), x = as.double(xord), y = as.double(yord), v = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(npts * nr)), PACKAGE="spatstat.core") ans <- matrix(z$ans, nrow=nr, ncol=npts) # revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=npts) loccumprod[, oX] <- ans # border correction bX <- bdist.points(X) ok <- outer(r, bX, "<=") denom <- .rowSums(ok, nr, npts) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, npts) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom](r)), "theo", NULL, c(0,rmax), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("G", "inhom"), ratio=ratio) G <- bind.ratfv(G, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(G) <- . ~ r fvnames(G, ".") <- c("bord", "theo") unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) if(danger) attr(G, "dangerous") <- dangerous if(savelambda) { attr(G, "lambda") <- lambdaX attr(G, "lmin") <- lmin } return(G) } Finhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("F", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Finhom.update", "The behaviour of Finhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Finhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) if(lmin >= min(lambdaX)) stop("lmin must be smaller than all values of lambda") } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected 
smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, 0.95 * min(lambdaX)) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio if(warn.bias) { ra <- range(lratio) if(ra[1] < 1e-6 || ra[2] > 1 - 1e-6) warning(paste("Possible bias: range of values of lmin/lambdaX is", prange(signif(ra, 5))), call.=FALSE) } ## sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # determine pixel grid and compute distance to boundary M <- do.call.matched(as.mask, append(list(w=W), list(...))) bM <- bdist.pixels(M, style="matrix") bM <- as.vector(bM) # x, y coordinates of pixels are already sorted by increasing x xM <- as.vector(rasterx.mask(M)) yM <- as.vector(rastery.mask(M)) nM <- length(xM) # compute local cumulative products z <- .C(SC_locxprod, ntest = as.integer(nM), xtest = as.double(xM), ytest = as.double(yM), ndata = as.integer(npts), xdata = as.double(xord), ydata = as.double(yord), vdata = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nM * nr)), PACKAGE="spatstat.core") loccumprod <- matrix(z$ans, nrow=nr, ncol=nM) # border correction ok <- outer(r, bM, "<=") denom <- .rowSums(ok, nr, nM) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, nM) # pack up Fdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) FX <- ratfv(Fdf, NULL, theo.denom, "r", quote(F[inhom](r)), "theo", NULL, c(0,rmax), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("F", "inhom"), ratio=ratio) FX <- bind.ratfv(FX, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(FX) <- . 
~ r fvnames(FX, ".") <- c("bord", "theo") unitname(FX) <- unitname(X) if(ratio) FX <- conform.ratfv(FX) if(danger) attr(FX, "dangerous") <- dangerous if(savelambda) { attr(FX, "lambda") <- lambdaX attr(FX, "lmin") <- lmin } return(FX) } Jinhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { if(missing(update) & (is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda))) warn.once(key="Jinhom.update", "The behaviour of Jinhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Jinhom)") ## compute inhomogeneous G (including determination of r and lmin) GX <- Ginhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, breaks=breaks, ratio=FALSE, update=update, warn.bias=warn.bias, savelambda=TRUE) ## extract auxiliary values to be used for Finhom r <- GX$r lmin <- attr(GX, "lmin") lambdaX <- attr(GX, "lambda") ## compute inhomogeneous J using previously-determined values FX <- Finhom(X, lambda=lambdaX, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, ratio=FALSE, update=update, warn.bias=FALSE, savelambda=FALSE) ## evaluate inhomogeneous J function JX <- eval.fv((1-GX)/(1-FX)) # relabel the fv object JX <- rebadge.fv(JX, quote(J[inhom](r)), c("J","inhom"), names(JX), new.labl=attr(GX, "labl")) # tack on extra info attr(JX, "G") <- GX attr(JX, "F") <- FX attr(JX, "dangerous") <- attr(GX, "dangerous") if(savelambda) { attr(JX, "lmin") <- lmin attr(JX, "lambda") <- lambdaX } return(JX) } spatstat.core/R/bc.R0000644000176200001440000000405314144333461013761 0ustar liggesusers#' bc.R #' #' Bias correction techniques #' #' $Revision: 1.2 $ $Date: 2016/09/15 02:21:15 $ bc <- function(fit, ...) { UseMethod("bc") } bc.ppm <- function(fit, ..., nfine=256) { stopifnot(is.ppm(fit)) # theta0 <- coef(fit) nc <- length(theta0) # X <- data.ppm(fit) Z <- is.data(quad.ppm(fit)) # evaluate sufficient statistic at data points sufX <- model.matrix(fit)[Z, ] if(ncol(sufX) != nc) stop("Internal error: model.matrix does not match coef(model)") # predict on fine grid finemask <- as.mask(as.owin(fit), dimyx=nfine) lamF <- predict(fit, type="cif", locations=finemask) sufF <- model.images(fit, W=finemask) if(length(sufF) != nc) stop("Internal error: model.images does not match coef(model)") # edge correction if(fit$correction == "border" && ((rbord <- fit$rbord) > 0)) { b <- bdist.pixels(finemask) bX <- bdist.points(X) excludeU <- eval.im(b < rbord) retainX <- (bX >= rbord) sufX <- sufX[retainX, , drop=FALSE] } else { excludeU <- FALSE } # compute fine approximation to score scoreX <- colSums(sufX) scoreW <- numeric(nc) for(k in seq_len(nc)) { S <- sufF[[k]] # infinite values of S may occur and correspond to zero cif Slam <- eval.im(ifelse(is.infinite(S) | excludeU, 0, S * lamF)) scoreW[k] <- integral.im(Slam) } score <- scoreX - scoreW # Newton-Raphson Iinv <- vcov(fit, hessian=TRUE) theta <- theta0 + Iinv %*% score theta <- theta[ , 1L, drop=TRUE] # # return(list(theta0=theta0, theta=theta)) return(theta) } # Richardson extrapolation (generic) rex <- function(x, r=2, k=1, recursive=FALSE) { # x should be a matrix # whose columns are successive estimates of a parameter vector # obtained using "grid step sizes" t, t/r, t/r^2, ... 
# Estimate from step size t is assumed to converge at rate t^k if(!is.matrix(x)) x <- matrix(x, nrow=1) if(ncol(x) <= 1) return(x) rk <- r^k y <- (rk * x[, -1L, drop=FALSE] - x[, -ncol(x), drop=FALSE])/(rk - 1) if(recursive) y <- rex(y, r=r, k=k+1, recursive=TRUE) return(y) } spatstat.core/R/leverage.R0000644000176200001440000012450414144333462015174 0ustar liggesusers# # leverage.R # # leverage and influence # # $Revision: 1.121 $ $Date: 2020/12/19 05:25:06 $ # leverage <- function(model, ...) { UseMethod("leverage") } leverage.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="leverage", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$leverage) } influence.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="influence", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$influence) } dfbetas.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="dfbetas", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$dfbetas) } ppmInfluence <- function(fit, what=c("leverage", "influence", "dfbetas"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, fitname=NULL) { stuff <- ppmInfluenceEngine(fit, what=what, ..., iScore=iScore, iHessian=iHessian, iArgs=iArgs, drop=drop, fitname=fitname) fnam <- c("fitname", "fit.is.poisson") result <- list() if("lev" %in% names(stuff)) { lev <- stuff[c(fnam, "lev")] class(lev) <- "leverage.ppm" result$leverage <- lev } if("infl" %in% names(stuff)) { infl <- stuff[c(fnam, "infl")] class(infl) <- "influence.ppm" result$influence <- infl } if(!is.null(dfb <- stuff$dfbetas)) { attr(dfb, "info") <- stuff[fnam] result$dfbetas <- dfb } other <- setdiff(names(stuff), c("lev", "infl", "dfbetas")) result[other] <- stuff[other] class(result) <- "ppmInfluence" return(result) } leverage.ppmInfluence <- function(model, ...) { model$leverage } influence.ppmInfluence <- function(model, ...) { model$influence } dfbetas.ppmInfluence <- function(model, ...) { model$dfbetas } ## ............... main workhorse .................................... ppmInfluenceEngine <- function(fit, what=c("leverage", "influence", "dfbetas", "score", "derivatives", "increments", "all"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, method=c("C", "interpreted"), fine=FALSE, precomputed=list(), sparseOK=TRUE, fitname=NULL, multitypeOK=FALSE, entrywise = TRUE, matrix.action = c("warn", "fatal", "silent"), dimyx=NULL, eps=NULL, geomsmooth = TRUE) { if(is.null(fitname)) fitname <- short.deparse(substitute(fit)) ## type of calculation to be performed method <- match.arg(method) what <- match.arg(what, several.ok=TRUE) if("all" %in% what) what <- c("leverage", "influence", "dfbetas", "score", "derivatives", "increments") matrix.action <- match.arg(matrix.action) influencecalc <- any(what %in% c("leverage", "influence", "dfbetas")) hesscalc <- influencecalc || any(what == "derivatives") sparse <- sparseOK target <- paste(what, collapse=",") ## ........... collect information about the model ................. 
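  ## The engine validates the fitted model, extracts the canonical
  ## sufficient statistics and fitted (conditional) intensity at all
  ## quadrature points, builds the (negative) Hessian of the log
  ## composite likelihood, and then assembles whichever of the leverage,
  ## influence, dfbetas, score and derivative summaries were requested
  ## in 'what'.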
stopifnot(is.ppm(fit)) #' ensure object contains GLM fit if(!hasglmfit(fit)) { fit <- update(fit, forcefit=TRUE) precomputed <- list() } #' type of interpoint interaction fit.is.poisson <- is.poisson(fit) hasInf <- !fit.is.poisson && !identical(fit$interaction$hasInf, FALSE) #' estimating function fitmethod <- fit$method logi <- (fitmethod == "logi") pseudo <- (fitmethod == "mpl") if(!logi && !pseudo) { warning(paste("Model was fitted with method =", dQuote(fitmethod), "but is treated as having been fitted by maximum", if(fit.is.poisson) "likelihood" else "pseudolikelihood", "for leverage/influence calculation"), call.=FALSE) pseudo <- TRUE } ## Detect presence of irregular parameters if(is.null(iArgs)) iArgs <- fit$covfunargs gotScore <- !is.null(iScore) gotHess <- !is.null(iHessian) needHess <- gotScore && hesscalc # may be updated later if(!gotHess && needHess) stop("Must supply iHessian", call.=FALSE) #' ................... evaluate basic terms .................... ## extract values from model, using precomputed values if given theta <- precomputed$coef %orifnull% coef(fit) lampos <- precomputed$lambda %orifnull% fitted(fit, ignore.hardcore=hasInf, check=FALSE) mom <- precomputed$mom %orifnull% model.matrix(fit, splitInf=hasInf) ## 'lampos' is positive part of cif ## 'lam' is full model cif including zeroes lam <- lampos zerocif <- attr(mom, "-Inf") %orifnull% logical(nrow(mom)) anyzerocif <- any(zerocif) if(hasInf && anyzerocif) lam[zerocif] <- 0 p <- length(theta) Q <- quad.ppm(fit) w <- w.quad(Q) loc <- union.quad(Q) isdata <- is.data(Q) mt <- is.multitype(loc) if(length(w) != length(lam)) stop(paste("Internal error: length(w) = ", length(w), "!=", length(lam), "= length(lam)"), call.=FALSE) ## smoothing bandwidth and resolution for smoothed images of densities smallsigma <- if(!mt) avenndist(loc) else max(sapply(split(loc), avenndist)) ## previously used 'maxnndist' instead of 'avenndist' if(is.null(dimyx) && is.null(eps)) eps <- sqrt(prod(sidelengths(Frame(loc))))/256 #' ............... evaluate Hessian of regular parameters ................ ## domain of composite likelihood ## (e.g. eroded window in border correction) inside <- getglmsubset(fit) %orifnull% rep(TRUE, npoints(loc)) ## extract negative Hessian matrix of regular part of log composite likelihood ## hess = negative Hessian H ## fgrad = Fisher-scoring-like gradient G = estimate of E[H] if(logi) { ## .............. logistic composite likelihood ...................... ## Intensity of dummy points rho <- fit$Q$param$rho %orifnull% intensity(as.ppp(fit$Q)) logiprob <- lampos / (lampos + rho) vclist <- vcov(fit, what = "internals", fine=fine, matrix.action="silent") hess <- vclist$Slog fgrad <- vclist$fisher invhess <- if(is.null(hess)) NULL else checksolve(hess, "silent") invfgrad <- if(is.null(fgrad)) NULL else checksolve(fgrad, "silent") if(is.null(invhess) || is.null(invfgrad)) { #' use more expensive estimate of variance terms vclist <- vcov(fit, what = "internals", fine=TRUE, matrix.action=matrix.action) hess <- vclist$Slog fgrad <- vclist$fisher #' try again - exit if really singular invhess <- checksolve(hess, matrix.action, "Hessian", target) invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } # vc <- invhess %*% (vclist$Sigma1log+vclist$Sigma2log) %*% invhess } else { ## .............. likelihood or pseudolikelihood .................... 
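    ## For maximum likelihood / pseudolikelihood fits,
    ## vcov(fit, hessian=TRUE) returns the inverse of the negative
    ## Hessian, so 'hess' and the Fisher-scoring gradient 'fgrad'
    ## coincide and are recovered by inverting it; checksolve() yields
    ## NULL if the inversion fails numerically, in which case the
    ## computation is repeated with fine=TRUE and failure is then
    ## handled according to 'matrix.action'.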
invfgrad <- vcov(fit, hessian=TRUE, fine=fine, matrix.action="silent") fgrad <- hess <- if(is.null(invfgrad) || anyNA(invfgrad)) NULL else checksolve(invfgrad, "silent") if(is.null(fgrad)) { invfgrad <- vcov(fit, hessian=TRUE, fine=TRUE, matrix.action=matrix.action) fgrad <- hess <- checksolve(invfgrad, matrix.action, "Hessian", target) } } #' ............... augment Hessian ................... ## evaluate additional (`irregular') components of score, if any iscoremat <- ppmDerivatives(fit, "gradient", iScore, loc, covfunargs=iArgs) gotScore <- !is.null(iscoremat) needHess <- gotScore && hesscalc if(!gotScore) { REG <- 1:ncol(mom) } else { ## count regular and irregular parameters nreg <- ncol(mom) nirr <- ncol(iscoremat) ## add extra columns to model matrix mom <- cbind(mom, iscoremat) REG <- 1:nreg IRR <- nreg + 1:nirr ## evaluate additional (`irregular') entries of Hessian ihessmat <- if(!needHess) NULL else ppmDerivatives(fit, "hessian", iHessian, loc, covfunargs=iArgs) if(gotHess <- !is.null(ihessmat)) { ## recompute negative Hessian of log PL and its mean fgrad <- hessextra <- matrix(0, ncol(mom), ncol(mom)) } else if(needHess && length(iArgs)) { nami <- names(iArgs) stop(paste("Unable to compute iHess, the", ngettext(length(nami), "component", "components"), "of the Hessian matrix for the irregular", ngettext(length(nami), "parameter", "parameters"), commasep(sQuote(names(iArgs)))), call.=FALSE) } if(pseudo) { ## .............. likelihood or pseudolikelihood .................... switch(method, interpreted = { for(i in seq(loc$n)) { # weight for integrand wti <- lam[i] * w[i] if(all(is.finite(wti))) { # integral of outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * wti if(all(is.finite(v1))) fgrad <- fgrad + v1 # integral of Hessian # contributions nonzero for irregular parameters if(gotHess) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) * wti if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } } # subtract sum over data points if(gotHess) { for(i in which(isdata)) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] - v2 } hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { wlam <- lam * w fgrad <- sumouter(mom, wlam) if(gotHess) { # integral term isfin <- is.finite(wlam) & matrowall(is.finite(ihessmat)) vintegral <- if(all(isfin)) wlam %*% ihessmat else wlam[isfin] %*% ihessmat[isfin,, drop=FALSE] # sum over data points vdata <- .colSums(ihessmat[isdata, , drop=FALSE], sum(isdata), ncol(ihessmat), na.rm=TRUE) vcontrib <- vintegral - vdata hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } else { ## .............. logistic composite likelihood .................... 
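    ## In the logistic case each quadrature point u has weight
    ## p(u) * (1 - p(u)), where p(u) = lambda(u)/(lambda(u) + rho) is the
    ## logistic regression probability ('logiprob'); the weighted outer
    ## products of the score rows form 'fgrad', while the
    ## irregular-parameter Hessian contributions enter with weight
    ## -(1 - p(u)) at data points and +p(u) at dummy points ('hweight').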
switch(method, interpreted = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) for(i in seq(loc$n)) { ## outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * oweight[i] if(all(is.finite(v1))) fgrad <- fgrad + v1 ## Hessian term ## contributions nonzero for irregular parameters if(gotHess) { v2 <- hweight[i] * matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } if(gotHess) { hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) fgrad <- sumouter(mom, oweight) if(gotHess) { # Hessian term isfin <- is.finite(hweight) & matrowall(is.finite(ihessmat)) vcontrib <- if(all(isfin)) hweight %*% ihessmat else hweight[isfin] %*% ihessmat[isfin,, drop=FALSE] hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } if(!needHess) { if(pseudo){ hess <- fgrad invhess <- invfgrad } } # ok <- NULL if(drop) { ok <- complete.cases(mom) if(all(ok)) { ok <- NULL } else { if((nbad <- sum(isdata[!ok])) > 0) warning(paste("NA value of canonical statistic at", nbad, ngettext(nbad, "data point", "data points")), call.=FALSE) Q <- Q[ok] mom <- mom[ok, , drop=FALSE] loc <- loc[ok] lam <- lam[ok] w <- w[ok] isdata <- isdata[ok] inside <- inside[ok] } } # ........ start assembling result ..................... result <- list(fitname=fitname, fit.is.poisson=fit.is.poisson) if(any(c("score", "derivatives") %in% what)) { ## calculate the composite score rawmean <- if(logi) logiprob else (lam * w) rawresid <- isdata - rawmean score <- matrix(rawresid, nrow=1) %*% mom if("score" %in% what) result$score <- score if("derivatives" %in% what) result$deriv <- list(mom=mom, score=score, fgrad=fgrad, invfgrad=invfgrad, hess=hess, invhess=invhess) if(all(what %in% c("score", "derivatives"))) return(result) } ## ::::::::::::::: compute second order terms ::::::::::::: ## >>> set model matrix to zero outside the domain <<< mom[!inside, ] <- 0 ## compute effect of adding/deleting each quadrature point if(fit.is.poisson) { ## ........ Poisson case .................................. eff <- mom ddS <- ddSintegrand <- NULL } else { ## ........ Gibbs case .................................... 
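    ## In the Gibbs case, adding or deleting a point changes the
    ## sufficient statistic at other points as well. The loop below
    ## accumulates two matrices: 'eff.data', the change in the data
    ## contribution to the score, and 'eff.back', the change in the
    ## integral (background) term, using the second-order increments
    ## ddS[i,j,] = Delta_i Delta_j S(x) returned by deltasuffstat().
    ## Large quadrature schemes are processed in blocks of dummy points
    ## to limit memory use.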
## initialise eff <- mom ## second order interaction terms ## columns index the point being added/deleted ## rows index the points affected ## goal is to compute these effect matrices: eff.data <- eff.back <- matrix(0, nrow(eff), ncol(eff), dimnames=dimnames(eff)) ## U <- union.quad(Q) nU <- npoints(U) ## decide whether to split into blocks nX <- Q$data$n nD <- Q$dummy$n bls <- quadBlockSizes(nX, nD, p, announce=TRUE) nblocks <- bls$nblocks nperblock <- bls$nperblock ## if(nblocks > 1 && ("increments" %in% what)) { warning("Oversize quadrature scheme: cannot return array of increments", call.=FALSE) what <- setdiff(what, "increments") } R <- reach(fit) ## indices into original quadrature scheme whichok <- if(!is.null(ok)) which(ok) else seq_len(nX+nD) whichokdata <- whichok[isdata] whichokdummy <- whichok[!isdata] ## {{{{{{{{{{{{{ L O O P }}}}}}}}}}}}} ## loop for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) # corresponding subset of original quadrature scheme if(!is.null(ok) || nblocks > 1) { ## subset for which we will compute the effect current <- c(whichokdata, whichokdummy[first:last]) ## find neighbours, needed for calculation other <- setdiff(seq_len(nU), current) crx <- crosspairs(U[current], U[other], R, what="indices") nabers <- other[unique(crx$j)] ## subset actually requested requested <- c(current, nabers) ## corresponding stuff ('B' for block) isdataB <- isdata[requested] changesignB <- ifelse(isdataB, -1, 1) zerocifB <- zerocif[requested] anyzerocifB <- any(zerocifB) momB <- mom[requested, , drop=FALSE] lamB <- lam[requested] #' unused: #' insideB <- inside[requested] #' lamposB <- lampos[requested] if(logi) logiprobB <- logiprob[requested] wB <- w[requested] currentB <- seq_along(current) } else { requested <- NULL isdataB <- isdata changesignB <- ifelse(isdataB, -1, 1) zerocifB <- zerocif anyzerocifB <- anyzerocif momB <- mom lamB <- lam #' unused: #' insideB <- inside #' lamposB <- lampos if(logi) logiprobB <- logiprob wB <- w } ## compute second order terms ## ddS[i,j, ] = Delta_i Delta_j S(x) ddS <- deltasuffstat(fit, restrict = "first", dataonly=FALSE, quadsub=requested, sparseOK=sparse, splitInf=hasInf, force=TRUE, warn.forced=TRUE) ## if(is.null(ddS)) { warning("Second order interaction terms are not implemented", " for this model; they are treated as zero", call.=FALSE) break } else { sparse <- inherits(ddS, "sparse3Darray") if(hasInf) { deltaInf <- attr(ddS, "deltaInf") hasInf <- !is.null(deltaInf) if(hasInf) sparse <- sparse && inherits(deltaInf, "sparseMatrix") } if(gotScore) { ## add extra planes of zeroes to second-order model matrix ## (zero because the irregular components are part of the trend) paddim <- c(dim(ddS)[1:2], nirr) if(!sparse) { ddS <- abind::abind(ddS, array(0, dim=paddim), along=3) } else { ddS <- bind.sparse3Darray(ddS, sparse3Darray(dims=paddim), along=3) } } } ## ^^^^^^^^^^^^^^^^^ second term in DeltaScore ^^^^^^^^^^^^^^^^^^^^ ## effect of addition/deletion of U[j] ## on score contribution from data points (sum automatically restricted to ## interior for border correction by earlier call to ## deltasuffstat(..., restrict = "first")) ddSX <- ddS[isdataB, , , drop=FALSE] eff.data.B <- marginSumsSparse(ddSX, c(2,3)) ## check if any quadrature points have zero conditional intensity; ## these do not contribute to this term if(anyzerocifB) eff.data.B[zerocifB, ] <- 0 ## save results for current subset of quadrature points if(is.null(requested)) { eff.data <- eff.data.B } else { 
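        ## block-wise computation: copy this block's results back into
        ## the rows of the full 'eff.data' matrix indexed by 'current'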
eff.data[current,] <- as.matrix(eff.data.B[currentB,,drop=FALSE]) } ## rm(ddSX, eff.data.B) ## ^^^^^^^^^^^^^^^^^ third term in DeltaScore ^^^^^^^^^^^^^^^^^^^^ ## effect of addition/deletion of U[j] on integral term in score if(!sparse) { ## ::::::::::::::: full arrays, simpler code ::::::::::::::::::: if(pseudo) { ## --------------- likelihood or pseudolikelihood ----------- ## model matrix after addition/deletion of each U[j] ## mombefore[i,j,] <- mom[i,] di <- dim(ddS) mombefore <- array(apply(momB, 2, rep, times=di[2]), dim=di) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] momafter <- mombefore + momchange ## effect of addition/deletion of U[j] on lambda(U[i], X) if(gotScore) { lamratio <- exp(tensor::tensor(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else { lamratio <- exp(tensor::tensor(momchange, theta, 3, 1)) } lamratio <- array(lamratio, dim=dim(momafter)) if(!hasInf) { #' cif is positive ddSintegrand <- lamB * (momafter * lamratio - mombefore) } else { #' cif can be zero zerobefore <- matrix(zerocifB, di[1], di[2]) zerochange <- deltaInf * 1L zerochange[ , isdataB] <- - zerochange[ , isdataB] zeroafter <- zerobefore + zerochange momZbefore <- mombefore momZbefore[ , zerocifB, ] <- 0 IJK <- unname(which(array(zeroafter == 1, dim=di), arr.ind=TRUE)) momZafter <- momafter momZafter[IJK] <- 0 momZchange <- momZafter- momZbefore ddSintegrand <- lamB * (momZafter * lamratio - momZbefore) } rm(momchange, mombefore, momafter, lamratio) } else { ## --------------- logistic composite likelihood ----------- stop("Non-sparse method is not implemented for method = 'logi'!") } gc() } else { ## :::::::::::::::::: sparse arrays :::::::::::::::::::::::: if(logi) { ## --------------- logistic composite likelihood ----------- ## Delta suff. stat. with sign change for data/dummy (sparse3Darray) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## Evaluate theta^T %*% ddS (with sign -1/+1 according to data/dummy) ## as triplet sparse matrix if(gotScore){ momchangeeffect <- tensorSparse(momchange[,,REG,drop=FALSE], theta, 3, 1) } else{ momchangeeffect <- tensorSparse(momchange, theta, 3, 1) } ## Copy to each slice momchangeeffect <- expandSparse(momchangeeffect, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(momchangeeffect) ## Entrywise calculations below momchange <- as.numeric(momchange[ijk]) mombefore <- mom[cbind(ijk$i,ijk$k)] momafter <- mombefore + momchange ## Transform to change in probability expchange <- exp(momchangeeffect$x) lamBi <- lamB[ijk$i] logiprobBi <- logiprobB[ijk$i] changesignBj <- changesignB[ijk$j] pchange <- changesignBj*(lamBi * expchange / (lamBi*expchange + rho) - logiprobBi) ## Note: changesignBj * momchange == as.numeric(ddS[ijk]) if(!hasInf) { #' cif is positive ddSintegrand <- momafter * pchange + logiprobBi * changesignBj * momchange } else { #' cif can be zero isdataBj <- isdataB[ijk$j] zerobefore <- as.logical(zerocifB[ijk$i]) zerochange <- as.logical(deltaInf[cbind(ijk$i, ijk$j)]) zerochange[isdataBj] <- - zerochange[isdataBj] zeroafter <- zerobefore + zerochange momZbefore <- ifelse(zerobefore, 0, mombefore) momZafter <- ifelse(zeroafter, 0, momafter) momZchange <- momZafter - momZbefore ddSintegrand <- momZafter * pchange + logiprobBi * changesignBj * momZchange } ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else{ ## --------------- likelihood or pseudolikelihood ----------- if(entrywise) { ## ...... sparse arrays, using explicit indices ...... 
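          ## 'entrywise' strategy: extract the triplet indices (i,j,k) of
          ## the nonzero cells of the sparse 3D increment array once, do
          ## all arithmetic on plain numeric vectors indexed by those
          ## triplets, and rebuild a sparse3Darray at the end; e.g.
          ## exp(<momchange, theta>) - 1 is computed via expm1() on the
          ## nonzero cells only, avoiding the dense arrays of the
          ## non-sparse branch.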
momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] if(gotScore){ lamratiominus1 <- expm1(tensorSparse(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tensorSparse(momchange, theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(lamratiominus1) ## Everything entrywise with ijk now: # lamratiominus1 <- lamratiominus1[cbind(ijk$i, ijk$j)] lamratiominus1 <- as.numeric(lamratiominus1$x) momchange <- as.numeric(momchange[ijk]) mombefore <- momB[cbind(ijk$i, ijk$k)] momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- lamB[ijk$i] if(!hasInf) { #' cif is positive ddSintegrand <- lamarray * (momafter * lamratiominus1 + momchange) } else { #' cif can be zero isdataBj <- isdataB[ijk$j] zerobefore <- as.logical(zerocifB[ijk$i]) zerochange <- as.logical(deltaInf[cbind(ijk$i, ijk$j)]) zerochange[isdataBj] <- - zerochange[isdataBj] zeroafter <- zerobefore + zerochange momZbefore <- ifelse(zerobefore, 0, mombefore) momZafter <- ifelse(zeroafter, 0, momafter) momZchange <- momZafter - momZbefore ddSintegrand <- lamarray*(momZafter*lamratiominus1 + momZchange) } ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else { ## ...... sparse array code ...... ## Entries are required only for pairs i,j which interact. ## mombefore[i,j,] <- mom[i,] mombefore <- mapSparseEntries(ddS, 1, momB, conform=TRUE, across=3) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## momafter <- evalSparse3Dentrywise(mombefore + momchange) momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- mapSparseEntries(ddS, 1, lamB, conform=TRUE, across=3) if(gotScore){ lamratiominus1 <- expm1(tensorSparse(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tensorSparse(momchange,theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ## ddSintegrand <- evalSparse3Dentrywise(lamarray * (momafter* lamratiominus1 + momchange)) if(!hasInf) { #' cif is positive ddSintegrand <- lamarray * (momafter* lamratiominus1 + momchange) } else { #' cif has zeroes zerobefore <- mapSparseEntries(ddS, 1, zerocifB, conform=TRUE, across=3) zerochange <- mapSparseEntries(ddS, 1, deltaInf, conform=TRUE, across=2) zerochange[,isdataB,] <- - zerochange[,isdataB,] zeroafter <- zerobefore + zerochange momZbefore <- mombefore momZafter <- momafter momZbefore[ , zerocifB , ] <- 0 IJK <- SparseIndices(zeroafter) momZafter[IJK] <- 0 momZchange <- momZafter - momZbefore ddSintegrand <- lamarray*(momZafter*lamratiominus1 + momZchange) } } rm(lamratiominus1, lamarray, momafter) } rm(momchange, mombefore) } if(anyzerocifB) { ddSintegrand[zerocifB,,] <- 0 ddSintegrand[,zerocifB,] <- 0 } ## integrate if(logi){ # eff.back.B <- tensorSparse(ddSintegrand, rep(1, length(wB)), 1, 1) eff.back.B <- marginSumsSparse(ddSintegrand, c(2,3)) } else{ eff.back.B <- changesignB * tensorSparse(ddSintegrand, wB, 1, 1) } ## save contribution if(is.null(requested)) { eff.back <- eff.back.B } else { eff.back[current,] <- as.matrix(eff.back.B[currentB, , drop=FALSE]) } } ## {{{{{{{{{{{{{ E N D O F L O O P }}}}}}}}}}}}} ## total eff <- eff + eff.data - eff.back eff <- as.matrix(eff) } if("increments" %in% what) { result$increm <- list(ddS=ddS, ddSintegrand=ddSintegrand, isdata=isdata, wQ=w) } if(!any(c("leverage", "influence", "dfbetas") %in% what)) return(result) # ............ compute leverage, influence, dfbetas .............. 
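  ## The key quantity is b[i] = s(u_i)^T H^{-1} e(u_i), where s(u_i) is
  ## the canonical sufficient statistic at quadrature point u_i (row i
  ## of 'mom'), H^{-1} = 'invhess' is the inverse negative Hessian, and
  ## e(u_i) is the total effect (row i of 'eff'). Leverage at u_i is
  ## lambda(u_i) * b[i]; influence at a data point is the quadratic form
  ## e^T H^{-1} e / p; dfbetas maps each effect row through H^{-1} to an
  ## approximate change in the fitted coefficients.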
if(!is.matrix(invhess)) stop("Internal error: inverse Hessian not available", call.=FALSE) # compute basic contribution from each quadrature point nloc <- npoints(loc) switch(method, interpreted = { b <- numeric(nloc) for(i in seq(nloc)) { effi <- eff[i,, drop=FALSE] momi <- mom[i,, drop=FALSE] b[i] <- momi %*% invhess %*% t(effi) } }, C = { b <- bilinearform(mom, invhess, eff) }) # .......... leverage ............. if("leverage" %in% what) { ## values of leverage (diagonal) at points of 'loc' h <- b * lam ok <- is.finite(h) geomsmooth <- geomsmooth && all(h[!isdata & ok] >= 0) if(mt) h <- data.frame(leverage=h, type=marks(loc)) levval <- (loc %mark% h)[ok] levvaldum <- levval[!isdata[ok]] if(!mt) { levsmo <- Smooth(levvaldum, sigma=smallsigma, geometric=geomsmooth, dimyx=dimyx, eps=eps) levnearest <- nnmark(levvaldum, dimyx=dimyx, eps=eps) } else { levsplitdum <- split(levvaldum, reduce=TRUE) levsmo <- Smooth(levsplitdum, sigma=smallsigma, geometric=geomsmooth, dimyx=dimyx, eps=eps) levnearest <- solapply(levsplitdum, nnmark, dimyx=dimyx, eps=eps) } ## mean level if(fit.is.poisson) { a <- area(Window(loc)) * markspace.integral(loc) levmean <- p/a } else { levmean <- if(!mt) mean(levnearest) else mean(sapply(levnearest, mean)) } lev <- list(val=levval, smo=levsmo, ave=levmean, nearest=levnearest) result$lev <- lev } # .......... influence ............. if("influence" %in% what) { if(logi){ X <- loc effX <- as.numeric(isdata) * eff - mom * (inside * logiprob) } else{ # values of influence at data points X <- loc[isdata] effX <- eff[isdata, ,drop=FALSE] } M <- (1/p) * quadform(effX, invhess) if(logi || is.multitype(X)) { # result will have several columns of marks M <- data.frame(influence=M) if(logi) M$isdata <- factor(isdata, levels = c(TRUE, FALSE), labels = c("data", "dummy")) if(is.multitype(X)) M$type <- marks(X) } V <- X %mark% M result$infl <- V } # .......... dfbetas ............. if("dfbetas" %in% what) { if(logi){ M <- as.numeric(isdata) * eff - mom * (inside * logiprob) M <- t(invhess %*% t(M)) Mdum <- M Mdum[isdata,] <- 0 Mdum <- Mdum / w.quad(Q) DFB <- msr(Q, M[isdata, ], Mdum) } else { vex <- invhess %*% t(mom) dex <- invhess %*% t(eff) switch(method, interpreted = { dis <- con <- matrix(0, nloc, ncol(mom)) for(i in seq(nloc)) { vexi <- vex[,i, drop=FALSE] dexi <- dex[,i, drop=FALSE] dis[i, ] <- if(isdata[i]) dexi else 0 con[i, ] <- if(inside[i]) (- lam[i] * vexi) else 0 } }, C = { dis <- t(dex) dis[!isdata,] <- 0 con <- - lam * t(vex) con[(lam == 0 | !inside), ] <- 0 }) colnames(dis) <- colnames(con) <- colnames(mom) DFB <- msr(Q, dis[isdata, ], con) } #' add smooth component DFB <- augment.msr(DFB, sigma=smallsigma, dimyx=dimyx, eps=eps) result$dfbetas <- DFB } return(result) } ## >>>>>>>>>>>>>>>>>>>>>>> HELPER FUNCTIONS <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ## extract derivatives from covariate functions ## WARNING: these are not the score components in general ppmDerivatives <- function(fit, what=c("gradient", "hessian"), Dcovfun=NULL, loc, covfunargs=list()) { what <- match.arg(what) if(!is.null(Dcovfun)) { ## use provided function Dcov to compute derivatives Dvalues <- mpl.get.covariates(Dcovfun, loc, covfunargs=covfunargs) result <- as.matrix(as.data.frame(Dvalues)) return(result) } ## any irregular parameters? 
if(length(covfunargs) == 0) return(NULL) ## Try to extract derivatives from covariate functions ## This often works if the functions were created by symbolic differentiation fvalues <- mpl.get.covariates(fit$covariates, loc, covfunargs=covfunargs, need.deriv=TRUE) Dlist <- attr(fvalues, "derivatives")[[what]] if(length(Dlist) == 0) return(NULL) switch(what, gradient = { result <- do.call(cbind, unname(lapply(Dlist, as.data.frame))) result <- as.matrix(result) }, hessian = { ## construct array containing Hessian matrices biga <- do.call(blockdiagarray, Dlist) ## flatten matrices result <- matrix(biga, nrow=dim(biga)[1L]) }) return(result) } ## >>>>>>>>>>>>>>>> PLOT METHODS <<<<<<<<<<<<<<<<<<<<< plot.leverage.ppm <- function(x, ..., what=c("smooth", "nearest", "exact"), showcut=TRUE, args.cut=list(drawlabels=FALSE), multiplot=TRUE) { what <- match.arg(what) fitname <- x$fitname defaultmain <- paste("Leverage for", fitname) y <- x$lev if(what == "exact") { #' plot exact quadrature locations and leverage values yval <- y$val dont.complain.about(yval) z <- do.call(plot, resolve.defaults(list(x=quote(yval), multiplot=multiplot), list(...), list(main=defaultmain))) return(invisible(z)) } smo <- as.im(x, what=what) if(is.null(smo)) return(invisible(NULL)) ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- do.call(harmonise.im, unname(smo)) ## smo <- Reduce("+", smo) smo <- im.apply(smo, sum, check=FALSE) defaultmain <- c(defaultmain, "(sum over all types of point)") } args.contour <- resolve.defaults(args.cut, list(levels=ave)) cutinfo <- list(addcontour=showcut, args.contour=args.contour) if(is.im(smo)) { do.call(plot.im, resolve.defaults(list(quote(smo)), cutinfo, list(...), list(main=defaultmain))) } else if(inherits(smo, "imlist")) { do.call(plot.solist, resolve.defaults(list(quote(smo)), cutinfo, list(...), list(main=defaultmain))) } invisible(NULL) } persp.leverage.ppm <- function(x, ..., what=c("smooth", "nearest"), main, zlab="leverage") { if(missing(main)) main <- deparse(substitute(x)) what <- match.arg(what) y <- as.im(x, what=what) if(is.null(y)) return(invisible(NULL)) if(is.im(y)) return(persp(y, main=main, ..., zlab=zlab)) pa <- par(ask=TRUE) lapply(y, persp, main=main, ..., zlab=zlab) par(pa) return(invisible(NULL)) } contour.leverage.ppm <- function(x, ..., what=c("smooth", "nearest"), showcut=TRUE, args.cut=list(col=3, lwd=3, drawlabels=FALSE), multiplot=TRUE) { defaultmain <- paste("Leverage for", x$fitname) smo <- as.im(x, what=what) y <- x$lev ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- do.call(harmonise.im, unname(smo)) ## smo <- Reduce("+", smo) smo <- im.apply(smo, sum, check=FALSE) defaultmain <- c(defaultmain, "(sum over all types of point)") } argh1 <- resolve.defaults(list(...), list(main=defaultmain)) argh2 <- resolve.defaults(args.cut, list(levels=ave), list(...)) if(is.im(smo)) { #' single panel out <- do.call(contour, append(list(x=smo), argh1)) if(showcut) do.call(contour, append(list(x=smo, add=TRUE), argh2)) } else if(inherits(smo, "imlist")) { #' multiple panels argh <- append(list(x=smo, plotcommand ="contour"), argh1) if(showcut) { argh <- append(argh, list(panel.end=function(i, y, ...) 
contour(y, ...), panel.end.args=argh2)) } out <- do.call(plot.solist, argh) } else { warning("Unrecognised format") out <- NULL } return(invisible(out)) } plot.influence.ppm <- function(x, ..., multiplot=TRUE) { fitname <- x$fitname defaultmain <- paste("Influence for", fitname) y <- x$infl if(multiplot && isTRUE(ncol(marks(y)) > 1)) { # apart from the influence value, there may be additional columns of marks # containing factors: {type of point}, { data vs dummy in logistic case } ma <- as.data.frame(marks(y)) fax <- sapply(ma, is.factor) nfax <- sum(fax) if(nfax == 1) { # split on first available factor, and remove this factor y <- split(y, reduce=TRUE) } else if(nfax > 1) { # several factors: split according to them all, and remove them all f.all <- do.call(interaction, ma[fax]) z <- y %mark% ma[,!fax] y <- split(z, f.all) } } do.call(plot, resolve.defaults(list(quote(y)), list(...), list(main=defaultmain, multiplot=multiplot, which.marks=1))) } ## >>>>>>>>>>>>>>>> CONVERSION METHODS <<<<<<<<<<<<<<<<<<<<< as.im.leverage.ppm <- function(X, ..., what=c("smooth", "nearest")) { what <- match.arg(what) y <- switch(what, smooth = X$lev$smo, nearest = X$lev$nearest) if(is.null(y)) warning(paste("Data for", sQuote(what), "image are not available:", "please recompute the leverage using the current spatstat"), call.=FALSE) return(y) # could be either an image or a list of images } as.function.leverage.ppm <- function(x, ...) { X <- x$lev$val S <- ssf(unmark(X), marks(X)) return(as.function(S)) } as.ppp.influence.ppm <- function(X, ...) { return(X$infl) } as.owin.leverage.ppm <- function(W, ..., fatal=TRUE) { y <- as.im(W) if(inherits(y, "imlist")) y <- y[[1L]] as.owin(y, ..., fatal=fatal) } as.owin.influence.ppm <- function(W, ..., fatal=TRUE) { as.owin(as.ppp(W), ..., fatal=fatal) } domain.leverage.ppm <- domain.influence.ppm <- Window.leverage.ppm <- Window.influence.ppm <- function(X, ...) { as.owin(X) } ## >>>>>>>>>>>>>>>> PRINT METHODS <<<<<<<<<<<<<<<<<<<<< print.leverage.ppm <- function(x, ...) { splat("Point process leverage function") fitname <- x$fitname splat("for model:", fitname) lev <- x$lev splat("\nExact values:") print(lev$val) splat("\nSmoothed values:") print(lev$smo) ## for compatibility we retain the x$fit usage if(x$fit.is.poisson %orifnull% is.poisson(x$fit)) splat("\nAverage value:", lev$ave) return(invisible(NULL)) } print.influence.ppm <- function(x, ...) { splat("Point process influence measure") fitname <- x$fitname splat("for model:", fitname) splat("\nExact values:") print(x$infl) return(invisible(NULL)) } ## >>>>>>>>>>>>>>>> SUBSET METHODS <<<<<<<<<<<<<<<<<<<<< "[.leverage.ppm" <- function(x, i, ..., update=TRUE) { if(missing(i)) return(x) y <- x$lev smoi <- if(is.im(y$smo)) y$smo[i, ...] else solapply(y$smo, "[", i=i, ...) if(!inherits(smoi, c("im", "imlist"))) return(smoi) y$smo <- smoi y$val <- y$val[i, ...] if(update) y$ave <- if(is.im(smoi)) mean(smoi) else mean(sapply(smoi, mean)) x$lev <- y return(x) } "[.influence.ppm" <- function(x, i, ...) { if(missing(i)) return(x) y <- x$infl[i, ...] if(!is.ppp(y)) return(y) x$infl <- y return(x) } ## >>>>>>>>>>>>>>>> SMOOTHING, INTEGRATION <<<<<<<<<<<<<<<<<<<<< integral.leverage.ppm <- function(f, domain=NULL, ...) { y <- as.im(f, what="nearest") z <- if(is.im(y)) { integral(y, domain=domain, ...) } else if(is.solist(y)) { sapply(y, integral, domain=domain, ...) } else stop("Internal format is not understood") if(length(dim(z))) z <- t(z) return(z) } integral.influence.ppm <- function(f, domain=NULL, ...) 
{ if(!is.null(domain)) { if(is.tess(domain)) { z <- sapply(tiles(domain), integral, f=f) if(length(dim(z))) z <- t(z) return(z) } f <- f[domain] } #' actual computation y <- as.ppp(f) return(colSums(as.matrix(marks(y)))) } mean.leverage.ppm <- function(x, ...) { y <- as.im(x, what="nearest") mean(y, ...) } Smooth.leverage.ppm <- function(X, ...) Smooth(X$lev$val, ...) Smooth.influence.ppm <- function(X, ...) Smooth(as.ppp(X), ...) ## >>>>>>>>>>>>>>>> GEOMETRICAL OPERATIONS <<<<<<<<<<<<<<<<<<<<< shift.leverage.ppm <- function(X, ...) { vec <- getlastshift(shift(as.owin(X), ...)) X$lev$val <- shift(X$lev$val, vec=vec) smo <- X$lev$smo X$lev$smo <- if(is.im(smo)) shift(smo, vec=vec) else solapply(smo, shift, vec=vec) return(putlastshift(X, vec)) } shift.influence.ppm <- function(X, ...) { X$infl <- shift(X$infl, ...) return(putlastshift(X, getlastshift(X$infl))) } spatstat.core/R/plot.plotppm.R0000644000176200001440000001077214144333463016054 0ustar liggesusers# # plot.plotppm.R # # engine of plot method for ppm # # $Revision: 1.22 $ $Date: 2020/12/19 05:25:06 $ # # plot.plotppm <- function(x,data=NULL,trend=TRUE,cif=TRUE,se=TRUE, pause=interactive(), how=c("persp","image","contour"), ..., pppargs=list()) { verifyclass(x,"plotppm") # determine main plotting actions superimposed <- !is.null(data) if(!missing(trend) && (trend & is.null(x[["trend"]]))) stop("No trend to plot.\n") trend <- trend & !is.null(x[["trend"]]) if(!missing(cif) && (cif & is.null(x[["cif"]]))) stop("No cif to plot.\n") cif <- cif & !is.null(x[["cif"]]) if(!missing(se) && (se & is.null(x[["se"]]))) stop("No SE to plot.\n") se <- se & !is.null(x[["se"]]) surftypes <- c("trend", "cif", "se")[c(trend, cif, se)] # marked point process? mrkvals <- attr(x,"mrkvals") marked <- (length(mrkvals) > 1) if(marked) data.marks <- marks(data) if(marked & superimposed) { data.types <- levels(data.marks) if(any(sort(data.types) != sort(mrkvals))) stop(paste("Data marks are different from mark", "values for argument x.\n")) } # plotting style howmat <- outer(how, c("persp", "image", "contour"), "==") howmatch <- matrowany(howmat) if (any(!howmatch)) stop(paste("unrecognised option", how[!howmatch])) # no pause required for single display if(missing(pause) || is.null(pause)) { nplots <- length(surftypes) * length(mrkvals) pause <- interactive() && (nplots != 1) } # start plotting if(pause) oldpar <- par(ask = TRUE) on.exit(if(pause) par(oldpar)) for(ttt in surftypes) { xs <- x[[ttt]] for (i in seq_along(mrkvals)) { xsi <- xs[[i]] level <- mrkvals[i] main <- paste(if(ttt == "se") "Estimated" else "Fitted", ttt, if(marked) paste("\n mark =", level) else NULL) dont.complain.about(xsi) for (style in how) { switch(style, persp = { do.call(persp, resolve.defaults(list(quote(xsi)), list(...), spatstat.options("par.persp"), list(xlab="x", zlab=ttt, main=main))) }, image = { do.call(image, resolve.defaults(list(quote(xsi)), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data dont.complain.about(X) do.call(plot.ppp, append(list(x=quote(X), add=TRUE), pppargs)) } }, contour = { do.call(contour, resolve.defaults(list(quote(xsi)), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data dont.complain.about(X) do.call(plot.ppp, append(list(x=quote(X), add=TRUE), pppargs)) } }, { stop(paste("Unrecognised plot style", style)) }) } } } return(invisible()) } print.plotppm <- function(x, ...) 
{ verifyclass(x, "plotppm") trend <- x$trend cif <- x$cif mrkvals <- attr(x, "mrkvals") ntypes <- length(mrkvals) unmarked <- (ntypes == 1 ) cat(paste("Object of class", sQuote("plotppm"), "\n")) if(unmarked) cat("Computed for an unmarked point process\n") else { cat("Computed for a marked point process, with mark values:\n") print(mrkvals) } cat("Contains the following components:\n") if(!is.null(trend)) { cat("\n$trend:\tFitted trend.\n") if(unmarked) { cat("A list containing 1 image\n") print(trend[[1]], ...) } else { cat(paste("A list of", ntypes, "images\n")) cat("Typical details:\n") print(trend[[1]], ...) } } if(!is.null(cif)) { cat("\n$cif:\tFitted conditional intensity.\n") if(unmarked) { cat("A list containing 1 image\n") print(cif[[1]], ...) } else { cat(paste("A list of", ntypes, "images\n")) cat("Typical details:\n") print(cif[[1]], ...) } } invisible(NULL) } spatstat.core/R/concom.R0000644000176200001440000000742514144333462014662 0ustar liggesusers# # # concom.R # # $Revision: 1.5 $ $Date: 2018/03/15 07:37:41 $ # # The connected component interaction # # Concom() create an instance of the connected component interaction # [an object of class 'interact'] # # ------------------------------------------------------------------- # Concom <- local({ connectedlabels <- function(X, R) { connected(X, R, internal=TRUE) } countcompo <- function(X, R) { length(unique(connectedlabels(X, R))) } # change in number of components when point i is deleted cocoDel <- function(X, R, subset=seq_len(npoints(X))) { n <- length(subset) ans <- integer(n) if(n > 0) { cX <- countcompo(X, R) for(i in 1:n) ans[i] = countcompo(X[-subset[i]], R) - cX } return(ans) } # change in number of components when new point is added cocoAdd <- function(U, X, R) { U <- as.ppp(U, W=as.owin(X)) nU <- npoints(U) cr <- crosspairs(U, X, R, what="indices") lab <- connectedlabels(X, R) hitcomp <- tapply(X=lab[cr$j], INDEX=factor(cr$i, levels=1:nU), FUN=unique, simplify=FALSE) nhit <- unname(lengths(hitcomp)) change <- 1L - nhit return(change) } # connected component potential cocopot <- function(X,U,EqualPairs,pars,correction, ...) 
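  ## The potential value assigned to a point u is 1 minus the increase
  ## in the number of connected components of the pattern (at distance
  ## threshold r) caused by adding u; dummy points are scored by adding
  ## them to X, data points by deleting them from X. The canonical
  ## coefficient of this potential is log(eta).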
  {
    bad <- !(correction %in% c("border", "none"))
    if((nbad <- sum(bad)) > 0)
      warning(paste("The", ngettext(nbad, "correction", "corrections"),
                    commasep(sQuote(correction[bad])),
                    ngettext(nbad, "is", "are"),
                    "not implemented"))
    n <- U$n
    answer <- numeric(n)
    r <- pars$r
    if(is.null(r))
      stop("internal error: r parameter not found")
    dummies <- !(seq_len(n) %in% EqualPairs[,2L])
    if(sum(dummies) > 0)
      answer[dummies] <- -cocoAdd(U[dummies], X, r)
    ii <- EqualPairs[,1L]
    jj <- EqualPairs[,2L]
    answer[jj] <- cocoDel(X, r, subset=ii)
    return(answer + 1L)
  }

  # template object without family, par, version
  BlankCoco <- list(
    name = "Connected component process",
    creator = "Concom",
    family = "inforder.family", # evaluated later
    pot = cocopot,
    par = list(r = NULL), # to be filled in
    parnames = "distance threshold",
    hasInf = FALSE,
    init = function(self) {
      r <- self$par$r
      if(!is.numeric(r) || length(r) != 1L || r <= 0)
        stop("distance threshold r must be a positive number")
    },
    update = NULL, # default OK
    print = NULL,  # default OK
    interpret = function(coeffs, self) {
      logeta <- as.numeric(coeffs[1L])
      eta <- exp(logeta)
      return(list(param=list(eta=eta),
                  inames="interaction parameter eta",
                  printable=signif(eta)))
    },
    valid = function(coeffs, self) {
      eta <- ((self$interpret)(coeffs, self))$param$eta
      return(is.finite(eta))
    },
    project = function(coeffs, self) {
      if((self$valid)(coeffs, self)) return(NULL)
      return(Poisson())
    },
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      if(anyNA(coeffs)) return(Inf)
      logeta <- coeffs[1L]
      if(abs(logeta) <= epsilon) return(0) else return(Inf)
    },
    version=NULL # to be added
  )

  class(BlankCoco) <- "interact"

  Concom <- function(r) {
    instantiate.interact(BlankCoco, list(r=r))
  }

  Concom <- intermaker(Concom, BlankCoco)

  Concom
})
spatstat.core/R/randomseg.R0000644000176200001440000000403014144333463015351 0ustar liggesusers
#
# randomseg.R
#
# $Revision: 1.16 $ $Date: 2021/09/10 08:10:54 $
#
rpoisline <- function(lambda, win=owin()) {
  win <- as.owin(win)
  # determine circumcircle
  xr <- win$xrange
  yr <- win$yrange
  xmid <- mean(xr)
  ymid <- mean(yr)
  width <- diff(xr)
  height <- diff(yr)
  rmax <- sqrt(width^2 + height^2)/2
  boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax)
  # generate poisson lines through circumcircle
  n <- rpois(1, lambda * 2 * pi * rmax)
  if(n == 0) {
    X <- psp(numeric(0), numeric(0), numeric(0), numeric(0),
             marks=integer(0), window=win)
    attr(X, "lines") <- infline(p=numeric(0), theta=numeric(0))
    attr(X, "linemap") <- integer(0)
    return(X)
  }
  theta <- runif(n, max= 2 * pi)
  p <- runif(n, max=rmax)
  # compute intersection points with circle
  q <- sqrt(rmax^2 - p^2)
  co <- cos(theta)
  si <- sin(theta)
  X <- psp(x0= xmid + p * co + q * si,
           y0= ymid + p * si - q * co,
           x1= xmid + p * co - q * si,
           y1= ymid + p * si + q * co,
           marks = seq_len(n),
           window=boundbox, check=FALSE)
  # infinite lines
  L <- infline(p = p + xmid * co + ymid * si, theta = theta)
  # clip to window
  X <- X[win]
  # append info
  linemap <- as.integer(marks(X))
  X <- unmark(X)
  attr(X, "lines") <- L
  attr(X, "linemap") <- linemap
  return(X)
}

rjitter.psp <- function(X, radius, ..., clip=TRUE, nsim=1, drop=TRUE) {
  if(nsegments(X) == 0) {
    result <- rep(list(X), nsim)
    result <- simulationresult(result, nsim, drop)
    return(result)
  }
  Xfrom <- endpoints.psp(X, "first")
  Xto <- endpoints.psp(X, "second")
  if(clip)
    Window(Xfrom) <- Window(Xto) <- grow.rectangle(Frame(X), radius)
  result <- vector(mode="list", length=nsim)
  for(isim in seq_len(nsim)) {
    # jitter the endpoints of the original segments in each simulation,
    # so that successive simulations are independent displacements of X
    Xfrom.i <- rjitter(Xfrom, radius)
    Xto.i <- rjitter(Xto, radius)
    Y <- as.psp(from=Xfrom.i, to=Xto.i)
    if(clip) Y <- Y[Window(X), clip=TRUE]
    result[[isim]] <- Y
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}
spatstat.core/R/multistrhard.R0000644000176200001440000003107714144333463016127 0ustar liggesusers
#
#
# multistrhard.R
#
# $Revision: 2.39 $ $Date: 2018/03/15 07:37:41 $
#
# The multitype Strauss/hardcore process
#
# MultiStraussHard()    create an instance of the multitype Strauss/hardcore
#                       point process
#                       [an object of class 'interact']
#
# -------------------------------------------------------------------
#

doMultiStraussHard <- local({

  # ........ define potential ......................

  MSHpotential <- function(d, tx, tu, par) {
    # arguments:
    # d[i,j]  distance between points X[i] and U[j]
    # tx[i]   type (mark) of point X[i]
    # tu[j]   type (mark) of point U[j]
    #
    # get matrices of interaction radii
    r <- par$iradii
    h <- par$hradii
    # get possible marks and validate
    if(!is.factor(tx) || !is.factor(tu))
      stop("marks of data and dummy points must be factor variables")
    lx <- levels(tx)
    lu <- levels(tu)
    if(length(lx) != length(lu) || any(lx != lu))
      stop("marks of data and dummy points do not have same possible levels")
    if(!identical(lx, par$types))
      stop("data and model do not have the same possible levels of marks")
    if(!identical(lu, par$types))
      stop("dummy points and model do not have the same possible levels of marks")
    # ensure factor levels are acceptable for column names (etc)
    lxname <- make.names(lx, unique=TRUE)
    # list all UNORDERED pairs of types to be counted
    # (the interaction must be symmetric in type, and scored as such)
    uptri <- (row(r) <= col(r)) & !is.na(r)
    mark1 <- (lx[row(r)])[uptri]
    mark2 <- (lx[col(r)])[uptri]
    # corresponding names
    mark1name <- (lxname[row(r)])[uptri]
    mark2name <- (lxname[col(r)])[uptri]
    vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
    vname <- paste("mark", vname, sep="")
    npairs <- length(vname)
    # list all ORDERED pairs of types to be counted
    # (to save writing the same code twice)
    different <- mark1 != mark2
    mark1o <- c(mark1, mark2[different])
    mark2o <- c(mark2, mark1[different])
    nordpairs <- length(mark1o)
    # unordered pair corresponding to each ordered pair
    ucode <- c(1:npairs, (1:npairs)[different])
    #
    # create numeric array for result
    z <- array(0, dim=c(dim(d), npairs),
               dimnames=list(character(0), character(0), vname))
    # go....
    if(length(z) > 0) {
      # apply the relevant interaction distance to each pair of points
      rxu <- r[ tx, tu ]
      str <- (d < rxu)
      str[is.na(str)] <- FALSE
      # and the relevant hard core distance
      hxu <- h[ tx, tu ]
      forbid <- (d < hxu)
      forbid[is.na(forbid)] <- FALSE
      # form the potential
      value <- str
      value[forbid] <- -Inf
      # assign value[i,j] -> z[i,j,k] where k is relevant interaction code
      for(i in 1:nordpairs) {
        # data points with mark m1
        Xsub <- (tx == mark1o[i])
        # quadrature points with mark m2
        Qsub <- (tu == mark2o[i])
        # assign
        z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub]
      }
    }
    return(z)
  }
  # ............... end of potential function ...................

  # .......... auxiliary functions .................

  delMSH <- function(which, types, iradii, hradii, ihc) {
    iradii[which] <- NA
    if(any(!is.na(iradii))) {
      # some gamma interactions left
      # return modified MultiStraussHard with fewer gamma parameters
      return(MultiStraussHard(types, iradii, hradii))
    } else if(any(!ihc)) {
      # no gamma interactions left, but some active hard cores
      return(MultiHard(types, hradii))
    } else return(Poisson())
  }
  # ...........................................................
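  ## MSHpotential returns a 3D array z[i,j,k] whose third index k runs
  ## over the unordered pairs of types with a finite interaction radius:
  ## an entry is 1 when the two points are closer than the interaction
  ## radius r[type_i, type_j], and -Inf when they violate the hardcore
  ## distance h[type_i, type_j], so the fitted canonical coefficients
  ## are the log interaction parameters log(gamma_ij).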
# Set up basic object except for family and parameters BlankMSHobject <- list( name = "Multitype Strauss Hardcore process", creator = "MultiStraussHard", family = "pairwise.family", # evaluated later pot = MSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL), # to be added parnames = c("possible types", "interaction distances", "hardcore distances"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiStraussHard(types=types,hradii=hradii,iradii=self$par$iradii) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii # hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii")) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii")) } ina <- is.na(iradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) if(!is.null(hradii)) { hna <- is.na(hradii) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") } }, update = NULL, # default OK print = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii nt <- nrow(iradii) if(waxlyrical('gory')) { splat(nt, "types of points") if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } splat("Interaction radii:") dig <- getOption("digits") print(signif(iradii, dig)) if(!is.null(hradii)) { splat("Hardcore radii:") print(signif(hradii, dig)) } else splat("Hardcore radii: not yet determined") invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii # list all relevant unordered pairs of types uptri <- (row(r) <= col(r)) & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # Check that we managed to estimate all required parameters required <- !is.na(iradii) if(!all(is.finite(gamma[required]))) return(FALSE) # Check that the model is integrable # inactive hard cores 
... ihc <- (is.na(hradii) | hradii == 0) # .. must have gamma <= 1 return(all(gamma[required & ihc] <= 1)) }, project = function(coeffs, self) { # types types <- self$par$types # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # required gamma parameters required <- !is.na(iradii) # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems gammavalid <- is.finite(gamma) & (activehard | gamma <= 1) naughty <- required & !gammavalid if(!any(naughty)) return(NULL) # if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMSH(naughty, types, iradii, hradii, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) # matindex <- function(v) { matrix(c(v, rev(v)), # ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMSH, types=types, iradii=iradii, hradii=hradii, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) hactive <- !is.na(h) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankMSHobject) <- "interact" matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } # Finally define MultiStraussHard function doMultiStraussHard <- function(iradii, hradii=NULL, types=NULL) { iradii[iradii == 0] <- NA if(!is.null(hradii)) hradii[hradii == 0] <- NA out <- instantiate.interact(BlankMSHobject, list(types=types, iradii = iradii, hradii = hradii)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } doMultiStraussHard }) MultiStraussHard <- local({ MultiStraussHard <- function(iradii, hradii, types=NULL) { ## try new syntax newcall <- match.call() newcall[[1]] <- as.name('doMultiStraussHard') out <- try(eval(newcall, parent.frame()), silent=TRUE) if(is.interact(out)) return(out) ## try old syntax oldcall <- match.call(function(types=NULL, iradii, hradii) {}) oldcall[[1]] <- as.name('doMultiStraussHard') out <- try(eval(oldcall, parent.frame()), silent=TRUE) if(is.interact(out)) return(out) ## Syntax is wrong: generate error using new syntax rules if(missing(hradii)) hradii <- NULL doMultiStraussHard(iradii=iradii, hradii=hradii, types=types) } BlankMSHobject <- get("BlankMSHobject", envir=environment(doMultiStraussHard)) MultiStraussHard <- intermaker(MultiStraussHard, BlankMSHobject) MultiStraussHard }) spatstat.core/R/pcfmulti.R0000644000176200001440000001641514144333463015227 0ustar liggesusers# # pcfmulti.R # # $Revision: 1.8 $ $Date: 2016/09/21 07:28:58 $ # # multitype pair correlation functions # pcfcross <- function(X, i, j, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r","d")) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) ## marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == 
i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) ## result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname) ## iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(g[i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste(iname, jname, sep=",")))), new.yexp=substitute(g[list(i,j)](r), list(i=iname,j=jname))) return(result) } pcfdot <- function(X, i, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r", "d")) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points Iname <- paste("points with mark i =", i) Jname <- "points" result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(g[i ~ dot](r), list(i=iname)), c("g", paste0(iname, "~symbol(\"\\267\")")), new.yexp=substitute(g[i ~ symbol("\267")](r), list(i=iname))) return(result) } pcfmulti <- function(X, I, J, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) divisor <- match.arg(divisor) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) ## .......... indices I and J ............................. I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] # lambdaI <- nI/areaW lambdaJ <- nJ/areaW nIJ <- sum(I & J) lambdaIJarea <- (nI * nJ - nIJ)/areaW ## ........... kernel bandwidth and support ......................... 
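  ## Explanatory note (added comment): the default below follows the
  ## Stoyan & Stoyan rule of thumb.  For the Epanechnikov kernel, take
  ## half-width h = stoyan/sqrt(lambda), where lambda is the intensity of
  ## the J points.  The Epanechnikov kernel with half-width h has standard
  ## deviation h/sqrt(5), which is the value passed as 'bw' to density().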
if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambdaJ) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambdaJ) } ########## r values ############################ # handle argument r rmaxdefault <- rmax.rule("K", win, lambdaJ) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(I,J)") yexp <- quote(g[list(I,J)](r)) out <- fv(df, "r", quote(g[I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "Pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=yexp) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# ## compute pairwise distances ## identify close pairs of points what <- if(any(correction == "translate")) "all" else "ijd" close <- crosspairs(XI, XJ, rmax+hmax, what=what) ## map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] ## eliminate any identical pairs if(nIJ > 0) { ok <- (iX != jX) if(!all(ok)) close <- as.list(as.data.frame(close)[ok, , drop=FALSE]) } ## extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i # jcloseJ <- close$j ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) gT <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } ## sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) return(out) } spatstat.core/R/envelope.R0000644000176200001440000025066314144333462015225 0ustar liggesusers# # envelope.R # # computes simulation envelopes # # $Revision: 2.110 $ $Date: 2021/10/09 10:42:18 $ # envelope <- function(Y, fun, ...) { UseMethod("envelope") } # ................................................................. 
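## Typical usage of the generic (an illustrative sketch, kept inside
## if(FALSE) so that it is never executed when the package is loaded;
## assumes the 'cells' dataset from spatstat.data is available):
if(FALSE) {
  ## pointwise envelopes of K under CSR; with nrank=1, nsim=39 gives a
  ## two-sided pointwise test of level 2/(39+1) = 0.05
  E <- envelope(cells, Kest, nsim=39)
  plot(E)
  ## simultaneous (global) envelopes; level 1/(19+1) = 0.05
  G <- envelope(cells, Lest, nsim=19, global=TRUE)
}
## .................................................................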
# A 'simulation recipe' contains the following variables # # type = Type of simulation # "csr": uniform Poisson process # "rmh": simulated realisation of fitted Gibbs or Poisson model # "kppm": simulated realisation of fitted cluster model # "slrm": simulated realisation of spatial logistic regression # "expr": result of evaluating a user-supplied expression # "list": user-supplied list of point patterns # # expr = expression that is repeatedly evaluated to generate simulations # # envir = environment in which to evaluate the expression `expr' # # 'csr' = TRUE iff the model is (known to be) uniform Poisson # # pois = TRUE if model is known to be Poisson # # constraints = additional information about simulation (e.g. 'with fixed n') # # ................................................................... simulrecipe <- function(type, expr, envir, csr, pois=csr, constraints="") { if(csr && !pois) warning("Internal error: csr=TRUE but pois=FALSE") out <- list(type=type, expr=expr, envir=envir, csr=csr, pois=pois, constraints=constraints) class(out) <- "simulrecipe" out } envelope.ppp <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) ismarked <- is.marked(Y) ismulti <- is.multitype(Y) fix.marks <- fix.marks && ismarked if(!is.null(simulate)) { # ................................................... # Simulations are determined by 'simulate' argument if(fix.n || fix.marks) warning("fix.n and fix.marks were ignored, because 'simulate' was given") # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else if(!fix.n && !fix.marks) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y, checkdup=FALSE) Yintens <- sY$intensity nY <- npoints(Y) Ywin <- Y$window Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { # unmarked point pattern expression(rpoispp(Yintens, win=Ywin)) } else if(is.null(dim(Ymarx))) { # single column of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { # multiple columns of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, , drop=FALSE] }) } dont.complain.about(Yintens, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE) } else if(fix.marks) { # ................................................... 
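    ## (Added comment: in this branch only the locations are re-randomised,
    ## as a uniform binomial process; the observed mark vector is reused
    ## unchanged, so for a multitype pattern the number of points of each
    ## type is held fixed.)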
# Data pattern is argument Y X <- Y # Realisations of binomial process # with fixed number of points and fixed marks # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- expression(runifpoint(nY, Ywin) %mark% Ymarx) # suppress warnings from code checkers dont.complain.about(nY, Ywin, Ymarx) # simulation constraints (explanatory string) constraints <- if(ismulti) "with fixed number of points of each type" else "with fixed number of points and fixed marks" # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = constraints) } else { # ................................................... # Data pattern is argument Y X <- Y # Realisations of binomial process # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { ## unmarked expression(runifpoint(nY, Ywin)) } else if(is.null(dim(Ymarx))) { ## single column of marks expression({ A <- runifpoint(nY, Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { ## multiple columns of marks expression({ A <- runifpoint(nY, Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, ,drop=FALSE] }) } dont.complain.about(nY, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = "with fixed number of points") } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.ppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- data.ppm(Y) if(is.null(simulate)) { # ................................................... 
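    ## (Added comment: each simulated pattern is generated by the
    ## Metropolis-Hastings sampler 'rmh'.  The rmh setup is pre-digested
    ## once below, via preponly=TRUE, so that the simulation loop only
    ## needs to call rmhEngine repeatedly.)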
# Simulated realisations of the fitted model Y # will be generated pois <- is.poisson(Y) csr <- is.stationary(Y) && pois type <- if(csr) "csr" else "rmh" # Set up parameters for rmh rmodel <- rmhmodel(Y, verbose=FALSE) if(is.null(start)) start <- list(n.start=npoints(X)) rstart <- rmhstart(start) rcontr <- rmhcontrol(control) if(fix.marks) { rcontr <- update(rcontr, fixall=TRUE, p=1, expand=1) nst <- if(is.multitype(X)) table(marks(X)) else npoints(X) rstart <- update(rstart, n.start=nst) constraints <- "with fixed number of points of each type" } else if(fix.n) { rcontr <- update(rcontr, p=1, expand=1) rstart <- update(rstart, n.start=X$n) constraints <- "with fixed number of points" } else constraints <- "" # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=FALSE) # expression that will be evaluated simexpr <- expression(rmhEngine(rmhinfolist, verbose=FALSE)) dont.complain.about(rmhinfolist) # evaluate in THIS environment simrecipe <- simulrecipe(type = type, expr = simexpr, envir = envir.here, csr = csr, pois = pois, constraints = constraints) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.kppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- Y$X if(is.null(simulate)) { # Simulated realisations of the fitted model Y # will be generated using simulate.kppm kmodel <- Y # expression that will be evaluated simexpr <- expression(simulate(kmodel)[[1L]]) dont.complain.about(kmodel) # evaluate in THIS environment simrecipe <- simulrecipe(type = "kppm", expr = simexpr, envir = envir.here, csr = FALSE, pois = FALSE) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.slrm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- response(Y) if(is.null(simulate)) { # Simulated realisations of the fitted model Y # will be generated using simulate.slrm smodel <- Y # expression that will be evaluated simexpr <- expression(simulate(smodel)[[1L]]) dont.complain.about(smodel) # evaluate in THIS environment simrecipe <- simulrecipe(type = "slrm", expr = simexpr, envir = envir.here, csr = FALSE, pois = FALSE) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } ## ................................................................. ## Engine for simulating and computing envelopes ## ................................................................. 
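## The methods above all delegate to envelopeEngine.  For reference, the
## 'simulate' argument they accept may be an expression, a function to be
## applied to the data pattern, a list of point patterns, or an envelope
## object containing saved patterns.  An illustrative sketch (kept inside
## if(FALSE) so that it is never executed; uses the 'cells' dataset):
if(FALSE) {
  ## an expression, evaluated afresh for each simulation
  E1 <- envelope(cells, Kest, nsim=19,
                 simulate=expression(runifpoint(npoints(cells),
                                                Window(cells))))
  ## a function, applied to the data pattern for each simulation
  E2 <- envelope(cells, Kest, nsim=19,
                 simulate=function(X) runifpoint(npoints(X), Window(X)))
}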
# # X is the data point pattern, which could be ppp, pp3, ppx etc # X determines the class of pattern expected from the simulations # envelopeEngine <- function(X, fun, simul, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, saveresultof=NULL, weights=NULL, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, maxerr.action=c("fatal", "warn", "null"), internal=NULL, cl=NULL, envir.user=envir.user, expected.arg="r", do.pwrong=FALSE, foreignclass=NULL, collectrubbish=FALSE) { # envir.here <- sys.frame(sys.nframe()) alternative <- match.arg(alternative) maxerr.action <- match.arg(maxerr.action) foreignclass <- as.character(foreignclass) if(length(foreignclass) != 0 && clipdata) { warning(paste("Ignoring clipdata=TRUE:", "I don't know how to clip objects of class", sQuote(paste(foreignclass, collapse=",")))) clipdata <- FALSE } # ---------------------------------------------------------- # Determine Simulation # ---------------------------------------------------------- # Identify class of patterns to be simulated, from data pattern X Xclass <- if(is.ppp(X)) "ppp" else if(is.pp3(X)) "pp3" else if(is.ppx(X)) "ppx" else if(inherits(X, foreignclass)) foreignclass else stop("Unrecognised class of point pattern") Xobjectname <- paste("point pattern of class", sQuote(Xclass)) # Option to use weighted average if(use.weights <- !is.null(weights)) { # weight can be either a numeric vector or a function if(is.numeric(weights)) { compute.weights <- FALSE weightfun <- NULL } else if(is.function(weights)) { compute.weights <- TRUE weightfun <- weights weights <- NULL } else stop("weights should be either a function or a numeric vector") } else compute.weights <- FALSE # Undocumented option to generate patterns only. patterns.only <- identical(internal$eject, "patterns") # Undocumented option to evaluate 'something' for each pattern if(savevalues <- !is.null(saveresultof)) { stopifnot(is.function(saveresultof)) SavedValues <- list() } # Identify type of simulation from argument 'simul' if(inherits(simul, "simulrecipe")) { # .................................................. # simulation recipe is given simtype <- simul$type simexpr <- simul$expr envir <- simul$envir csr <- simul$csr pois <- simul$pois constraints <- simul$constraints } else { # ................................................... 
# simulation is specified by argument `simulate' to envelope() simulate <- simul # which should be an expression, or a list of point patterns, # or an envelope object, or a function to be applied to the data csr <- FALSE # override if(!is.null(icsr <- internal$csr)) csr <- icsr pois <- csr constraints <- "" # model <- NULL if(inherits(simulate, "envelope")) { # envelope object: see if it contains stored point patterns simpat <- attr(simulate, "simpatterns") if(!is.null(simpat)) simulate <- simpat else stop(paste("The argument", sQuote("simulate"), "is an envelope object but does not contain", "any saved point patterns.")) } if(is.expression(simulate)) { ## The user-supplied expression 'simulate' will be evaluated repeatedly simtype <- "expr" simexpr <- simulate envir <- envir.user } else if(is.function(simulate)) { ## User-supplied function 'simulate' will be repeatedly evaluated on X simtype <- "func" simexpr <- expression(simulate(X)) envir <- envir.here } else if(is.list(simulate) && all(sapply(simulate, inherits, what=Xclass))) { # The user-supplied list of point patterns will be used simtype <- "list" SimDataList <- simulate # expression that will be evaluated simexpr <- expression(SimDataList[[i+nerr]]) dont.complain.about(SimDataList) envir <- envir.here # ensure that `i' is defined i <- 1L nerr <- 0L maxnerr <- min(length(SimDataList)-nsim, maxnerr) # any messages? if(!is.null(mess <- attr(simulate, "internal"))) { # determine whether these point patterns are realisations of CSR csr <- !is.null(mc <- mess$csr) && mc } } else stop(paste(sQuote("simulate"), "should be an expression,", "or a list of point patterns of the same kind as X")) } # ------------------------------------------------------------------- # Determine clipping window # ------------------------------------------------------------------ if(clipdata) { # Generate one realisation Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)), slrm=stop(paste("Internal error: simulate.slrm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), func=stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) # Extract window clipwin <- Xsim$window if(!is.subset.owin(clipwin, X$window)) warning("Window containing simulated patterns is not a subset of data window") } # ------------------------------------------------------------------ # Summary function to be applied # ------------------------------------------------------------------ if(is.null(fun)) stop("Internal error: fun is NULL") # Name of function, for error messages fname <- if(is.name(substitute(fun))) short.deparse(substitute(fun)) else if(is.character(fun)) fun else "fun" fname <- sQuote(fname) # R function to apply if(is.character(fun)) { gotfun <- try(get(fun, mode="function")) if(inherits(gotfun, "try-error")) stop(paste("Could not find a function named", sQuote(fun))) fun <- gotfun } else if(!is.function(fun)) stop(paste("unrecognised format for function", fname)) fargs <- names(formals(fun)) if(!any(c(expected.arg, "...") %in% fargs)) stop(paste(fname, "should have", 
ngettext(length(expected.arg), "an argument", "arguments"), "named", commasep(sQuote(expected.arg)), "or a", sQuote("..."), "argument")) usecorrection <- any(c("correction", "...") %in% fargs) # --------------------------------------------------------------------- # validate other arguments if((nrank %% 1) != 0) stop("nrank must be an integer") if((nsim %% 1) != 0) stop("nsim must be an integer") stopifnot(nrank > 0 && nrank < nsim/2) rgiven <- any(expected.arg %in% names(list(...))) if(tran <- !is.null(transform)) { stopifnot(is.expression(transform)) # prepare expressions to be evaluated each time transform.funX <- inject.expr("with(funX,.)", transform) transform.funXsim <- inject.expr("with(funXsim,.)", transform) # .... old code using 'eval.fv' ...... # transform.funX <- dotexpr.to.call(transform, "funX", "eval.fv") # transform.funXsim <- dotexpr.to.call(transform, "funXsim", "eval.fv") # 'transform.funX' and 'transform.funXsim' are unevaluated calls to eval.fv } if(!is.null(ginterval)) stopifnot(is.numeric(ginterval) && length(ginterval) == 2) # --------------------------------------------------------------------- # Evaluate function for data pattern X # ------------------------------------------------------------------ Xarg <- if(!clipdata) X else X[clipwin] corrx <- if(usecorrection) list(correction="best") else NULL dont.complain.about(Xarg) funX <- do.call(fun, resolve.defaults(list(quote(Xarg)), list(...), funYargs, corrx)) if(!inherits(funX, "fv")) stop(paste("The function", fname, "must return an object of class", sQuote("fv"))) ## catch 'conservation' parameters conserveargs <- attr(funX, "conserve") if(!is.null(conserveargs) && !any(c("conserve", "...") %in% fargs)) stop(paste("In this usage, the function", fname, "should have an argument named 'conserve' or '...'")) ## warn about 'dangerous' arguments if(!is.null(dang <- attr(funX, "dangerous")) && any(uhoh <- dang %in% names(list(...)))) { nuh <- sum(uhoh) warning(paste("Envelope may be invalid;", ngettext(nuh, "argument", "arguments"), commasep(sQuote(dang[uhoh])), ngettext(nuh, "appears", "appear"), "to have been fixed."), call.=FALSE) } argname <- fvnames(funX, ".x") valname <- fvnames(funX, ".y") has.theo <- "theo" %in% fvnames(funX, "*") csr.theo <- csr && has.theo use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo) if(tran) { # extract only the recommended value if(use.theory) funX <- funX[, c(argname, valname, "theo")] else funX <- funX[, c(argname, valname)] # apply the transformation to it funX <- eval(transform.funX) } rvals <- funX[[argname]] # fX <- funX[[valname]] # default domain over which to maximise alim <- attr(funX, "alim") if(global && is.null(ginterval)) ginterval <- if(rgiven || is.null(alim)) range(rvals) else alim #-------------------------------------------------------------------- # Determine number of simulations # ------------------------------------------------------------------ # ## determine whether dual simulations are required ## (one set of simulations to calculate the theoretical mean, ## another independent set of simulations to obtain the critical point.) 
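  ## (Added comment: an independent second set is needed because, if the
  ## same curves were used both to estimate the reference mean and to
  ## compute the maximum deviations, each simulated curve would be compared
  ## with a mean that includes itself, biasing the global test.)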
dual <- (global && !use.theory && !VARIANCE) Nsim <- if(!dual) nsim else (nsim + nsim2) # if taking data from a list of point patterns, # check there are enough of them if(simtype == "list" && Nsim > length(SimDataList)) stop(paste("Number of simulations", paren(if(!dual) paste(nsim) else paste(nsim, "+", nsim2, "=", Nsim) ), "exceeds number of point pattern datasets supplied")) # Undocumented secret exit # ------------------------------------------------------------------ if(patterns.only) { # generate simulated realisations and return only these patterns if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", slrm = "simulated realisations of spatial logistic regression model", expr = "simulations by evaluating expression", func = "simulations by evaluating function", list = "point patterns from list", "simulated realisations") if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" splat(action, Nsim, descrip, explan, "...") } XsimList <- list() # start simulation loop sstate <- list() for(i in 1:Nsim) { if(verbose) sstate <- progressreport(i, Nsim, state=sstate) Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr={ stop(paste("Internal error:", Xobjectname, "not generated")) }, rmh={ stop(paste("Internal error: rmh did not return an", Xobjectname)) }, kppm={ stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)) }, slrm={ stop(paste("Internal error: simulate.slrm did not return an", Xobjectname)) }, expr={ stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)) }, func={ stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)) }, list={ stop(paste("Internal error: list entry was not an", Xobjectname)) }, stop(paste("Internal error:", Xobjectname, "not generated")) ) XsimList[[i]] <- Xsim } if(verbose) { cat(paste("Done.\n")) flush.console() } attr(XsimList, "internal") <- list(csr=csr) return(XsimList) } # capture main decision parameters envelopeInfo <- list(call=cl, Yname=Yname, valname=valname, csr=csr, csr.theo=csr.theo, use.theory=use.theory, pois=pois, simtype=simtype, constraints=constraints, nrank=nrank, nsim=nsim, Nsim=Nsim, global=global, ginterval=ginterval, dual=dual, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, alternative=alternative, scale=scale, clamp=clamp, use.weights=use.weights, do.pwrong=do.pwrong) # ---------------------------------------- ######### SIMULATE ####################### # ---------------------------------------- if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", slrm = "simulated realisations of fitted spatial logistic regression model", expr = "simulations by evaluating expression", func = "simulations by evaluating function", list = "point patterns from list", "simulated patterns") if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to 
calculate envelopes")) else ""
    splat(action, Nsim, descrip, explan, "...")
  }
  # determine whether simulated point patterns should be saved
  catchpatterns <- savepatterns && simtype != "list"
  Caughtpatterns <- list()
  # allocate space for computed function values
  nrvals <- length(rvals)
  simvals <- matrix(, nrow=nrvals, ncol=Nsim)
  # allocate space for weights to be computed
  if(compute.weights)
    weights <- numeric(Nsim)
  # inferred values of function argument 'r' or equivalent parameters
  if(identical(expected.arg, "r")) {
    # Kest, etc
    inferred.r.args <- list(r=rvals)
  } else if(identical(expected.arg, c("rmax", "nrval"))) {
    # K3est, etc
    inferred.r.args <- list(rmax=max(rvals), nrval=length(rvals))
  } else
    stop(paste("Don't know how to infer values of", commasep(expected.arg)))
  # arguments for function when applied to simulated patterns
  funargs <- resolve.defaults(funargs,
                              inferred.r.args,
                              list(...),
                              conserveargs,
                              if(usecorrection) list(correction="best") else NULL)
  # reject simulated pattern if function values are all NA (etc)
  rejectNA <- isTRUE(rejectNA)
  # start simulation loop
  nerr <- 0
  gaveup <- FALSE
  if(verbose) pstate <- list()
  for(i in 1:Nsim) {
    ## safely generate a random pattern and apply function
    success <- FALSE
    while(!success && !gaveup) {
      Xsim <- eval(simexpr, envir=envir)
      ## check valid point pattern
      if(!inherits(Xsim, Xclass))
        switch(simtype,
               csr=stop(paste("Internal error:", Xobjectname, "not generated")),
               rmh=stop(paste("Internal error: rmh did not return an",
                              Xobjectname)),
               kppm=stop(paste("Internal error:",
                               "simulate.kppm did not return an",
                               Xobjectname)),
               slrm=stop(paste("Internal error:",
                               "simulate.slrm did not return an",
                               Xobjectname)),
               expr=stop(paste("Evaluating the expression", sQuote("simulate"),
                               "did not yield an", Xobjectname)),
               func=stop(paste("Evaluating the function", sQuote("simulate"),
                               "did not yield an", Xobjectname)),
               list=stop(paste("Internal error: list entry was not an",
                               Xobjectname)),
               stop(paste("Internal error:", Xobjectname, "not generated"))
               )
      if(catchpatterns)
        Caughtpatterns[[i]] <- Xsim
      if(savevalues)
        SavedValues[[i]] <- saveresultof(Xsim)
      if(compute.weights) {
        wti <- weightfun(Xsim)
        if(!is.numeric(wti))
          stop("weightfun did not return a numeric value")
        if(length(wti) != 1L)
          stop("weightfun should return a single numeric value")
        weights[i] <- wti
      }
      ## apply function safely
      funXsim <- try(do.call(fun, c(list(Xsim), funargs)), silent=silent)
      success <- !inherits(funXsim, "try-error") &&
                 inherits(funXsim, "fv") &&
                 (!rejectNA || any(is.finite(funXsim[[valname]])))
      if(!success) {
        #' error in computing summary function
        ## count the rejection; for simtype "list" this also advances the
        ## index i+nerr used in 'simexpr' to fetch the next unused pattern
        nerr <- nerr + 1L
        if(nerr > maxnerr) {
          gaveup <- TRUE
          errtype <- if(rejectNA) "fatal errors or NA function values" else
                     "fatal errors"
          if(simtype == "list") {
            whinge <- paste("Exceeded maximum possible number of errors",
                            "when evaluating summary function:",
                            length(SimDataList), "patterns provided,",
                            nsim, "patterns required,",
                            nerr, ngettext(nerr, "pattern", "patterns"),
                            "rejected due to", errtype)
          } else {
            whinge <- paste("Exceeded maximum permissible number of",
                            errtype,
                            paren(paste("maxnerr =", maxnerr)),
                            "when evaluating summary function",
                            "for simulated point patterns")
          }
          switch(maxerr.action,
                 fatal = stop(whinge, call.=FALSE),
                 warn = warning(whinge, call.=FALSE),
                 null = {})
        } else if(!silent) cat("[retrying]\n")
      }
      #' ..... end of while(!success) ................
} if(gaveup) break; # exit loop now ## sanity checks if(i == 1L) { if(!inherits(funXsim, "fv")) stop(paste("When applied to a simulated pattern, the function", fname, "did not return an object of class", sQuote("fv"))) argname.sim <- fvnames(funXsim, ".x") valname.sim <- fvnames(funXsim, ".y") if(argname.sim != argname) stop(paste("The objects returned by", fname, "when applied to a simulated pattern", "and to the data pattern", "are incompatible. They have different argument names", sQuote(argname.sim), "and", sQuote(argname), "respectively")) if(valname.sim != valname) stop(paste("When", fname, "is applied to a simulated pattern", "it provides an estimate named", sQuote(valname.sim), "whereas the estimate for the data pattern is named", sQuote(valname), ". Try using the argument", sQuote("correction"), "to make them compatible")) rfunX <- with(funX, ".x") rfunXsim <- with(funXsim, ".x") if(!identical(rfunX, rfunXsim)) stop(paste("When", fname, "is applied to a simulated pattern,", "the values of the argument", sQuote(argname.sim), "are different from those used for the data.")) } if(tran) { # extract only the recommended value if(use.theory) funXsim <- funXsim[, c(argname, valname, "theo")] else funXsim <- funXsim[, c(argname, valname)] # apply the transformation to it funXsim <- eval(transform.funXsim) } # extract the values for simulation i simvals.i <- funXsim[[valname]] if(length(simvals.i) != nrvals) stop("Vectors of function values have incompatible lengths") simvals[ , i] <- funXsim[[valname]] if(verbose) pstate <- progressreport(i, Nsim, state=pstate) if(collectrubbish) { rm(Xsim) rm(funXsim) gc() } } ## end simulation loop if(verbose) { cat("\nDone.\n") flush.console() } # ........................................................... # save functions and/or patterns if so commanded if(!gaveup) { if(savefuns) { alldata <- cbind(rvals, simvals) simnames <- paste("sim", 1:Nsim, sep="") colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=attr(funX, "ylab"), valu="sim1", fmla= deparse(. 
~ r), alim=attr(funX, "alim"), labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:Nsim, sep="")), fname=attr(funX, "fname"), yexp=attr(funX, "yexp"), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames } if(savepatterns) SimPats <- if(simtype == "list") SimDataList else Caughtpatterns } ######### COMPUTE ENVELOPES ####################### etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" if(dual) { jsim <- 1:nsim jsim.mean <- nsim + 1:nsim2 } else { jsim <- jsim.mean <- NULL } result <- envelope.matrix(simvals, funX=funX, jsim=jsim, jsim.mean=jsim.mean, type=etype, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, nrank=nrank, ginterval=ginterval, nSD=nSD, Yname=Yname, do.pwrong=do.pwrong, weights=weights, gaveup=gaveup) ## tack on envelope information attr(result, "einfo") <- resolve.defaults(envelopeInfo, attr(result, "einfo")) if(!gaveup) { ## tack on functions and/or patterns if so commanded if(savefuns) attr(result, "simfuns") <- SimFuns if(savepatterns) { attr(result, "simpatterns") <- SimPats attr(result, "datapattern") <- X } ## undocumented - tack on values of some other quantity if(savevalues) { attr(result, "simvalues") <- SavedValues attr(result, "datavalue") <- saveresultof(X) } } ## save function weights if(use.weights) attr(result, "weights") <- weights return(result) } plot.envelope <- function(x, ..., main) { if(missing(main)) main <- short.deparse(substitute(x)) shade.given <- ("shade" %in% names(list(...))) shade.implied <- !is.null(fvnames(x, ".s")) if(!(shade.given || shade.implied)) { # ensure x has default 'shade' attribute # (in case x was produced by an older version of spatstat) if(all(c("lo", "hi") %in% colnames(x))) fvnames(x, ".s") <- c("lo", "hi") else warning("Unable to determine shading for envelope") } NextMethod("plot", main=main) } print.envelope <- function(x, ...) 
{ e <- attr(x, "einfo") g <- e$global csr <- e$csr pois <- e$pois if(is.null(pois)) pois <- csr simtype <- e$simtype constraints <- e$constraints nr <- e$nrank nsim <- e$nsim V <- e$VARIANCE fname <- flat.deparse(attr(x, "ylab")) type <- if(V) paste("Pointwise", e$nSD, "sigma") else if(g) "Simultaneous" else "Pointwise" splat(type, "critical envelopes for", fname, "\nand observed value for", sQuote(e$Yname)) if(!is.null(valname <- e$valname) && waxlyrical('extras')) splat("Edge correction:", dQuote(valname)) ## determine *actual* type of simulation descrip <- if(csr) "simulations of CSR" else if(!is.null(simtype)) { switch(simtype, csr="simulations of CSR", rmh=paste("simulations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm="simulations of fitted cluster model", slrm="simulations of fitted spatial logistic regression model", expr="evaluations of user-supplied expression", func="evaluations of user-supplied function", list="point pattern datasets in user-supplied list", funs="columns of user-supplied data") } else "simulations of fitted model" if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) # splat("Obtained from", nsim, descrip) # if(waxlyrical('extras')) { dual <- isTRUE(e$dual) usetheory <- isTRUE(e$use.theory) hownull <- if(usetheory) { "(known exactly)" } else if(dual) { paste("(estimated from a separate set of", e$nsim2, "simulations)") } else NULL formodel <- if(csr) "for CSR" else NULL if(g) { splat("Envelope based on maximum deviation of", fname, "from null value", formodel, hownull) } else if(dual) { splat("Null value of", fname, formodel, hownull) } if(!is.null(attr(x, "simfuns"))) splat("(All simulated function values are stored)") if(!is.null(attr(x, "simpatterns"))) splat("(All simulated point patterns are stored)") } splat("Alternative:", e$alternative) if(!V && waxlyrical('extras')) { ## significance interpretation! alpha <- if(g) { nr/(nsim+1) } else { 2 * nr/(nsim+1) } splat("Significance level of", if(g) "simultaneous" else "pointwise", "Monte Carlo test:", paste0(if(g) nr else 2 * nr, "/", nsim+1), "=", signif(alpha, 3)) } if(waxlyrical('gory') && !is.null(pwrong <- attr(x, "pwrong"))) { splat("\t[Estimated significance level of pointwise excursions:", paste0("pwrong=", signif(pwrong, 3), "]")) } NextMethod("print") } summary.envelope <- function(object, ...) 
{ e <- attr(object, "einfo") g <- e$global V <- e$VARIANCE nr <- e$nrank nsim <- e$nsim csr <- e$csr pois <- e$pois if(is.null(pois)) pois <- csr simtype <- e$simtype constraints <- e$constraints alternative <- e$alternative use.theory <- e$use.theory has.theo <- "theo" %in% fvnames(object, "*") csr.theo <- csr && has.theo use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo) fname <- deparse(attr(object, "ylab")) type <- if(V) paste("Pointwise", e$nSD, "sigma") else if(g) "Simultaneous" else "Pointwise" splat(type, "critical envelopes for", fname, "\nand observed value for", sQuote(e$Yname)) # determine *actual* type of simulation descrip <- if(csr) "simulations of CSR" else if(!is.null(simtype)) { switch(simtype, csr="simulations of CSR", rmh=paste("simulations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm="simulations of fitted cluster model", slrm="simulations of fitted spatial logistic regression model", expr="evaluations of user-supplied expression", func="evaluations of user-supplied function", list="point pattern datasets in user-supplied list", funs="columns of user-supplied data", "simulated point patterns") } else "simulations of fitted model" if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) # splat("Obtained from", nsim, descrip) # if(waxlyrical('extras')) { if(!is.null(e$dual) && e$dual) splat("Theoretical (i.e. null) mean value of", fname, "estimated from a separate set of", e$nsim2, "simulations") if(!is.null(attr(object, "simfuns"))) splat("(All", nsim, "simulated function values", "are stored in attr(,", dQuote("simfuns"), ") )") if(!is.null(attr(object, "simpatterns"))) splat("(All", nsim, "simulated point patterns", "are stored in attr(,", dQuote("simpatterns"), ") )") } # splat("Alternative:", alternative) if(V) { # nSD envelopes splat(switch(alternative, two.sided = "Envelopes", "Critical boundary"), "computed as sample mean", switch(alternative, two.sided="plus/minus", less="minus", greater="plus"), e$nSD, "sample standard deviations") } else { # critical envelopes lo.ord <- if(nr == 1L) "minimum" else paste(ordinal(nr), "smallest") hi.ord <- if(nr == 1L) "maximum" else paste(ordinal(nr), "largest") if(g) splat(switch(alternative, two.sided = "Envelopes", "Critical boundary"), "computed as", if(use.theory) "theoretical curve" else "mean of simulations", switch(alternative, two.sided="plus/minus", less="minus", greater="plus"), hi.ord, "simulated value of maximum", switch(alternative, two.sided="absolute", less="negative", greater="positive"), "deviation") else { if(alternative != "less") splat("Upper envelope: pointwise", hi.ord, "of simulated curves") if(alternative != "greater") splat("Lower envelope: pointwise", lo.ord, "of simulated curves") } symmetric <- (alternative == "two.sided") && !g alpha <- if(!symmetric) { nr/(nsim+1) } else { 2 * nr/(nsim+1) } splat("Significance level of Monte Carlo test:", paste0(if(!symmetric) nr else 2 * nr, "/", nsim+1), "=", alpha) } splat("Data:", e$Yname) return(invisible(NULL)) } # envelope.matrix # core functionality to compute envelope values # theory = funX[["theo"]] # observed = fX envelope.matrix <- function(Y, ..., rvals=NULL, observed=NULL, theory=NULL, funX=NULL, nsim=NULL, nsim2=NULL, jsim=NULL, jsim.mean=NULL, type=c("pointwise", "global", "variance"), alternative=c("two.sided", "less", "greater"), scale = NULL, clamp=FALSE, csr=FALSE, use.theory = csr, nrank=1, ginterval=NULL, nSD=2, savefuns=FALSE, check=TRUE, Yname=NULL, do.pwrong=FALSE, 
weights=NULL, precomputed=NULL, gaveup=FALSE) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) type <- match.arg(type) alternative <- match.arg(alternative) if(!is.null(funX)) stopifnot(is.fv(funX)) pwrong <- NULL use.weights <- !is.null(weights) cheat <- !is.null(precomputed) if(is.null(rvals) && is.null(observed) && !is.null(funX)) { ## assume funX is summary function for observed data rvals <- with(funX, .x) observed <- with(funX, .y) theory <- if(use.theory) (theory %orifnull% funX[["theo"]]) else NULL if(check) stopifnot(nrow(funX) == nrow(Y)) } else if(check) { ## validate vectors of data if(is.null(rvals)) stop("rvals must be supplied") if(is.null(observed)) stop("observed must be supplied") stopifnot(length(rvals) == nrow(Y)) stopifnot(length(observed) == length(rvals)) } use.theory <- use.theory && !is.null(theory) if(use.theory && check) stopifnot(length(theory) == length(rvals)) simvals <- Y fX <- observed atr <- if(!is.null(funX)) attributes(funX) else list(alim=range(rvals), ylab=quote(f(r)), yexp=quote(f(r)), fname="f") fname <- atr$fname NAvector <- rep(NA_real_, length(rvals)) if(!cheat) { ## ................ standard calculation ..................... ## validate weights if(use.weights && !gaveup) check.nvector(weights, ncol(simvals), things="simulated functions", naok=TRUE) ## determine numbers of columns used Ncol <- if(!gaveup) ncol(simvals) else Inf if(Ncol < 2) stop("Need at least 2 columns of function values") ## all columns are used unless 'nsim' or 'jsim' given. if(!(is.null(nsim) && is.null(jsim))) { if(is.null(jsim)) { jsim <- 1:nsim } else if(is.null(nsim)) { nsim <- length(jsim) } else stopifnot(length(jsim) == nsim) if(nsim > Ncol) stop(paste(nsim, "simulations are not available; only", Ncol, "columns provided")) } ## nsim2 or jsim.mean may be given, and imply dual calculation if(!(is.null(nsim2) && is.null(jsim.mean))) { if(is.null(jsim.mean)) { jsim.mean <- setdiff(seq_len(Ncol), jsim)[1:nsim2] } else if(is.null(nsim2)) { nsim2 <- length(jsim.mean) } else stopifnot(length(jsim.mean) == nsim2) if(nsim + nsim2 > Ncol) stop(paste(nsim, "+", nsim2, "=", nsim+nsim2, "simulations are not available; only", Ncol, "columns provided")) if(length(intersect(jsim, jsim.mean))) warning("Internal warning: Indices in jsim and jsim.mean overlap") } restrict.columns <- !is.null(jsim) dual <- !is.null(jsim.mean) } else { ## ................ precomputed values .................. ## validate weights if(use.weights) check.nvector(weights, nsim, things="simulations", naok=TRUE) restrict.columns <- FALSE dual <- FALSE } shadenames <- NULL nsim.mean <- NULL switch(type, pointwise = { ## ....... POINTWISE ENVELOPES ............................... 
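           ## (Added comment: at each r value, 'lo' and 'hi' are the
           ## nrank-th smallest and nrank-th largest of the nsim simulated
           ## values, so a two-sided pointwise Monte Carlo test has
           ## significance level 2*nrank/(nsim+1) at each fixed r;
           ## cf. print.envelope above.)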
if(gaveup) { lo <- hi <- NAvector } else if(cheat) { stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi } else { simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[,jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(nrank == 1L) { lohi <- apply(simvals, 1L, range) } else { lohi <- apply(simvals, 1L, # function(x, n) { sort(x)[n] }, orderstats, k=c(nrank, nsim-nrank+1L)) } lo <- lohi[1L,] hi <- lohi[2L,] } lo.name <- "lower pointwise envelope of %s from simulations" hi.name <- "upper pointwise envelope of %s from simulations" ## if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper limit" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower limit" }) if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) } else { m <- if(gaveup) NAvector else if(cheat) precomputed$mmean else if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) results <- data.frame(r=rvals, obs=fX, mmean=m, lo=lo, hi=hi) } shadenames <- c("lo", "hi") if(do.pwrong) { ## estimate the p-value for the 'wrong test' if(gaveup) { pwrong <- NA_real_ } else if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { dataranks <- t(apply(simvals, 1, rank, ties.method="random")) upper.signif <- (dataranks <= nrank) lower.signif <- (dataranks >= nsim-nrank+1L) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | upper.signif) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } }, global = { ## ..... SIMULTANEOUS ENVELOPES .......................... if(gaveup) { lo <- hi <- reference <- NAvector } else if(cheat) { ## ... use precomputed values .. stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi if(use.theory) { reference <- theory } else { stopifnot(checkfields(precomputed, "mmean")) reference <- precomputed$mmean } domain <- rep.int(TRUE, length(rvals)) } else { ## ... 
normal case: compute envelopes from simulations if(!is.null(ginterval)) { domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L]) funX <- funX[domain, ] simvals <- simvals[domain, ] } else domain <- rep.int(TRUE, length(rvals)) simvals[is.infinite(simvals)] <- NA if(use.theory) { reference <- theory[domain] if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } } else if(dual) { # Estimate the mean from one set of columns # Form envelopes from another set of columns simvals.mean <- simvals[, jsim.mean] # mmean <- reference <- if(!use.weights) apply(simvals.mean, 1L, mean, na.rm=TRUE) else apply(simvals.mean, 1L, weighted.mean, w=weights[jsim.mean], na.rm=TRUE) nsim.mean <- ncol(simvals.mean) # retain only columns used for envelope simvals <- simvals[, jsim] } else { # Compute the mean and envelopes using the same data if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } # mmean <- reference <- if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) } nsim <- ncol(simvals) # compute deviations deviations <- sweep(simvals, 1L, reference) deviations <- switch(alternative, two.sided = abs(deviations), greater = if(clamp) pmax(0, deviations) else deviations, less = if(clamp) pmax(0, -deviations) else (-deviations)) deviations <- matrix(deviations, nrow=nrow(simvals), ncol=ncol(simvals)) ## rescale ? sc <- 1 if(!is.null(scale)) { stopifnot(is.function(scale)) sc <- scale(rvals) sname <- "scale(r)" ans <- check.nvector(sc, length(rvals), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (sc <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[rvals > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) sc[bad] <- 1 } deviations <- sweep(deviations, 1L, sc, "/") } ## compute max (scaled) deviations suprema <- apply(deviations, 2L, max, na.rm=TRUE) # ranked deviations dmax <- sort(suprema)[nsim-nrank+1L] # simultaneous bands lo <- reference - sc * dmax hi <- reference + sc * dmax } lo.name <- "lower critical boundary for %s" hi.name <- "upper critical boundary for %s" if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" }) if(use.theory) results <- data.frame(r=rvals[domain], obs=fX[domain], theo=reference, lo=lo, hi=hi) else results <- data.frame(r=rvals[domain], obs=fX[domain], mmean=reference, lo=lo, hi=hi) shadenames <- c("lo", "hi") if(do.pwrong) warning(paste("Argument", sQuote("do.pwrong=TRUE"), "ignored;", "it is not relevant to global envelopes")) }, variance={ ## ....... POINTWISE MEAN, VARIANCE etc ...................... if(gaveup) { Ef <- varf <- NAvector } else if(cheat) { # .... use precomputed values .... stopifnot(checkfields(precomputed, c("Ef", "varf"))) Ef <- precomputed$Ef varf <- precomputed$varf } else { ## .... 
normal case: compute from simulations simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(!use.weights) { Ef <- apply(simvals, 1L, mean, na.rm=TRUE) varf <- apply(simvals, 1L, var, na.rm=TRUE) } else { Ef <- apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) varf <- apply(simvals, 1L, weighted.var, w=weights, na.rm=TRUE) } } if(gaveup) { sd <- stdres <- lo <- hi <- loCI <- hiCI <- NAvector } else { ## derived quantities sd <- sqrt(varf) stdres <- (fX-Ef)/sd stdres[!is.finite(stdres)] <- NA ## critical limits lo <- Ef - nSD * sd hi <- Ef + nSD * sd ## confidence interval loCI <- Ef - nSD * sd/sqrt(nsim) hiCI <- Ef + nSD * sd/sqrt(nsim) } lo.name <- paste("lower", nSD, "sigma critical limit for %s") hi.name <- paste("upper", nSD, "sigma critical limit for %s") loCI.name <- paste("lower", nSD, "sigma confidence bound", "for mean of simulated %s") hiCI.name <- paste("upper", nSD, "sigma confidence bound", "for mean of simulated %s") ## if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- hiCI <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" hiCI.name <- "infinite upper confidence limit" }, greater = { lo <- loCI <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" loCI.name <- "infinite lower confidence limit" }) ## put together if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(mmean=Ef, var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, NULL, fname, "loCI") hiCIlabel <- if(alternative == "less" && !gaveup) "infinity" else makefvlabel(NULL, NULL, fname, "hiCI") mslabl <- c(makefvlabel(NULL, "bar", fname), makefvlabel("var", "hat", fname), makefvlabel("res", "hat", fname), makefvlabel("stdres", "hat", fname), loCIlabel, hiCIlabel) wted <- if(use.weights) "weighted " else NULL msdesc <- c(paste0(wted, "sample mean of %s from simulations"), paste0(wted, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } else { results <- data.frame(r=rvals, obs=fX, mmean=Ef, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, NULL, fname, "loCI") hiCIlabel <- if(alternative == "less" && !gaveup) "infinity" else makefvlabel(NULL, NULL, fname, "hiCI") mslabl <- c(makefvlabel("var", "hat", fname), makefvlabel("res", "hat", fname), makefvlabel("stdres", "hat", fname), loCIlabel, hiCIlabel) msdesc <- c(paste0(if(use.weights) "weighted " else NULL, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } if(do.pwrong) { ## estimate the p-value for the 'wrong test' if(gaveup) { pwrong <- NA_real_ } else if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { upper.signif <- (simvals > hi) lower.signif <- (simvals < lo) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | upper.signif) # is.signif.somewhere <- apply(is.signif, 2, any) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } } ) ############ WRAP UP ######################### if(use.theory) { # reference is computed curve `theo' 
reflabl <- makefvlabel(NULL, NULL, fname, "theo") refdesc <- paste0("theoretical value of %s", if(csr) " for CSR" else NULL) } else { # reference is sample mean of simulations reflabl <- makefvlabel(NULL, "bar", fname) refdesc <- paste0(if(use.weights) "weighted " else NULL, "sample mean of %s from simulations") } lolabl <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, "hat", fname, "lo") hilabl <- if(alternative == "less"&& !gaveup) "infinity" else makefvlabel(NULL, "hat", fname, "hi") result <- fv(results, argu="r", ylab=atr$ylab, valu="obs", fmla= deparse(. ~ r), alim=intersect.ranges(atr$alim, range(results$r)), labl=c("r", makefvlabel(NULL, "hat", fname, "obs"), reflabl, lolabl, hilabl), desc=c("distance argument r", "observed value of %s for data pattern", refdesc, lo.name, hi.name), fname=atr$fname, yexp =atr$yexp) # columns to be plotted by default dotty <- c("obs", if(use.theory) "theo" else "mmean", "hi", "lo") if(type == "variance") { # add more stuff result <- bind.fv(result, morestuff, mslabl, msdesc) if(use.theory) dotty <- c(dotty, "mmean") } fvnames(result, ".") <- dotty fvnames(result, ".s") <- shadenames unitname(result) <- unitname(funX) class(result) <- c("envelope", class(result)) # tack on envelope information attr(result, "einfo") <- list(global = (type =="global"), ginterval = ginterval, alternative=alternative, scale = scale, clamp = clamp, csr = csr, use.theory = use.theory, csr.theo = csr && use.theory, simtype = "funs", constraints = "", nrank = nrank, nsim = nsim, VARIANCE = (type == "variance"), nSD = nSD, valname = NULL, dual = dual, nsim = nsim, nsim2 = nsim.mean, Yname = Yname, do.pwrong=do.pwrong, use.weights=use.weights, gaveup = gaveup) # tack on saved functions if(savefuns && !gaveup) { nSim <- ncol(Y) alldata <- cbind(rvals, Y) simnames <- paste("sim", 1:nSim, sep="") colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=atr$ylab, valu="sim1", fmla= deparse(. ~ r), alim=atr$alim, labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:nSim, sep="")), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } if(do.pwrong) attr(result, "pwrong") <- pwrong if(use.weights) attr(result, "weights") <- weights return(result) } envelope.envelope <- function(Y, fun=NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) { Yname <- short.deparse(substitute(Y)) stopifnot(inherits(Y, "envelope")) Yorig <- Y aargh <- list(...) 
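  ## This method recomputes an envelope from a previous 'envelope' object,
  ## reusing its saved simulated functions or saved point patterns instead
  ## of simulating afresh.  Illustrative sketch (not run):
  ##   E1 <- envelope(cells, Kest, nsim=99, savepatterns=TRUE)
  ##   E2 <- envelope(E1, Gest)   # same patterns, different summary function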
X <- attr(Y, "datapattern") sf <- attr(Y, "simfuns") sp <- attr(Y, "simpatterns") wt <- attr(Y, "weights") einfo <- attr(Y, "einfo") csr <- aargh$internal$csr %orifnull% einfo$csr if(is.null(fun) && is.null(sf)) { # No simulated functions - must compute them from simulated patterns if(is.null(sp)) stop(paste("Cannot compute envelope:", "Y does not contain simulated functions", "(was not generated with savefuns=TRUE)", "and does not contain simulated patterns", "(was not generated with savepatterns=TRUE)")) # set default fun=Kest fun <- Kest } if(!is.null(fun)) { # apply new function # point patterns are required if(is.null(sp)) stop(paste("Object Y does not contain simulated point patterns", "(attribute", dQuote("simpatterns"), ");", "cannot apply a new", sQuote("fun"))) if(is.null(X)) stop(paste("Cannot apply a new", sQuote("fun"), "; object Y generated by an older version of spatstat")) ## send signal if simulations were CSR internal <- aargh$internal if(csr) { if(is.null(internal)) internal <- list() internal$csr <- TRUE } ## compute new envelope result <- do.call(envelope, resolve.defaults(list(Y=quote(X), fun=fun, simulate=sp), aargh, list(transform=transform, global=global, VARIANCE=VARIANCE, internal=internal, Yname=Yname, nsim=einfo$nsim, nsim2=einfo$nsim2, weights=wt), .StripNull=TRUE)) } else { #' compute new envelope with existing simulated functions if(is.null(sf)) stop(paste("Y does not contain a", dQuote("simfuns"), "attribute", "(it was not generated with savefuns=TRUE)")) if(!is.null(transform)) { # Apply transformation to Y and sf stopifnot(is.expression(transform)) ## cc <- dotexpr.to.call(transform, "Y", "eval.fv") cc <- inject.expr("with(Y, .)", transform) Y <- eval(cc) ## cc <- dotexpr.to.call(transform, "sf", "eval.fv") cc <- inject.expr("with(sf, .)", transform) sf <- eval(cc) } #' catch discrepancy between domains of observed and simulated functions if(nrow(sf) != nrow(Y)) { rrsim <- sf[[fvnames(sf, ".x")]] rrobs <- Y[[fvnames(Y, ".x")]] ra <- intersect.ranges(range(rrsim), range(rrobs)) delta <- min(mean(diff(rrsim)), mean(diff(rrobs)))/2 oksim <- (rrsim >= ra[1] - delta) & (rrsim <= ra[2] + delta) okobs <- (rrobs >= ra[1] - delta) & (rrobs <= ra[2] + delta) if(sum(oksim) != sum(okobs)) stop(paste("Internal error: Unable to reconcile the domains", "of the observed and simulated functions"), call.=FALSE) if(mean(abs(rrsim[oksim] - rrobs[okobs])) > delta) stop(paste("Internal error: Unable to reconcile the r values", "of the observed and simulated functions"), call.=FALSE) sf <- sf[oksim, , drop=FALSE] Y <- Y[okobs, , drop=FALSE] } # extract simulated function values df <- as.data.frame(sf) rname <- fvnames(sf, ".x") df <- df[, (names(df) != rname)] # interface with 'envelope.matrix' etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" dfm <- as.matrix(df) dont.complain.about(dfm) result <- do.call(envelope.matrix, resolve.defaults(list(Y=quote(dfm)), aargh, list(type=etype, csr=csr, funX=Y, Yname=Yname, weights=wt), .StripNull=TRUE)) } if(!is.null(transform)) { # post-process labels labl <- attr(result, "labl") dnames <- colnames(result) dnames <- dnames[dnames %in% fvnames(result, ".")] # expand "."
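## 'transform' is an expression in which '.' stands for the function
## values; it was applied above via with(Y, .) and with(sf, .).
## Illustrative sketch (not run):
##   E1 <- envelope(cells, Kest, nsim=39, savefuns=TRUE)
##   EL <- envelope(E1, transform=expression(sqrt(./pi)))  # K- to L-scale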
ud <- as.call(lapply(c("cbind", dnames), as.name)) dont.complain.about(ud) expandtransform <- eval(substitute(substitute(tr, list(.=ud)), list(tr=transform[[1L]]))) # compute new labels attr(result, "fname") <- attr(Yorig, "fname") mathlabl <- as.character(fvlegend(result, expandtransform)) # match labels to columns evars <- all.vars(expandtransform) used.dotnames <- evars[evars %in% dnames] mathmap <- match(colnames(result), used.dotnames) okmath <- !is.na(mathmap) # update appropriate labels labl[okmath] <- mathlabl[mathmap[okmath]] attr(result, "labl") <- labl } # Tack on envelope info copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Yorig, "einfo")[copyacross] attr(result, "einfo")$csr <- csr # Save data return(result) } pool.envelope <- local({ pool.envelope <- function(..., savefuns=FALSE, savepatterns=FALSE) { Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") Elist <- unname(list(...)) nE <- length(Elist) if(nE == 0) return(NULL) #' ........ validate envelopes ..................... #' All arguments must be envelopes notenv <- !unlist(lapply(Elist, inherits, what="envelope")) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("envelope")) stop(why) } ## Only one envelope? if(nE == 1) return(Elist[[1L]]) ## envelopes must be compatible ok <- do.call(compatible, Elist) if(!ok) stop("Envelopes are not compatible") ## ... reconcile parameters in different envelopes ....... eilist <- lapply(Elist, attr, which="einfo") nrank <- resolveEinfo(eilist, "nrank", 1) global <- resolveEinfo(eilist, "global", FALSE) ginterval <- resolveEinfo(eilist, "ginterval", NULL, atomic=FALSE) VARIANCE <- resolveEinfo(eilist, "VARIANCE", FALSE) alternative <- resolveEinfo(eilist, "alternative", FALSE) scale <- resolveEinfo(eilist, "scale", NULL, atomic=FALSE) clamp <- resolveEinfo(eilist, "clamp", FALSE) resolveEinfo(eilist, "simtype", "funs", "Envelopes were generated using different types of simulation") resolveEinfo(eilist, "constraints", "", "Envelopes were generated using different types of conditioning") resolveEinfo(eilist, "csr.theo", FALSE, NULL) csr <- resolveEinfo(eilist, "csr", FALSE, NULL) use.weights <- resolveEinfo(eilist, "use.weights" , FALSE, "Weights were used in some, but not all, envelopes: they will be ignored") use.theory <- resolveEinfo(eilist, "use.theory", csr, NULL) ## weights <- if(use.weights) unlist(lapply(Elist, attr, which="weights")) else NULL type <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" ## ........ validate saved functions ..................... 
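## Illustrative sketch of pooling (not run); each envelope must have been
## created with savefuns=TRUE, as validated below:
##   E1 <- envelope(cells, Kest, nsim=50, savefuns=TRUE)
##   E2 <- envelope(cells, Kest, nsim=50, savefuns=TRUE)
##   E  <- pool(E1, E2)  # behaves like one envelope based on 100 simulations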
if(savefuns || !VARIANCE) { ## Individual simulated functions are required SFlist <- lapply(Elist, attr, which="simfuns") isnul <- unlist(lapply(SFlist, is.null)) if(any(isnul)) { n <- sum(isnul) comply <- if(!VARIANCE) "compute the envelope:" else "save the simulated functions:" why <- paste("Cannot", comply, ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simfuns"), "attribute", "(not generated with savefuns=TRUE)") stop(why) } ## Simulated functions must be the same function fnames <- unique(lapply(SFlist, attr, which="fname")) if(length(fnames) > 1L) { fnames <- unlist(lapply(fnames, flatfname)) stop(paste("Envelope objects contain values", "of different functions:", commasep(sQuote(fnames)))) } ## vectors of r values must be identical rlist <- lapply(SFlist, getrvals) rvals <- rlist[[1L]] samer <- unlist(lapply(rlist, identical, y=rvals)) if(!all(samer)) stop(paste("Simulated function values are not compatible", "(different values of function argument)")) ## Extract function values and assemble into one matrix matlist <- lapply(SFlist, getdotvals) SFmatrix <- do.call(cbind, matlist) } ## compute pooled envelope switch(type, pointwise = { result <- envelope(SFmatrix, funX=Elist[[1L]], type=type, alternative=alternative, clamp=clamp, nrank=nrank, csr=csr, use.theory=use.theory, Yname=Yname, weights=weights, savefuns=savefuns) }, global = { simfunmatrix <- if(is.null(ginterval)) SFmatrix else { ## savefuns have not yet been clipped to ginterval ## while envelope data have been clipped. domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L]) SFmatrix[domain, , drop=FALSE] } result <- envelope(simfunmatrix, funX=Elist[[1L]], type=type, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, nrank=nrank, ginterval=ginterval, Yname=Yname, weights=weights, savefuns=savefuns) }, variance = { ## Pool sample means and variances nsims <- unlist(lapply(eilist, getElement, name="nsim")) mmeans <- lapply(Elist, getElement, name="mmean") vars <- lapply(Elist, getElement, name="var") mmeans <- matrix(unlist(mmeans), ncol=nE) vars <- matrix(unlist(vars), ncol=nE) if(!use.weights) { w.mean <- nsims d.mean <- sum(nsims) w.var <- nsims - 1 d.var <- sum(nsims) - 1 } else { weightlist <- lapply(Elist, attr, which="weights") w.mean <- unlist(lapply(weightlist, sum)) d.mean <- sum(w.mean) ssw <- unlist(lapply(weightlist, meansqfrac)) ## meansqfrac : function(x) {sum((x/sum(x))^2)})) w.var <- w.mean * (1 - ssw) d.var <- d.mean * (1 - sum(ssw)) } poolmmean <- as.numeric(mmeans %*% matrix(w.mean/d.mean, ncol=1L)) within <- vars %*% matrix(w.var, ncol=1L) between <- ((mmeans - poolmmean[])^2) %*% matrix(w.mean, ncol=1L) poolvar <- as.numeric((within + between)/d.var) ## feed precomputed data to envelope.matrix pc <- list(Ef=poolmmean[], varf=poolvar[]) nsim <- sum(nsims) result <- envelope.matrix(NULL, funX=Elist[[1L]], type=type, alternative=alternative, csr=csr, Yname=Yname, weights=weights, savefuns=savefuns, nsim=nsim, precomputed=pc) }) ## Copy envelope info that is not handled by envelope.matrix copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Elist[[1L]], "einfo")[copyacross] ## ..............saved patterns ..................... 
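## Note on the 'variance' case above: for groups of sizes n_k with
## means m_k and variances s_k^2, the pooled mean is sum(n_k * m_k)/N
## and the pooled variance is
##   (sum((n_k - 1) * s_k^2) + sum(n_k * (m_k - mbar)^2)) / (N - 1),
## i.e. (within + between)/d.var in the unweighted code above; the
## weighted case is analogous, with the group totals of the weights in
## place of n_k and correspondingly adjusted degrees of freedom.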
if(savepatterns) { SPlist <- lapply(Elist, attr, which="simpatterns") isnul <- unlist(lapply(SPlist, is.null)) if(any(isnul)) { n <- sum(isnul) why <- paste("Cannot save the simulated patterns:", ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simpatterns"), "attribute", "(not generated with savepatterns=TRUE)") warning(why) } else { attr(result, "simpatterns") <- Reduce(append, SPlist) } } ## ..............saved summary functions ................ if(savefuns) { alldata <- cbind(rvals, SFmatrix) Nsim <- ncol(SFmatrix) simnames <- paste0("sim", 1:Nsim) colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SFtemplate <- SFlist[[1L]] SimFuns <- fv(alldata, argu="r", ylab=attr(SFtemplate, "ylab"), valu="sim1", fmla= deparse(. ~ r), alim=attr(SFtemplate, "alim"), labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:Nsim, sep="")), fname=attr(SFtemplate, "fname"), yexp=attr(SFtemplate, "yexp"), unitname=unitname(SFtemplate)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } dotnames <- lapply(Elist, fvnames, a=".") dn <- dotnames[[1L]] if(all(unlist(lapply(dotnames, identical, y=dn)))) fvnames(result, ".") <- dn shadenames <- lapply(Elist, fvnames, a=".s") sh <- shadenames[[1L]] if(all(unlist(lapply(shadenames, identical, y=sh)))) fvnames(result, ".s") <- sh return(result) } getrvals <- function(z) { as.matrix(z)[, fvnames(z, ".x")] } getdotvals <- function(z) { as.matrix(z)[, fvnames(z, "."), drop=FALSE] } meansqfrac <- function(x) {sum((x/sum(x))^2)} pool.envelope }) # resolve matching entries in different envelope objects # x is a list of envelope info objects resolveEinfo <- function(x, what, fallback, warn, atomic=TRUE) { if(atomic) { y <- unique(unlist(lapply(x, getElement, name=what))) if(length(y) == 1L) return(y) } else { y <- unique(lapply(x, getElement, name=what)) if(length(y) == 1L) return(y[[1L]]) } if(missing(warn)) warn <- paste("Envelopes were generated using different values", "of argument", paste(sQuote(what), ";", sep=""), "reverting to default value") if(!is.null(warn)) warning(warn, call.=FALSE) return(fallback) } as.data.frame.envelope <- function(x, ..., simfuns=FALSE) { if(simfuns && !is.null(sf <- attr(x, "simfuns"))) { # tack on the simulated functions as well y <- as.data.frame(bind.fv(x, sf, clip=TRUE)) return(y) } NextMethod("as.data.frame") } spatstat.core/R/Iest.R0000644000176200001440000000517614144333461014310 0ustar liggesusers# Iest.R # # I function # # $Revision: 1.16 $ $Date: 2019/10/31 03:01:26 $ # # # Iest <- local({ Iest <- function(X, ..., eps=NULL, r = NULL, breaks = NULL, correction=NULL) { X <- as.ppp(X) if(!is.multitype(X)) stop("Only applicable to multitype point patterns") marx <- marks(X, dfok=FALSE) ntypes <- length(levels(marx)) Y <- unmark(split(X)) ## relative proportions ni <- sapply(Y, npoints) fi <- ni/sum(ni) ## J function of pattern regardless of type Jdotdot <- Jest(unmark(X), correction=correction, r=r, eps=eps, breaks=breaks, ...) rvals <- Jdotdot$r ## J function of subpattern of each type i Jii <- lapply(Y, Jest, r=rvals, correction=correction, eps=eps, ...) nrvals <- lengths(lapply(Jii, getElement, name="r")) if(length(unique(nrvals)) != 1 || nrvals[1] != length(rvals)) stop("Internal error: J function objects have different lengths") ## initialise fv object alim <- attr(Jdotdot, "alim") Z <- fv(data.frame(r=rvals, theo=0), "r", substitute(I(r), NULL), "theo", . 
~ r, alim, c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="I") ## Estimates of each type namii <- unlist(lapply(Jii, names)) namdd <- names(Jdotdot) bothnames <- namii[namii %in% namdd] if("un" %in% bothnames) { Jun <- matrix(extract(Jii, "un"), nrow=ntypes, byrow=TRUE) Iun <- apply(fi * Jun, 2, sum) - Jdotdot$un Z <- bind.fv(Z, data.frame(un=Iun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- matrix(extract(Jii, "rs"), nrow=ntypes, byrow=TRUE) Irs <- apply(fi * Jrs, 2, sum) - Jdotdot$rs Z <- bind.fv(Z, data.frame(rs=Irs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") } if("han" %in% bothnames) { Jhan <- matrix(extract(Jii, "han"), nrow=ntypes, byrow=TRUE) Ihan <- apply(fi * Jhan, 2, sum) - Jdotdot$han Z <- bind.fv(Z, data.frame(han=Ihan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- matrix(extract(Jii, "km"), nrow=ntypes, byrow=TRUE) Ikm <- apply(fi * Jkm, 2, sum) - Jdotdot$km Z <- bind.fv(Z, data.frame(km=Ikm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") } unitname(Z) <- unitname(X) attr(Z, "conserve") <- attr(Jdotdot, "conserve") return(Z) } extract <- function(Zlist, what) sapply(Zlist, "[[", i=what) Iest }) spatstat.core/R/rags.R0000644000176200001440000000511314144333463014331 0ustar liggesusers#' #' rags.R #' #' Alternating Gibbs Sampler #' #' $Revision: 1.6 $ $Date: 2016/11/29 05:01:51 $ #' #' Initial implementation for multitype hard core process #' without interaction within types rags <- function(model, ..., ncycles=100) { if(!is.list(model)) stop("Argument 'model' should be a list") if(!all(c("beta", "hradii") %in% names(model))) stop("Argument 'model' should have entries 'beta' and 'hradii'") do.call(ragsMultiHard, append(model, list(..., ncycles=ncycles))) } ragsMultiHard <- function(beta, hradii, ..., types=NULL, bmax=NULL, periodic=FALSE, ncycles=100) { ## validate beta by generating first proposal points Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types) ntypes <- length(levels(marks(Xprop))) check.nmatrix(hradii, ntypes, things="types of points") if(any(is.finite(dh <- diag(hradii)) & dh > 0)) stop("Interaction between points of the same type is not permitted") ## initial state empty X <- Xprop[integer(0)] Y <- split(X) ## for(cycle in 1:ncycles) { if(cycle > 1) Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types) Xprop <- Xprop[order(coords(Xprop)$x)] Yprop <- split(Xprop) for(i in 1:ntypes) { Xi <- Yprop[[i]] ok <- TRUE for(j in (1:ntypes)[-i]) { if(!any(ok)) break; ok <- ok & !has.close(Xi, hradii[i,j], Y[[j]], sorted=TRUE, periodic=periodic) } Y[[i]] <- Xi[ok] } } Z <- do.call(superimpose, Y) return(Z) } ragsAreaInter <- function(beta, eta, r, ..., win=NULL, bmax=NULL, periodic=FALSE, ncycles=100) { check.1.real(eta) check.1.real(r) if(r == 0 || eta == 1) return(rpoispp(beta, win=win, lmax=bmax, ...)) if(eta < 1) stop("Alternating Gibbs algorithm requires eta >= 1", call.=FALSE) if(is.function(beta)) { beta <- as.im(beta, W=win, ...) 
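#' Illustrative sketch (not run): an attractive area-interaction process
#' on the unit square, simulated by the alternating Gibbs sampler:
#'   X <- ragsAreaInter(beta=100, eta=2, r=0.05, win=square(1), ncycles=100)
#' The checks above require eta >= 1; eta == 1 (or r == 0) reduces to a
#' Poisson process.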
} else if(is.numeric(beta)) { check.1.real(beta) stopifnot(beta >= 0) } else if(!is.im(beta)) { stop("beta should be a number, a pixel image, or a function(x,y)", call.=FALSE) } if(is.im(beta) && is.null(win)) win <- as.owin(beta) kappa <- beta * eta loggamma <- log(eta)/(pi * r^2) bmax <- if(is.null(bmax)) NULL else c(max(kappa), loggamma) B <- if(is.numeric(beta)) c(kappa, loggamma) else solist(kappa, as.im(loggamma, W=win)) H <- matrix(c(0,r,r,0), 2, 2) Y <- ragsMultiHard(B, H, types=1:2, bmax=bmax, periodic=periodic, ncycles=ncycles) X <- split(Y)[[1]] return(X) } spatstat.core/R/augment.msr.R0000644000176200001440000000524514144333461015641 0ustar liggesusers#' #' augment.msr.R #' #' Given a measure, compute a pixel image of the smoothed density #' and insert it in the object. #' #' $Revision: 1.2 $ $Date: 2020/12/19 05:25:06 $ augment.msr <- function(x, ..., sigma, recompute=FALSE) { ## add a pixel image of the smoothed density component stopifnot(inherits(x, "msr")) if(!recompute && !is.null(attr(x, "smoothdensity"))) return(x) d <- ncol(as.matrix(x$val)) xloc <- x$loc W <- as.owin(xloc) mt <- is.multitype(xloc) if(missing(sigma)) { sigma <- if(!mt) avenndist(xloc) else max(sapply(split(xloc), avenndist)) if(sigma == 0) sigma <- max(bw.scott(xloc))/5 } if(mt) { ## multitype case - split by type, extract smoothed part, then sum y <- lapply(split(x), augment.msr, sigma=sigma, ...) z <- lapply(y, attr, which="smoothdensity") if((nc <- ncol(x)) == 1) { ## scalar valued smo <- im.apply(z, sum) ## WAS: z <- do.call(harmonise, unname(z)) ## smo <- Reduce("+", z) } else { ## vector valued smo <- vector(mode="list", length=nc) for(j in 1:nc) { zj <- lapply(z, "[[", i=j) smo[[j]] <- im.apply(zj, sum) ## WAS: zj <- do.call(harmonise, unname(zj)) ## smo[[j]] <- Reduce("+", zj) } smo <- as.solist(smo) } attr(smo, "sigma") <- sigma attr(x, "smoothdensity") <- smo return(x) } ## Single-type xdensity <- as.matrix(x$density) ## first weed out Inf, NA, NaN if(!all(ok <- complete.cases(xdensity))) xdensity <- ok * xdensity ## smooth density unless constant ra <- apply(xdensity, 2, range) varble <- apply(as.matrix(ra), 2, diff) > sqrt(.Machine$double.eps) ## if(d == 1) { if(!varble) { smo <- as.im(mean(xdensity), W=W) } else { xmd <- xloc %mark% xdensity dont.complain.about(xmd) smo <- do.call(Smooth, resolve.defaults(list(X=quote(xmd)), list(...), list(sigma=sigma))) } } else { smo <- vector(mode="list", length=d) names(smo) <- colnames(x) if(any(varble)) { xmdv <- xloc %mark% xdensity[,varble, drop=FALSE] dont.complain.about(xmdv) smo[varble] <- do.call(Smooth, resolve.defaults(list(X=quote(xmdv)), list(...), list(sigma=sigma))) } if(any(!varble)) smo[!varble] <- solapply(apply(xdensity[, !varble, drop=FALSE], 2, mean), as.im, W=W) } attr(smo, "sigma") <- sigma attr(x, "smoothdensity") <- smo return(x) } spatstat.core/R/spatcov.R0000644000176200001440000000450214141452520015046 0ustar liggesusers#' estimate covariance function of a random field #' assuming stationary (and optionally - isotropic) #' #' Naive moment estimator #' #' Originally written for Max Chatfield #' original: Adrian Baddeley 15-19 may 2020 #' $Revision: 1.11 $ $Date: 2021/05/03 02:40:27 $ spatcov <- function(X, Y=X, ..., correlation=FALSE, isotropic=TRUE, clip=TRUE, pooling=TRUE) { stopifnot(is.im(X)) eX <- X - mean(X) if(correlation) eX <- eX/sqrt(mean(eX^2)) if(missing(Y) || is.null(Y)) { #' spatial covariance of X A <- imcov(eX) } else { #' spatial cross-covariance of X and Y stopifnot(is.im(Y)) eY <- Y - mean(Y) 
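#' Illustrative sketch (not run; 'cells' is a dataset from spatstat.data):
#'   Z <- density(cells)               # a pixel image
#'   f <- spatcov(Z, correlation=TRUE) # isotropic spatial correlation
#'   plot(f)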
if(correlation) eY <- eY/sqrt(mean(eY^2)) A <- imcov(eX, eY) } B <- setcov(Window(X)) if(!(isotropic && pooling)) { #' first estimate covariance as function of vector argument Z <- A/B #' deal with numerical errors at extremes pixelarea <- with(X, xstep * ystep) Z[B < pixelarea] <- 0 } if(isotropic) { #' result is a function of lag distance if(pooling) { mA <- rotmean(A) mB <- rotmean(B) f <- eval.fv(mA/mB) } else { f <- rotmean(Z) } #' give it more meaningful labels f <- rebadge.fv(f, new.ylab=quote(C(r)), new.fname="C", tags=fvnames(f, ".y"), new.tags="est", new.desc="estimate of %s", new.labl="hat(%s)(r)") if(clip) attr(f, "alim") <- c(0, shortside(Frame(X))/2) result <- f } else { #' result is the image Z computed above, a function of the lag vector #' (numerical errors at the extremes have already been set to zero) if(clip) { Box <- Frame(Z) b <- sidelengths(Box) Bclip <- trim.rectangle(Box, b[1]/4, b[2]/4) Z <- Z[Bclip, drop=FALSE, tight=TRUE] } result <- Z } return(result) } pairMean <- function(fun, W, V=NULL, ..., normalise=TRUE) { #' fun is a function of pairwise distance if(!is.function(fun)) stop("fun should be a function in the R language") #' W is the domain over which to integrate W <- as.owin(W) FD <- distcdf(W, V, ..., savedenom=!normalise) result <- as.numeric(stieltjes(fun, FD, ...)) if(!normalise) result <- result * attr(FD, "denom") return(result) } spatstat.core/R/bw.diggle.R0000644000176200001440000000530014144333461015233 0ustar liggesusers## ## bw.diggle.R ## ## bandwidth selection rule bw.diggle (for density.ppp) ## ## $Revision: 1.8 $ $Date: 2021/01/07 03:08:41 $ ## bw.diggle <- local({ #' integrand phi <- function(x,h) { if(h <= 0) return(numeric(length(x))) y <- pmax.int(0, pmin.int(1, x/(2 * h))) 4 * pi * h^2 * (acos(y) - y * sqrt(1 - y^2)) } #' secret option for debugging mf <- function(..., method=c("C", "interpreted")) { match.arg(method) } bw.diggle <- function(X, ..., correction="good", hmax=NULL, nr=512, warn=TRUE) { stopifnot(is.ppp(X)) method <- mf(...)
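## Illustrative sketch (not run; 'redwood' is a dataset from spatstat.data):
##   sigma <- bw.diggle(redwood)
##   plot(sigma)                    # cross-validation criterion M(sigma)
##   plot(density(redwood, sigma))  # density with the selected bandwidth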
W <- Window(X) lambda <- npoints(X)/area(W) rmax <- if(!is.null(hmax)) (4 * hmax) else rmax.rule("K", W, lambda) r <- seq(0, rmax, length=nr) K <- Kest(X, r=r, correction=correction) yname <- fvnames(K, ".y") K <- K[, c("r", yname)] ## check that K values can be passed to C code if(any(bad <- !is.finite(K[[yname]]))) { ## throw out bad values lastgood <- min(which(bad)) - 1L if(lastgood < 2L) stop("K function yields too many NA/NaN values") K <- K[1:lastgood, ] } rvals <- K$r ## evaluation of M(r) requires K(2r) rmax2 <- max(rvals)/2 if(!is.null(alim <- attr(K, "alim"))) rmax2 <- min(alim[2L], rmax2) ok <- (rvals <= rmax2) switch(method, interpreted = { rvals <- rvals[ok] nr <- length(rvals) J <- numeric(nr) for(i in 1:nr) J[i] <- stieltjes(phi, K, h=rvals[i])[[yname]]/(2 * pi) }, C = { nr <- length(rvals) nrmax <- sum(ok) dK <- diff(K[[yname]]) ndK <- length(dK) z <- .C(SC_digberJ, r=as.double(rvals), dK=as.double(dK), nr=as.integer(nr), nrmax=as.integer(nrmax), ndK=as.integer(ndK), J=as.double(numeric(nrmax)), PACKAGE="spatstat.core") J <- z$J rvals <- rvals[ok] }) pir2 <- pi * rvals^2 M <- (1/lambda - 2 * K[[yname]][ok])/pir2 + J/pir2^2 ## This calculation was for the uniform kernel on B(0,h) ## Convert to standard deviation of (one-dimensional marginal) kernel sigma <- rvals/2 result <- bw.optim(M, sigma, creator="bw.diggle", criterion="Berman-Diggle Cross-Validation", J=J, lambda=lambda, warnextreme=warn, hargnames="hmax", unitname=unitname(X)) return(result) } bw.diggle }) spatstat.core/R/envelopeArray.R0000644000176200001440000000541414144333462016214 0ustar liggesusers# # envelopeArray.R # # $Revision: 1.1 $ $Date: 2017/06/05 10:31:58 $ # # envelopeArray <- function(X, fun, ..., dataname=NULL,verb=FALSE,reuse=TRUE) { #' if(is.null(dataname)) dataname <- short.deparse(substitute(X)) #' determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") #' determine function to be called if(is.character(fun)) { fun <- get(fun, mode="function") } else if(!is.function(fun)) stop(paste(sQuote("fun"), "should be a function or a character string")) #' Apply function to data pattern, to test it #' and to determine array dimensions, margin labels etc. fX <- do.call.matched(fun, append(list(X), list(...)), matchfirst=TRUE) if(!inherits(fX, "fasp")) stop("function did not return an object of class 'fasp'") d <- dim(fX) witch <- matrix(1:prod(d), nrow=d[1L], ncol=d[2L], dimnames=dimnames(fX)) #' make function that extracts [i,j] entry of result ijfun <- function(X, ..., i=1, j=1, expectdim=d) { fX <- fun(X, ...) 
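# Illustrative sketch (not run): 'fun' must return a function array
# (class 'fasp'), e.g. alltypes applied to the multitype pattern
# 'amacrine' from spatstat.data:
#   aE <- envelopeArray(amacrine, alltypes, nsim=19)
#   plot(aE)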
if(!inherits(fX, "fasp")) stop("function did not return an object of class 'fasp'") if(!all(dim(fX) == expectdim)) stop("function returned an array with different dimensions") return(fX[i,j]) } # ------------ start computing ------------------------------- if(reuse) { L <- do.call(spatstat.core::envelope, resolve.defaults( list(quote(X), fun=ijfun), list(internal=list(eject="patterns")), list(...), list(verbose=verb))) intern <- attr(L, "internal") } else intern <- L <- NULL # compute function array and build up 'fasp' object fns <- list() k <- 0 for(i in 1:nrow(witch)) { for(j in 1:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- do.call(spatstat.core::envelope, resolve.defaults( list(quote(X), ijfun), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname), list(i=i, j=j))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } # wrap up into 'fasp' object title <- paste("array of envelopes of", fname, "for", dataname) rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } spatstat.core/R/Kres.R0000644000176200001440000000526714144333461014311 0ustar liggesusers# # Kres.R # # Residual K # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Kres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad K <- Kcom(object, ...) } else { # case where 'object' is the output of 'Kcom' a <- attr(object, "maker") if(is.null(a) || a != "Kcom") stop("fv object was not created by Kcom") K <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=K$r, theo=numeric(length(K$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(K)(r), NULL), "theo", . 
~ r , attr(K, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="K") # add residual functions nam <- names(K) if("border" %in% nam) ans <- bind.fv(ans, data.frame(bres=with(K, border-bcom)), "bold(R)~hat(%s)[bord](r)", "residual function %s based on border correction", "bres") if(all(c("trans","tcom") %in% nam)) ans <- bind.fv(ans, data.frame(tres=with(K, trans-tcom)), "bold(R)~hat(%s)[trans](r)", "residual function %s based on translation correction", "tres") if(all(c("iso","icom") %in% nam)) ans <- bind.fv(ans, data.frame(ires=with(K, iso-icom)), "bold(R)~hat(%s)[iso](r)", "residual function %s based on isotropic correction", "ires") if("ivar" %in% nam) { savedotnames <- fvnames(ans, ".") ans <- bind.fv(ans, as.data.frame(K)[, c("ivar", "isd", "ihi", "ilo")], c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of isotropic-corrected residual %s", "pseudo-SD of isotropic-corrected residual %s", "upper critical band for isotropic-corrected residual %s", "lower critical band for isotropic-corrected residual %s"), "ires") ans <- bind.fv(ans, data.frame(istdres=with(ans, ires/isd)), "bold(T)~hat(%s)[iso](r)", "standardised isotropic-corrected residual %s", "ires") fvnames(ans, ".") <- c(savedotnames, c("ihi", "ilo")) } unitname(ans) <- unitname(K) return(ans) } spatstat.core/R/rshift.psp.R0000644000176200001440000000257414144333463015505 0ustar liggesusers# # rshift.psp.R # # $Revision: 1.7 $ $Date: 2019/11/18 06:22:50 $ # rshift.psp <- function(X, ..., group=NULL, which=NULL) { verifyclass(X, "psp") # process arguments W <- rescue.rectangle(X$window) arglist <- handle.rshift.args(W, ..., edgedefault="erode") radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip if(W$type != "rectangle") stop("Not yet implemented for non-rectangular windows") if(edge != "erode") stop(paste("Only implemented for edge=", dQuote("erode"))) # split into groups if(is.null(group)) Y <- list(X) else { stopifnot(is.factor(group)) stopifnot(length(group) == X$n) Y <- lapply(levels(group), function(l, Z, group) {Z[group == l]}, Z=X, group=group) } ############ loop ################ result <- NULL for(i in seq_along(Y)) { Z <- Y[[i]] # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate segments Zsh <- shift(Z, c(jump$x, jump$y)) Zsh$window <- W # append to result result <- append.psp(result, Zsh) } # clip if(!is.null(clip)) result <- result[clip] return(result) } spatstat.core/R/FGmultiInhom.R0000644000176200001440000001722014144333461015737 0ustar liggesusers#' #' FGmultiInhom.R #' #' inhomogeneous multitype G and F functions #' #' Original code by Ottmar Cronie and Marie-Colette van Lieshout #' #' Rewritten for spatstat by Adrian Baddeley #' #' GmultiInhom #' FmultiInhom #' #' $Revision: 1.8 $ $Date: 2021/01/07 03:16:28 $ GmultiInhom <- function(X, I, J, lambda=NULL, lambdaI=NULL, lambdaJ=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI=NULL, ratio=FALSE){ if(!is.ppp(X) || !is.marked(X)) stop("X should be a marked point pattern") W <- Window(X) nX <- npoints(X) #' handle r argument rmax <- rmax.rule("G", W, intensity(X)) bks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmax) r <- bks$r rmax <- bks$max nr <- length(r) #' Accept any kind of index for I; convert it to a logical index I <- ppsubset(X, I) if(is.null(I)) stop("I must be 
a valid subset index") XI <- X[I] nI <- sum(I) if (nI == 0) stop("No points satisfy condition I") if(!is.null(ReferenceMeasureMarkSetI)) { check.1.real(ReferenceMeasureMarkSetI) stopifnot(ReferenceMeasureMarkSetI >= 0) } #' likewise for J if(missing(J) || is.null(J)) { J <- rep(TRUE, nX) } else { J <- ppsubset(X, J) } XJ <- X[J] nJ <- sum(J) if (nJ == 0) stop("No points satisfy condition J") #' supply either lambda, or lambdaI and lambdaJ lam.given <- !is.null(lambda) lamIJ.given <- !is.null(lambdaI) || !is.null(lambdaJ) if(lam.given == lamIJ.given || is.null(lambdaI) != is.null(lambdaJ)) stop(paste("Supply either a vector lambda of length equal to npoints(X),", "or two vectors lambdaI, lambdaJ of lengths", "equal to npoints(X[I]) and npoints(X[J]) respectively"), call.=FALSE) if(lamIJ.given) { #' lambdaI and lambdaJ given check.nvector(lambdaI, nI, things="points of X[I]") stopifnot(all(lambdaI > 0)) check.nvector(lambdaJ, nJ, things="points of X[J]") stopifnot(all(lambdaJ > 0)) if(is.null(lambdamin)){ stop(paste("Supply lambdamin - a single positive number which is", "smaller than the values in lambdaJ"), call.=FALSE) } check.1.real(lambdamin) stopifnot(lambdamin > 0) stopifnot(lambdamin <= min(lambdaJ)) } else { #' lambda given check.nvector(lambda, nX, things="points of X") stopifnot(all(lambda > 0)) lambdaI <- lambda[I] lambdaJ <- lambda[J] if(is.null(lambdamin)){ stop(paste("Supply lambdamin - a single positive number which is", "smaller than the values in lambda"), call.=FALSE) } check.1.real(lambdamin) stopifnot(lambdamin > 0) stopifnot(lambdamin <= min(lambda)) } #' Calculate 1/lambda(x_i,y_i,m_i)) #' for all (x_i,y_i,m_i) with m_i in I invlambdaI <- 1/lambdaI #' Calculate (1 - lambda_min/lambda(x_i,y_i,m_i)) #' for all (x_i,y_i,m_i) with m_i in J Coeff <- 1-(lambdamin/lambdaJ) ## CoeffMatrix <- matrix(rep(Coeff,times=nI), nrow=nI, byrow=TRUE) #' distances ## DistanceXItoXJ <- crossdist(XI,XJ) #' eroded areas and boundary distances areaWr <- eroded.areas(W, r) bdistXI <- bdist.points(XI) #' for each point x in XI, determine largest r such that x \in W-r ibI <- fastFindInterval(bdistXI, r, labels=TRUE) #' count of points inside W-r for each r ## NumberEroded <- revcumsum(table(ibI)) #' denominator #' sum invlambdaI for all points x \in W-r DenominatorN <- c(sum(invlambdaI), revcumsum(natozero(tapply(invlambdaI, ibI, sum)))) if(!is.null(ReferenceMeasureMarkSetI)) DenominatorA <- areaWr * ReferenceMeasureMarkSetI #' local products of weights #' sort data points in order of increasing x coordinate xxI <- XI$x yyI <- XI$y oXI <- fave.order(xxI) xIord <- xxI[oXI] yIord <- yyI[oXI] #' xxJ <- XJ$x yyJ <- XJ$y vvJ <- Coeff oXJ <- fave.order(xxJ) xJord <- xxJ[oXJ] yJord <- yyJ[oXJ] vJord <- vvJ[oXJ] # compute local cumulative products z <- .C(SC_locxprod, ntest = as.integer(nI), xtest = as.double(xIord), ytest = as.double(yIord), ndata = as.integer(nJ), xdata = as.double(xJord), ydata = as.double(yJord), vdata = as.double(vJord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nI * nr)), PACKAGE="spatstat.core") ans <- matrix(z$ans, nrow=nr, ncol=nI) #' revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=nI) loccumprod[, oXI] <- ans #' border correction outside <- outer(r, bdistXI, ">") loccumprod[outside] <- 0 #' weight by 1/lambdaI wlcp <- loccumprod * matrix(invlambdaI, byrow=TRUE, nr, nI) #' sum over I for each fixed r numer <- .rowSums(wlcp, nr, nI) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lambdamin * pi * r^2)) desc <- c("distance argument 
r", "theoretical Poisson %s") theo.denom <- rep.int(nI, nr) fname <- c("G", "list(inhom,I,J)") G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom, I, J](r)), "theo", NULL, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(G[list(inhom,I,J)](r)), ratio=ratio) # add border corrected (Hamilton Principle) estimate G <- bind.ratfv(G, data.frame(bord=DenominatorN-numer), DenominatorN, makefvlabel(NULL, "hat", fname, "bord"), "border estimate of %s", "bord", ratio=ratio) fvnames(G, ".") <- c("bord", "theo") # add modified border corrected (non-Hamilton-Principle) estimate if(!is.null(ReferenceMeasureMarkSetI)) { G <- bind.ratfv(G, data.frame(bordm=DenominatorA-numer), DenominatorA, makefvlabel(NULL, "hat", fname, "bordm"), "modified border estimate of %s", "bordm", ratio=ratio) fvnames(G, ".") <- c("bord", "bordm", "theo") } # formula(G) <- . ~ r unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) return(G) } #' marked inhomogeneous F FmultiInhom <- function(X, J, lambda=NULL,lambdaJ=NULL, lambdamin=NULL, ..., r=NULL) { if(!is.ppp(X) || !is.marked(X)) stop("X should be a marked point pattern") nX <- npoints(X) #' Accept any kind of index for J; convert it to a logical index J <- ppsubset(X, J) if(is.null(J)) stop("J must be a valid subset index") XJ <- X[J] nJ <- sum(J) if (nJ == 0) stop("No points satisfy condition J") if(is.null(lambda) == is.null(lambdaJ)) stop(paste("Supply either a vector lambda of length equal to npoints(X),", "or a vector lambdaJ of length equal to npoints(X[J])"), call.=FALSE) if(is.null(lambdamin)) stop("Supply a value for lambdamin", call.=FALSE) check.1.real(lambdamin) if(!is.null(lambda)) { check.nvector(lambda, nX) stopifnot(all(lambda > 0)) stopifnot(lambdamin <= min(lambda[J])) lambdaJ <- lambda[J] } else { check.nvector(lambdaJ, nJ) stopifnot(all(lambdaJ > 0)) stopifnot(lambdamin <= min(lambdaJ)) } FJ <- Finhom(XJ, lambda=lambdaJ, lmin=lambdamin, r=r) FJ <- rebadge.fv(FJ, new.ylab = quote(F[inhom, J](r)), new.fname = c("F", "list(inhom,J)"), new.yexp = quote(F[list(inhom,J)](r))) return(FJ) } spatstat.core/R/Kmulti.inhom.R0000644000176200001440000004140214144333461015752 0ustar liggesusers# # Kmulti.inhom.S # # $Revision: 1.53 $ $Date: 2020/08/05 02:50:32 $ # # # ------------------------------------------------------------------------ Lcross.inhom <- function(X, i, j, ..., correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] if(missing(correction)) correction <- NULL K <- Kcross.inhom(X, i, j, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) # relabel the fv object L <- rebadge.fv(L, substitute(L[inhom,i,j](r), list(i=iname,j=jname)), c("L", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(L[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } Ldot.inhom <- function(X, i, ..., correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(correction)) correction <- NULL K <- Kdot.inhom(X, i, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[inhom, i ~ dot](r), list(i=iname)), c("L", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), 
new.yexp=substitute(L[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } "Kcross.inhom" <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) K <- Kmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIJ, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(K, substitute(K[inhom,i,j](r), list(i=iname,j=jname)), c("K", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(K[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(K, "dangerous") return(result) } "Kdot.inhom" <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- paste("points") K <- Kmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIdot, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) result <- rebadge.fv(K, substitute(K[inhom, i ~ dot](r), list(i=iname)), c("K", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(K[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(K, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } "Kmulti.inhom"<- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") dflt <- list(Iname="points satisfying condition I", Jname="points satisfying condition J", miss.update=missing(update), miss.leave=missing(leaveoneout)) extrargs <- resolve.defaults(list(...), dflt) if(length(extrargs) > length(dflt)) warning("Additional arguments unrecognised") Iname <- extrargs$Iname Jname <- extrargs$Jname miss.update <- extrargs$miss.update miss.leave <- extrargs$miss.leave npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # validate edge correction correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) # validate I, J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") XI <- X[I] XJ <- X[J] nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) # r values rmaxdefault <- rmax.rule("K", W, nJ/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE ## intensity data if(!is.null(lambdaX)) { ## Intensity values for all points of X if(!is.null(lambdaI)) warning("lambdaI was ignored, because lambdaX was given", call.=FALSE) if(!is.null(lambdaJ)) warning("lambdaJ was ignored, because lambdaX was given", call.=FALSE) if(is.im(lambdaX)) { ## Look up intensity values lambdaI <- safelookup(lambdaX, X[I]) lambdaJ <- safelookup(lambdaX, X[J]) } else if(is.function(lambdaX)) { ## evaluate function at locations lambdaI <- lambdaX(XI$x, XI$y) lambdaJ <- lambdaX(XJ$x, XJ$y) } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) { ## vector of intensity values if(length(lambdaX) != npts) stop(paste("The length of", sQuote("lambdaX"), "should equal the number of points of X")) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } else if(is.ppm(lambdaX) || is.kppm(lambdaX) || is.dppm(lambdaX)) { ## point process model provides intensity model <- lambdaX if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit 
model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] dangerI <- dangerJ <- FALSE dangerous <- "lambdaIJ" if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste("Argument lambdaX is not understood:", "it should be a numeric vector,", "an image, a function(x,y)", "or a fitted point process model (ppm, kppm or dppm)")) } else { ## lambdaI, lambdaJ expected if(is.null(lambdaI)) { ## estimate intensity dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") lambdaI <- density(X[I], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaI)) { ## look up intensity values lambdaI <- safelookup(lambdaI, X[I]) } else if(is.function(lambdaI)) { ## evaluate function at locations lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { ## validate intensity vector if(length(lambdaI) != nI) stop(paste("The length of", sQuote("lambdaI"), "should equal the number of", Iname)) } else if(is.ppm(lambdaI) || is.kppm(lambdaI) || is.dppm(lambdaI)) { ## point process model provides intensity model <- lambdaI if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { #' kppm or dppm model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaI <- lambdaX[I] dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaI"), "should be a vector or an image")) if(is.null(lambdaJ)) { ## estimate intensity dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") lambdaJ <- density(X[J], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaJ)) { ## look up intensity values lambdaJ <- safelookup(lambdaJ, X[J]) } else if(is.function(lambdaJ)) { ## evaluate function at locations XJ <- X[J] lambdaJ <- lambdaJ(XJ$x, XJ$y) } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) { ## validate intensity vector if(length(lambdaJ) != nJ) stop(paste("The length of", sQuote("lambdaJ"), "should equal the number of", Jname)) } else if(is.ppm(lambdaJ) || is.kppm(lambdaJ) || is.dppm(lambdaJ)) { ## point process model provides intensity model <- lambdaJ if(!update) { ## just use intensity of fitted model lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) if(leaveoneout && !miss.leave) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaJ <- lambdaX[J] dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See 
help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) } ## Weight for each pair if(!is.null(lambdaIJ)) { dangerIJ <- TRUE dangerous <- union(dangerous, "lambdaIJ") if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iname)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jname)) } else { dangerIJ <- FALSE } danger <- dangerI || dangerJ || dangerIJ # Recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") fname <- c("K", "list(inhom,I,J)") K <- fv(K, "r", quote(K[inhom, I, J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(K[list(inhom,I,J)](r))) # identify close pairs of points close <- crosspairs(XI, XJ, max(r), what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair if(is.null(lambdaIJ)) weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) else weight <- 1/lambdaIJ[cbind(icloseI, jcloseJ)] # Compute estimates by each of the selected edge corrections. if(any(correction == "none")) { ## uncorrected wh <- whist(dclose, breaks$val, weight) Kun <- cumsum(wh)/areaW rmax <- diameter(W)/2 Kun[r >= rmax] <- NA K <- bind.fv(K, data.frame(un=Kun), makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un") } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(XI) bI <- b[icloseI] # apply reduced sample algorithm RS <- Kwtsum(dclose, bI, weight, b, 1/lambdaI, breaks) if(any(correction == "border")) { Kb <- RS$ratio K <- bind.fv(K, data.frame(border=Kb), makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border") } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) K <- bind.fv(K, data.frame(bord.modif=Kbm), makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "translate")) { ## translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Ktrans <- cumsum(wh)/areaW rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Kiso <- cumsum(wh)/areaW rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), makefvlabel(NULL, "hat", fname, "iso"), "Ripley isotropic correction estimate of %s", "iso") } ## default is to display them all formula(K) <- . 
~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } spatstat.core/R/hierstrauss.R0000644000176200001440000002110614144333462015750 0ustar liggesusers## ## hierstrauss.R ## ## $Revision: 1.10 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical Strauss process ## ## HierStrauss() create an instance of the hierarchical Strauss process ## [an object of class 'interact'] ## ## ------------------------------------------------------------------- ## HierStrauss <- local({ # ......... define interaction potential HSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
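# Illustrative sketch (not run): radii[i,j] is the interaction distance
# for an ordered pair of types, with NA meaning no interaction:
#   R <- matrix(c(0.05, NA, 0.05, 0.05), nrow=2, ncol=2)
#   HS <- HierStrauss(radii=R, types=c(1,2), archy=c(1,2))
#   # then e.g. ppm(X ~ 1, HS) for a (hypothetical) multitype pattern X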
delHS <- function(which, types, radii, archy) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(HierStrauss(types=types, radii=radii, archy=archy)) } # Set up basic object except for family and parameters BlankHSobject <- list( name = "Hierarchical Strauss process", creator = "HierStrauss", family = "hierpair.family", # evaluated later pot = HSpotential, par = list(types=NULL, radii=NULL, archy=NULL), # filled in later parnames = c("possible types", "interaction distances", "hierarchical order"), hasInf = FALSE, selfstart = function(X, self) { if(!is.null(self$par$types) && !is.null(self$par$archy)) return(self) types <- self$par$types %orifnull% levels(marks(X)) archy <- self$par$archy %orifnull% types HierStrauss(types=types,radii=self$par$radii,archy=archy) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii"), asymmok=TRUE) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(radii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") splat("Interaction radii:") print(hiermat(radii, self$par$archy)) invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(round(gammas, 4), self$par$archy))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interaction parameters must be non-explosive d <- diag(rep(TRUE, nrow(radii))) return(all(gamma[required & d] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types archy <- self$par$archy # problems? 
uptri <- archy$relation required <- !is.na(radii) & uptri okgamma <- !uptri | (is.finite(gamma) & (gamma <= 1)) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHS(naughty, types, radii, archy)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHS, types=types, radii=radii, archy=archy) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$radii active <- !is.na(r) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankHSobject) <- "interact" # finally create main function HierStrauss <- function(radii, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } radii[radii == 0] <- NA out <- instantiate.interact(BlankHSobject, list(types=types, radii=radii, archy=archy)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } HierStrauss <- intermaker(HierStrauss, BlankHSobject) HierStrauss }) spatstat.core/R/kmrs.R0000644000176200001440000001721614144333462014357 0ustar liggesusers# # kmrs.S # # S code for Kaplan-Meier, Reduced Sample and Hanisch # estimates of a distribution function # from _histograms_ of censored data. # # kaplan.meier() # reduced.sample() # km.rs() # # $Revision: 3.26 $ $Date: 2013/06/27 08:59:16 $ # # The functions in this file produce vectors `km' and `rs' # where km[k] and rs[k] are estimates of F(breaks[k+1]), # i.e. an estimate of the c.d.f. at the RIGHT endpoint of the interval. # "kaplan.meier" <- function(obs, nco, breaks, upperobs=0) { # obs: histogram of all observations : min(T_i,C_i) # nco: histogram of noncensored observations : T_i such that T_i <= C_i # breaks: breakpoints (vector or 'breakpts' object, see breaks.S) # upperobs: number of observations beyond rightmost breakpoint # breaks <- as.breakpts(breaks) n <- length(obs) if(n != length(nco)) stop("lengths of histograms do not match") check.hist.lengths(nco, breaks) # # # reverse cumulative histogram of observations d <- revcumsum(obs) + upperobs # # product integrand s <- ifelseXB(d > 0, 1 - nco/d, 1) # km <- 1 - cumprod(s) # km has length n; km[i] is an estimate of F(r) for r=breaks[i+1] # widths <- diff(breaks$val) lambda <- numeric(n) pos <- (s > 0) lambda[pos] <- -log(s[pos])/widths[pos] # lambda has length n; lambda[i] is an estimate of # the average of \lambda(r) over the interval (breaks[i],breaks[i+1]). # return(list(km=km, lambda=lambda)) } "reduced.sample" <- function(nco, cen, ncc, show=FALSE, uppercen=0) # nco: histogram of noncensored observations: T_i such that T_i <= C_i # cen: histogram of all censoring times: C_i # ncc: histogram of censoring times for noncensored obs: # C_i such that T_i <= C_i # # Then nco[k] = #{i: T_i <= C_i, T_i \in I_k} # cen[k] = #{i: C_i \in I_k} # ncc[k] = #{i: T_i <= C_i, C_i \in I_k}. # # The intervals I_k must span an interval [0,R] beginning at 0. 
# If this interval did not include all censoring times, # then `uppercen' must be the number of censoring times # that were not counted in 'cen'. { n <- length(nco) if(n != length(cen) || n != length(ncc)) stop("histogram lengths do not match") # # denominator: reverse cumulative histogram of censoring times # denom(r) = #{i : C_i >= r} # We compute # cc[k] = #{i: C_i > breaks[k]} # except that > becomes >= for k=0. # cc <- revcumsum(cen) + uppercen # # # numerator # #{i: T_i <= r <= C_i } # = #{i: T_i <= r, T_i <= C_i} - #{i: C_i < r, T_i <= C_i} # We compute # u[k] = #{i: T_i <= C_i, T_i <= breaks[k+1]} # - #{i: T_i <= C_i, C_i <= breaks[k]} # = #{i: T_i <= C_i, C_i > breaks[k], T_i <= breaks[k+1]} # this ensures that numerator and denominator are # comparable, u[k] <= cc[k] always. # u <- cumsum(nco) - c(0,cumsum(ncc)[1:(n-1)]) rs <- u/cc # # Hence rs[k] = u[k]/cc[k] is an estimator of F(r) # for r = breaks[k+1], i.e. for the right hand end of the interval. # if(!show) return(rs) else return(list(rs=rs, numerator=u, denominator=cc)) } "km.rs" <- function(o, cc, d, breaks) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val # compile histograms (breakpoints may not span data) obs <- whist( o, breaks=bval) nco <- whist( o[d], breaks=bval) cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) # number of observations exceeding largest breakpoint upperobs <- attr(obs, "high") uppercen <- attr(cen, "high") # go km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # return(list(rs=rs, km=km$km, hazard=km$lambda, r=breaks$r, breaks=bval)) } "km.rs.opt" <- function(o, cc, d, breaks, KM=TRUE, RS=TRUE) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val out <- list(r=breaks$r, breaks=bval) if(KM || RS) nco <- whist( o[d], breaks=bval) if(KM) { obs <- whist( o, breaks=bval) upperobs <- attr(obs, "high") km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) out <- append(list(km=km$km, hazard=km$lambda), out) } if(RS) { cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) uppercen <- attr(cen, "high") rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) out <- append(list(rs=rs), out) } return(out) } censtimeCDFest <- function(o, cc, d, breaks, ..., KM=TRUE, RS=TRUE, HAN=TRUE, RAW=TRUE, han.denom=NULL, tt=NULL, pmax=0.9) { # Histogram-based estimation of cumulative distribution function # of lifetimes subject to censoring. 
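# (This wraps km.rs.opt above, optionally adding the Hanisch estimate
# and the raw empirical c.d.f. to the same 'fv' table.)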
# o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # han.denom: denominator (eroded area) for each value of r # tt: uncensored lifetimes T_i, if known breaks <- as.breakpts(breaks) bval <- breaks$val rval <- breaks$r rmax <- breaks$max # Kaplan-Meier and/or Reduced Sample out <- km.rs.opt(o, cc, d, breaks, KM=KM, RS=RS) # convert to data frame out$breaks <- NULL df <- as.data.frame(out) # Raw ecdf of observed lifetimes if available if(RAW && !is.null(tt)) { h <- whist(tt[tt <= rmax], breaks=bval) df <- cbind(df, data.frame(raw=cumsum(h)/length(tt))) } # Hanisch if(HAN) { if(is.null(han.denom)) stop("Internal error: missing denominator for Hanisch estimator") if(length(han.denom) != length(rval)) stop(paste("Internal error:", "length(han.denom) =", length(han.denom), "!=", length(rval), "= length(rvals)")) # uncensored distances x <- o[d] # calculate Hanisch estimator h <- whist(x[x <= rmax], breaks=bval) H <- cumsum(h/han.denom) df <- cbind(df, data.frame(han=H/max(H[is.finite(H)]))) } # determine appropriate plotting range bestest <- if(KM) "km" else if(HAN) "han" else if(RS) "rs" else "raw" alim <- range(df$r[df[[bestest]] <= pmax]) # convert to fv object nama <- c("r", "km", "hazard", "han", "rs", "raw") avail <- c(TRUE, KM, KM, HAN, RS, RAW) iscdf <- c(FALSE, TRUE, FALSE, TRUE, TRUE, TRUE) labl <- c("r", "hat(%s)[km](r)", "lambda(r)", "hat(%s)[han](r)", "hat(%s)[bord](r)", "hat(%s)[raw](r)")[avail] desc <- c("distance argument r", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)", "Hanisch estimate of %s", "border corrected estimate of %s", "uncorrected estimate of %s")[avail] df <- df[, nama[avail]] Z <- fv(df, "r", substitute(CDF(r), NULL), bestest, . 
~ r, alim, labl, desc, fname="CDF") fvnames(Z, ".") <- nama[iscdf & avail] return(Z) } # simple interface for students and code development compileCDF <- function(D, B, r, ..., han.denom=NULL, check=TRUE) { han <- !is.null(han.denom) breaks <- breakpts.from.r(r) if(check) { stopifnot(length(D) == length(B) && all(D >= 0) && all(B >= 0)) if(han) stopifnot(length(han.denom) == length(r)) } D <- as.vector(D) B <- as.vector(B) # observed (censored) lifetimes o <- pmin.int(D, B) # censoring indicators d <- (D <= B) # go result <- censtimeCDFest(o, B, d, breaks, HAN=han, han.denom=han.denom, RAW=TRUE, tt=D) result <- rebadge.fv(result, new.fname="compileCDF") } spatstat.core/R/diagnoseppm.R0000644000176200001440000003457114144333462015714 0ustar liggesusers# # diagnoseppm.R # # Makes diagnostic plots based on residuals or energy weights # # $Revision: 1.44 $ $Date: 2019/10/02 10:33:21 $ # diagnose.ppm.engine <- function(object, ..., type="eem", typename, opt, sigma=NULL, rbord=reach(object), compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, rv=NULL, oldstyle=FALSE, splineargs = list(spar=0.5), verbose=TRUE) { if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # quadrature points Q <- quad.ppm(object) U <- union.quad(Q) Qweights <- w.quad(Q) # -------------- Calculate residuals/weights ------------------- # Discretised residuals if(type == "eem") { residval <- if(!is.null(rv)) rv else eem(object, check=FALSE) residval <- as.numeric(residval) X <- data.ppm(object) Y <- X %mark% residval } else { if(!is.null(rv) && !inherits(rv, "msr")) stop("rv should be a measure (object of class msr)") residobj <- if(!is.null(rv)) rv else residuals.ppm(object, type=type, check=FALSE) residval <- with(residobj, "increment") if(ncol(as.matrix(residval)) > 1L) stop("Not implemented for vector-valued residuals; use [.msr to split into separate components") Y <- U %mark% residval } # Atoms and density of measure Ymass <- NULL Ycts <- NULL Ydens <- NULL if(compute.cts) { if(type == "eem") { Ymass <- Y Ycts <- U %mark% (-1) Ydens <- as.im(-1, Y$window) } else { atoms <- with(residobj, "is.atom") masses <- with(residobj, "discrete") cts <- with(residobj, "density") if(!is.null(atoms) && !is.null(masses) && !is.null(cts)) { Ymass <- (U %mark% masses)[atoms] Ycts <- U %mark% cts # remove NAs (as opposed to zero cif points) if(!all(ok <- is.finite(cts))) { U <- U[ok] Ycts <- Ycts[ok] cts <- cts[ok] Qweights <- Qweights[ok] } # interpolate continuous part to yield an image for plotting if(type == "inverse" && all(cts != 0)) { Ydens <- as.im(-1, Y$window) } else if(is.stationary.ppm(object) && is.poisson.ppm(object)) { # all values of `cts' will be equal Ydens <- as.im(cts[1L], Y$window) } else { smallsigma <- maxnndist(Ycts) Ujitter <- U Ujitter$x <- U$x + runif(U$n, -smallsigma, smallsigma) Ujitter$y <- U$y + runif(U$n, -smallsigma, smallsigma) Ydens <- Smooth(Ujitter %mark% marks(Ycts), sigma=smallsigma, weights=Qweights, edge=TRUE, ...) 
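          # (The quadrature points are jittered by up to 'smallsigma',
          # the maximum nearest-neighbour distance of Ycts, so that
          # Smooth() receives distinct locations; the same scale is
          # used as the smoothing bandwidth.)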
} } } } #---------------- Erode window --------------------------------- # ## Compute windows W <- Y$window # Erode window if required clip <- !is.null(rbord) && is.finite(rbord) && (rbord > 0) if(clip) { Wclip <- erosion.owin(W, rbord) Yclip <- Y[Wclip] Qweightsclip <- Qweights[inside.owin(U, , Wclip)] if(!is.null(Ycts)) Ycts <- Ycts[Wclip] if(!is.null(Ydens)) Ydens <- Ydens[Wclip, drop=FALSE] } else { Wclip <- W Yclip <- Y } # ------------ start collecting results ------------------------- result <- list(type=type, clip=clip, Y=Y, W=W, Yclip=Yclip, Ymass=Ymass, Ycts=Ycts, Ydens=Ydens) # ------------- smoothed field ------------------------------ Z <- NULL if(opt$smooth | opt$xcumul | opt$ycumul | opt$xmargin | opt$ymargin) { if(is.null(sigma)) sigma <- 0.1 * diameter(Wclip) Z <- density.ppp(Yclip, sigma, weights=Yclip$marks, edge=TRUE, ...) } if(opt$smooth) { result$smooth <- list(Z = Z, sigma=sigma) if(type == "pearson") result$smooth$sdp <- 1/(2 * sigma * sqrt(pi)) } # -------------- marginals of smoothed field ------------------------ if(opt$xmargin) { xZ <- apply(Z$v, 2, sum, na.rm=TRUE) * Z$xstep if(type == "eem") ExZ <- colSums(!is.na(Z$v)) * Z$xstep else ExZ <- numeric(length(xZ)) result$xmargin <- list(x=Z$xcol, xZ=xZ, ExZ=ExZ) } if(opt$ymargin) { yZ <- apply(Z$v, 1L, sum, na.rm=TRUE) * Z$ystep if(type == "eem") EyZ <- rowSums(!is.na(Z$v)) * Z$ystep else EyZ <- numeric(length(yZ)) result$ymargin <- list(y=Z$yrow, yZ=yZ, EyZ=EyZ) } # -------------- cumulative (lurking variable) plots -------------- ## precompute simulated patterns for envelopes if(identical(envelope, TRUE)) envelope <- simulate(object, nsim=nsim, progress=verbose) if(opt$xcumul) result$xcumul <- lurking(object, covariate=expression(x), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="x coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) if(opt$ycumul) result$ycumul <- lurking(object, covariate=expression(y), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="y coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) 
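  ## The same cumulative-residual calculation can be invoked directly;
  ## a minimal sketch, assuming 'fit' is a fitted ppm object:
  ##     lurking(fit, covariate=expression(x), type="raw", plot.it=TRUE)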
# -------------- summary numbers -------------- if(opt$sum) result$sum <- list(marksum=sum(Yclip$marks, na.rm=TRUE), areaWclip=area(Wclip), areaquad=if(clip) sum(Qweightsclip) else sum(Qweights), range=if(!is.null(Z)) range(Z) else NULL) return(invisible(result)) } ######################################################################## diagnose.ppm <- function(object, ..., type="raw", which="all", sigma=NULL, rbord =reach(object), cumulative=TRUE, plot.it = TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) { asked.newstyle <- !missing(oldstyle) && !oldstyle if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # check whether model originally came from replicated data is.subfit <- (object$method == "mppm") Coefs <- coef(object) if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } else if(compute.sd && is.null(getglmfit(object))) { object <- update(object, forcefit=TRUE, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } # ------------- Interpret arguments -------------------------- # Edge-effect avoidance if(missing(rbord) && !is.finite(rbord)) { ## Model has infinite reach ## Use correction rule employed when fitting rbord <- if(object$correction == "border") object$rbord else 0 } # match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") # 'which' is multiple choice with exact matching optionlist <- c("all", "marks", "smooth", "x", "y", "sum") if(!all(m <- which %in% optionlist)) stop(paste("Unrecognised choice(s) of", paste(sQuote("which"), ":", sep=""), paste(which[!m], collapse=", "))) opt <- list() opt$all <- "all" %in% which opt$marks <- ("marks" %in% which) | opt$all opt$smooth <- ("smooth" %in% which) | opt$all opt$xmargin <- (("x" %in% which) | opt$all) && !cumulative opt$ymargin <- (("y" %in% which) | opt$all) && !cumulative opt$xcumul <- (("x" %in% which) | opt$all) && cumulative opt$ycumul <- (("y" %in% which) | opt$all) && cumulative opt$sum <- ("sum" %in% which) | opt$all # compute and plot estimated standard deviations? # yes for Poisson, no for other models, unless overridden if(!missing(compute.sd)) plot.sd <- compute.sd else plot.sd <- list(...)$plot.sd if(is.null(plot.sd)) plot.sd <- is.poisson.ppm(object) if(missing(compute.sd)) compute.sd <- plot.sd # default for mppm objects is oldstyle=TRUE if(compute.sd && is.subfit) { if(!asked.newstyle) { # silently change default oldstyle <- TRUE } else { stop(paste("Variance calculation for a subfit of an mppm object", "is only implemented for oldstyle=TRUE"), call.=FALSE) } } # interpolate the density of the residual measure? 
if(missing(compute.cts)) { plot.neg <- resolve.defaults(list(...), formals(plot.diagppm)["plot.neg"])$plot.neg # only if it is needed for the mark plot compute.cts <- opt$marks && (plot.neg != "discrete") } # ------- DO THE CALCULATIONS ----------------------------------- RES <- diagnose.ppm.engine(object, type=type, typename=typename, opt=opt, sigma=sigma, rbord=rbord, compute.sd=compute.sd, compute.cts=compute.cts, envelope=envelope, nsim=nsim, nrank=nrank, rv=rv, oldstyle=oldstyle, splineargs=splineargs, ...) RES$typename <- typename RES$opt <- opt RES$compute.sd <- compute.sd RES$compute.cts <- compute.cts class(RES) <- "diagppm" # ------- PLOT -------------------------------------------------- if(plot.it) plot(RES, ...) return(RES) } plot.diagppm <- function(x, ..., which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) { opt <- x$opt plot.neg <- match.arg(plot.neg) plot.smooth <- match.arg(plot.smooth) if(!missing(which)) { witches <- c("all", "marks", "smooth", "x", "y", "sum") unknown <- is.na(match(which, witches)) if(any(unknown)) warning(paste("Unrecognised", ngettext(sum(unknown), "option", "options"), "which =", commasep(sQuote(which[unknown])), ": valid options are", commasep(sQuote(witches))), call.=FALSE) oldopt <- opt newopt <- list() newopt$all <- "all" %in% which newopt$marks <- ("marks" %in% which) | newopt$all newopt$smooth <- ("smooth" %in% which) | newopt$all newopt$xmargin <- (("x" %in% which) | newopt$all) && oldopt$xmargin newopt$ymargin <- (("y" %in% which) | newopt$all) && oldopt$ymargin newopt$xcumul <- (("x" %in% which) | newopt$all) && oldopt$xcumul newopt$ycumul <- (("y" %in% which) | newopt$all) && oldopt$ycumul newopt$sum <- ("sum" %in% which) | newopt$all illegal <- (unlist(newopt) > unlist(oldopt)) if(any(illegal)) { offending <- paste(names(newopt)[illegal], collapse=", ") whinge <- paste("cannot display the following components;\n", "they were not computed: - \n", offending, "\n") stop(whinge) } opt <- newopt } if(missing(plot.sd)) { plot.sd <- x$compute.sd } else if(plot.sd && !(x$compute.sd)) { warning("can't plot standard deviations; they were not computed") plot.sd <- FALSE } if(!(x$compute.cts) && (plot.neg != "discrete") && (opt$marks || opt$all)) { if(!missing(plot.neg)) warning("can't plot continuous component of residuals; it was not computed") plot.neg <- "discrete" } if(opt$all) resid4plot(RES=x, plot.neg=plot.neg, plot.smooth=plot.smooth, spacing=spacing, outer=outer, srange=srange, monochrome=monochrome, main=main, ...) else resid1plot(RES=x, opt=opt, plot.neg=plot.neg, plot.smooth=plot.smooth, srange=srange, monochrome=monochrome, main=main, ...) } print.diagppm <- function(x, ...) 
{ opt <- x$opt typename <- x$typename splat("Model diagnostics", paren(typename)) splat("Diagnostics available:") optkey <- list(all="four-panel plot", marks=paste("mark plot", if(!x$compute.cts) "(discrete representation only)" else NULL), smooth="smoothed residual field", xmargin="x marginal density", ymargin="y marginal density", xcumul="x cumulative residuals", ycumul="y cumulative residuals", sum="sum of all residuals") avail <- unlist(optkey[names(opt)[unlist(opt)]]) names(avail) <- NULL cat(paste("\t", paste(avail, collapse="\n\t"), "\n", sep="")) if(opt$sum) { xs <- x$sum windowname <- if(x$clip) "clipped window" else "entire window" splat("sum of", typename, "in", windowname, "=", signif(sum(xs$marksum),4)) splat("area of", windowname, "=", signif(xs$areaWclip, 4)) splat("quadrature area =", signif(xs$areaquad, 4)) } if(opt$smooth) { splat("range of smoothed field = ", prange(signif(range(x$smooth$Z),4))) if(!is.null(sdp <- x$smooth$sdp)) splat("Null standard deviation of smoothed Pearson residual field:", signif(sdp, 4)) } return(invisible(NULL)) } spatstat.core/R/hermite.R0000644000176200001440000000417014144333462015033 0ustar liggesusers## ## hermite.R ## ## Gauss-Hermite quadrature ## ## $Revision: 1.5 $ $Date: 2017/02/07 07:35:32 $ ## HermiteCoefs <- function(order) { ## compute coefficients of Hermite polynomial (unnormalised) x <- 1 if(order > 0) for(n in 1:order) x <- c(0, 2 * x) - c(((0:(n-1)) * x)[-1L], 0, 0) return(x) } gauss.hermite <- function(f, mu=0, sd=1, ..., order=5) { stopifnot(is.function(f)) stopifnot(length(mu) == 1) stopifnot(length(sd) == 1) ## Hermite polynomial coefficients (un-normalised) Hn <- HermiteCoefs(order) Hn1 <- HermiteCoefs(order-1) ## quadrature points x <- sort(Re(polyroot(Hn))) ## weights Hn1x <- matrix(Hn1, nrow=1) %*% t(outer(x, 0:(order-1), "^")) w <- 2^(order-1) * factorial(order) * sqrt(pi)/(order * Hn1x)^2 ## adjust ww <- w/sqrt(pi) xx <- mu + sd * sqrt(2) * x ## compute ans <- 0 for(i in seq_along(x)) ans <- ans + ww[i] * f(xx[i], ...) return(ans) } dmixpois <- local({ dpoisG <- function(x, ..., k, g) dpois(k, g(x)) function(x, mu, sd, invlink=exp, GHorder=5) gauss.hermite(dpoisG, mu=mu, sd=sd, g=invlink, k=x, order=GHorder) }) pmixpois <- local({ ppoisG <- function(x, ..., q, g, lot) ppois(q, g(x), lower.tail=lot) function(q, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5) gauss.hermite(ppoisG, mu=mu, sd=sd, g=invlink, q=q, order=GHorder, lot=lower.tail) }) qmixpois <- function(p, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5) { ## guess upper limit ## Guess upper and lower limits pmin <- min(p, 1-p)/2 lam.hi <- invlink(qnorm(pmin, mean=max(mu), sd=max(sd), lower.tail=FALSE)) lam.lo <- invlink(qnorm(pmin, mean=min(mu), sd=max(sd), lower.tail=TRUE)) kmin <- qpois(pmin, lam.lo, lower.tail=TRUE) kmax <- qpois(pmin, lam.hi, lower.tail=FALSE) kk <- kmin:kmax pp <- pmixpois(kk, mu, sd, invlink, lower.tail=TRUE, GHorder) ans <- if(lower.tail) kk[findInterval(p, pp, all.inside=TRUE)] else rev(kk)[findInterval(1-p, rev(1-pp), all.inside=TRUE)] return(ans) } rmixpois <- function(n, mu, sd, invlink=exp) { lam <- invlink(rnorm(n, mean=mu, sd=sd)) y <- rpois(n, lam) return(y) } spatstat.core/R/pcf.R0000644000176200001440000003044314144333463014151 0ustar liggesusers# # pcf.R # # $Revision: 1.69 $ $Date: 2021/11/13 01:08:49 $ # # # calculate pair correlation function # from point pattern (pcf.ppp) # or from estimate of K or Kcross (pcf.fv) # or from fasp object # # pcf <- function(X, ...) 
{ UseMethod("pcf") } pcf.ppp <- function(X, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r", "d"), var.approx=FALSE, domain=NULL, ratio=FALSE, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- Window(X) areaW <- area(win) npts <- npoints(X) lambda <- npts/areaW lambda2area <- areaW * lambda^2 kernel <- match.kernel(kernel) rmaxdefault <- rmax.rule("K", win, lambda) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, win)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick pcfdot() into doing it indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) g <- pcfdot(X %mark% indom, i="TRUE", r=r, correction=correction, kernel=kernel, bw=bw, stoyan=stoyan, divisor=divisor, ...) if(!ratio) { ## relabel g <- rebadge.fv(g, quote(g(r)), "g") } else { ## construct ratfv object denom <- sum(indom == "TRUE") * lambda g <- ratfv(as.data.frame(g), NULL, denom, "r", quote(g(r)), "theo", NULL, c(0, rmaxdefault), attr(g, "labl"), attr(g, "desc"), fname="g", ratio=TRUE) } unitname(g) <- unitname(X) if(var.approx) warning("var.approx is not implemented when 'domain' is given") return(g) } correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="translate", best="best", none="none"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) # bandwidth if(is.null(bw) && (kernel == "epanechnikov")) { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambda) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambda) } ########## r values ############################ # handle arguments r and breaks breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax), .StripNull = TRUE) ################################################# # compute pairwise distances if(npts > 1) { needall <- any(correction %in% c("translate", "isotropic")) if(is.null(close)) { what <- if(needall) "all" else "ijd" close <- closepairs(X, rmax + hmax, what=what) } else { #' check 'close' has correct format needed <- if(!needall) c("i", "j", "d") else c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- ratfv(df, NULL, lambda2area, "r", quote(g(r)), "theo", NULL, alim, c("r","%s[Pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="g", ratio=ratio) ###### compute ####### bw.used <- NULL if(any(correction=="none")) { #' uncorrected if(npts > 1) { kdenN <- sewpcf(dIJ, 1, denargs, 
lambda2area, divisor) gN <- kdenN$g bw.used <- attr(kdenN, "bw") } else gN <- undefined if(!ratio) { out <- bind.fv(out, data.frame(un=gN), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } else { out <- bind.ratfv(out, data.frame(un=gN * lambda2area), lambda2area, "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } } if(any(correction=="translate")) { # translation correction if(npts > 1) { edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) kdenT <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gT <- kdenT$g bw.used <- attr(kdenT, "bw") } else gT <- undefined if(!ratio) { out <- bind.fv(out, data.frame(trans=gT), "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } else { out <- bind.ratfv(out, data.frame(trans=gT * lambda2area), lambda2area, "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { XI <- ppp(close$xi, close$yi, window=win, check=FALSE) edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) kdenR <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gR <- kdenR$g bw.used <- attr(kdenR, "bw") } else gR <- undefined if(!ratio) { out <- bind.fv(out, data.frame(iso=gR), "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } else { out <- bind.ratfv(out, data.frame(iso=gR * lambda2area), lambda2area, "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } ## variance approximation ## Illian et al 2008 p 234 equation 4.3.42 if(var.approx) { gr <- if(any(correction == "isotropic")) gR else gT # integral of squared kernel intk2 <- kernel.squint(kernel, bw.used) # isotropised set covariance of window gWbar <- as.function(rotmean(setcov(win), result="fv")) vest <- gr * intk2/(pi * r * gWbar(r) * lambda^2) if(!ratio) { out <- bind.fv(out, data.frame(v=vest), "v(r)", "approximate variance of %s", "v") } else { vden <- rep((npts-1)^2, length(vest)) vnum <- vden * vest out <- bind.ratfv(out, data.frame(v=vnum), data.frame(v=vden), "v(r)", "approximate variance of %s", "v") } } ## Finish off ## default is to display all corrections formula(out) <- . 
~ r fvnames(out, ".") <- setdiff(rev(colnames(out)), c("r", "v")) ## unitname(out) <- unitname(X) ## copy to other components if(ratio) out <- conform.ratfv(out) attr(out, "bw") <- bw.used return(out) } # Smoothing Estimate of Weighted Pair Correlation # d = vector of relevant distances # w = vector of edge correction weights (in normal use) # denargs = arguments to density.default # lambda2area = constant lambda^2 * areaW (in normal use) sewpcf <- function(d, w, denargs, lambda2area, divisor=c("r","d")) { divisor <- match.arg(divisor) nw <- length(w) if(nw != length(d) && nw != 1) stop("Internal error: incorrect length of weights vector in sewpcf") if(divisor == "d") { w <- w/d if(!all(good <- is.finite(w))) { nbad <- sum(!good) warning(paste(nbad, "infinite, NA or NaN", ngettext(nbad, "contribution was", "contributions were"), "deleted from pcf estimate with divisor='d'.", "Fraction deleted: ", paste0(round(100 * nbad/length(w), 2), "%")), call.=FALSE) d <- d[good] w <- w[good] } nw <- length(w) } if(nw == 1) { #' weights are equal kden <- do.call.matched(density.default, append(list(x=d), denargs)) wtot <- length(d) } else { #' weighted wtot <- sum(w) kden <- do.call.matched(density.default, append(list(x=d, weights=w/wtot), denargs)) } r <- kden$x y <- kden$y * wtot if(divisor == "r") y <- y/r g <- y/(2 * pi * lambda2area) result <- data.frame(r=r,g=g) attr(result, "bw") <- kden$bw return(result) } # #---------- OTHER METHODS FOR pcf -------------------- # "pcf.fasp" <- function(X, ..., method="c") { verifyclass(X, "fasp") Y <- X Y$title <- paste("Array of pair correlation functions", if(!is.null(X$dataname)) "for", X$dataname) # go to work on each function for(i in seq_along(X$fns)) { Xi <- X$fns[[i]] PCFi <- pcf.fv(Xi, ..., method=method) Y$fns[[i]] <- PCFi if(is.fv(PCFi)) Y$default.formula[[i]] <- formula(PCFi) } return(Y) } pcf.fv <- local({ callmatched <- function(fun, argue) { formalnames <- names(formals(fun)) formalnames <- formalnames[formalnames != "..."] do.call(fun, argue[names(argue) %in% formalnames]) } pcf.fv <- function(X, ..., method="c") { verifyclass(X, "fv") # extract r and the recommended estimate of K r <- with(X, .x) K <- with(X, .y) alim <- attr(X, "alim") # remove NA's ok <- !is.na(K) K <- K[ok] r <- r[ok] switch(method, a = { ss <- callmatched(smooth.spline, list(x=r, y=K, ...)) dK <- predict(ss, r, deriv=1)$y g <- dK/(2 * pi * r) }, b = { y <- K/(2 * pi * r) y[!is.finite(y)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=y, ...)) dy <- predict(ss, r, deriv=1)$y g <- dy + y/r }, c = { z <- K/(pi * r^2) z[!is.finite(z)] <- 1 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- (r/2) * dz + z }, d = { z <- sqrt(K) z[!is.finite(z)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- z * dz/(pi * r) }, stop(paste("unrecognised method", sQuote(method))) ) # pack result into "fv" data frame Z <- fv(data.frame(r=r, theo=rep.int(1, length(r)), pcf=g), "r", substitute(g(r), NULL), "pcf", . 
~ r, alim, c("r", "%s[pois](r)", "%s(r)"), c("distance argument r", "theoretical Poisson value of %s", "estimate of %s by numerical differentiation"), fname="g") unitname(Z) <- unitname(X) return(Z) } pcf.fv }) spatstat.core/R/addvar.R0000644000176200001440000003067014144333461014642 0ustar liggesusers# # addvar.R # # added variable plot # # $Revision: 1.11 $ $Date: 2016/10/23 10:36:58 $ # addvar <- function(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) { if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- n.quad(Q) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Pearson residuals from point process model yr <- residuals(model, type="Pearson") yresid <- with(yr, "increment") # averaged (then sum with weight 'wts') yresid <- yresid/wts ################################################################# # Covariates # # covariate data frame df <- getglmdata(model) if(!all(c("x", "y") %in% names(df))) { xy <- as.data.frame(quadpoints) notxy <- !(colnames(df) %in% c("x", "y")) other <- df[, notxy] df <- cbind(xy, other) } # avail.covars <- names(df) # covariates used in model used.covars <- model.covariates(model) fitted.covars <- model.covariates(model, offset=FALSE) # ################################################################# # identify the covariate # if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate if(covname %in% fitted.covars) stop(paste("covariate named", dQuote(covname), "is already used in model")) covvalues <- evalCovariate(covariate, quadpoints) # validate covvalues if(is.null(covvalues)) stop("Unable to extract covariate values") else if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) # tack onto data frame covdf <- data.frame(covvalues) names(covdf) <- covname df <- cbind(df, covdf) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # if(covname %in% fitted.covars) stop(paste("covariate", dQuote(covname), "already used in model")) # if(!(covname %in% avail.covars)) stop(paste("covariate", dQuote(covname), "not available")) # covvalues <- df[, covname] } ################################################################ # Pearson residuals from weighted linear regression of new covariate on others rhs <- formula(model) fo <- as.formula(paste(covname, paste(rhs, collapse=" "))) fit <- lm(fo, data=df, weights=lam * wts) xresid <- 
residuals(fit, type="pearson")/sqrt(wts) if(crosscheck) { message("Cross-checking...") X <- model.matrix(fo, data=df) V <- diag(lam * wts) sqrtV <- diag(sqrt(lam * wts)) Info <- t(X) %*% V %*% X H <- sqrtV %*% X %*% solve(Info) %*% t(X) %*% sqrtV nQ <- length(lam) Id <- diag(1, nQ, nQ) xresid.pearson <- (Id - H) %*% sqrtV %*% covvalues xresid.correct <- xresid.pearson/sqrt(wts) abserr <- max(abs(xresid - xresid.correct), na.rm=TRUE) relerr <- abserr/diff(range(xresid.correct, finite=TRUE)) if(is.finite(relerr) && relerr > 0.01) { warning("Large relative error in residual computation") } message("Done.") } # experiment suggests residuals(fit, "pearson") == xresid.correct # and residuals(fit) equivalent to # covvalues - X %*% solve(t(X) %*% V %*% X) %*% t(X) %*% V %*% covvalues ################################################################# # check for NA's etc # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg <- !is.finite(xresid) | !is.finite(yresid) if(any(offending <- nbg & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values ok <- !nbg & operative Q <- Q[ok] xresid <- xresid[ok] yresid <- yresid[ok] covvalues <- covvalues[ok] df <- df[ok, ] lam <- lam[ok] wts <- wts[ok] Z <- Z[ok] insubregion <- insubregion[ok] #################################################### # assemble data for smoothing xx <- xresid yy <- yresid ww <- wts if(makefrom <- is.null(from)) from <- min(xresid) if(maketo <- is.null(to)) to <- max(xresid) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(xx, weights=yy * ww, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(xx[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator xx <- xx[insubregion] yy <- yy[insubregion] ww <- ww[insubregion] lam <- lam[insubregion] Z <- Z[insubregion] if(makefrom) from <- min(xx) if(maketo) to <- max(xx) numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(xx,weights=ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
#################################################### # Determine recommended plot range xr <- range(xresid[Z], finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x ratio <- function(y, x) { ifelseXB(x != 0, y/x, NA) } yyy <- ratio(numfun(xxx), denfun(xxx)) # Null variance estimation # smooth with weight 1 and smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(xx,weights=ww, bw=tau,adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) vvv <- ratio(varnumfun(xxx), 2 * sigma * sqrt(pi) * denfun(xxx)^2) safesqrt <- function(x) { ok <- is.finite(x) & (x >= 0) y <- rep.int(NA_real_, length(x)) y[ok] <- sqrt(x[ok]) return(y) } twosd <- 2 * safesqrt(vvv) # pack into fv object rslt <- data.frame(rcov=xxx, rpts=yyy, theo=0, var=vvv, hi=twosd, lo=-twosd) nuc <- length(used.covars) if(nuc == 0) { given <- givenlab <- 1 } else if(nuc == 1) { given <- givenlab <- used.covars } else { given <- commasep(used.covars, ", ") givenlab <- paste("list", paren(given)) } given <- paste("|", given) xlab <- sprintf("r(paste(%s, '|', %s))", covname, givenlab) ylab <- sprintf("r(paste(points, '|', %s))", givenlab) yexpr <- parse(text=ylab)[[1L]] desc <- c(paste("Pearson residual of covariate", covname, given), paste("Smoothed Pearson residual of point process", given), "Null expected value of point process residual", "Null variance of point process residual", "Upper limit of pointwise 5%% significance band", "Lower limit of pointwise 5%% significance band") rslt <- fv(rslt, argu="rcov", ylab=yexpr, valu="rpts", fmla= (. ~ rcov), alim=alim, labl=c(xlab, "%s", "0", "bold(var) ~ %s", "%s[hi]", "%s[lo]"), desc=desc, fname=ylab) attr(rslt, "dotnames") <- c("rpts", "theo", "hi", "lo") # data associated with quadrature points reserved <- (substr(colnames(df), 1L, 4L) == ".mpl") isxy <- colnames(df) %in% c("x", "y") dfpublic <- cbind(df[, !(reserved | isxy)], data.frame(xresid, yresid)) attr(rslt, "spatial") <- union.quad(Q) %mark% dfpublic # auxiliary data attr(rslt, "stuff") <- list(covname = covname, xresid = xresid, yresid = yresid, covvalues = covvalues, wts = wts, bw = bw, adjust = adjust, sigma = sigma, used.covars = used.covars, modelcall = modelcall, callstring = callstring, xlim = c(from, to), xlab = xlab, ylab = ylab, lmcoef = coef(fit), bw.input = bw.input, bw.restrict = bw.restrict, restricted = !is.null(subregion)) # finish class(rslt) <- c("addvar", class(rslt)) return(rslt) } print.addvar <- function(x, ...) 
{ cat("Added variable plot diagnostic (class addvar)\n") s <- attr(x, "stuff") mc <- paste(s$modelcall, collapse="") cat(paste("for the covariate", dQuote(s$covname), "for the fitted model:", if(nchar(mc) <= 30) "" else "\n\t", mc, "\n\n")) if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n\n")) NextMethod("print") } plot.addvar <- function(x, ..., do.points=FALSE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") # covname <- s$covname xresid <- s$xresid yresid <- s$yresid # adjust y limits if intending to plot points as well ylimcover <- if(do.points) range(yresid, finite=TRUE) else NULL # do.call(plot.fv, resolve.defaults(list(quote(x)), list(...), list(main=xname, shade=c("hi", "lo"), legend=FALSE, ylim.covers=ylimcover))) # plot points if(do.points) do.call(points, resolve.defaults(list(x=xresid, y=yresid), list(...), list(pch=3, cex=0.5))) return(invisible(x)) } spatstat.core/R/ord.family.R0000644000176200001440000001020214144333463015434 0ustar liggesusers# # # ord.family.S # # $Revision: 1.17 $ $Date: 2015/10/21 09:06:57 $ # # The Ord model (family of point process models) # # ord.family: object of class 'isf' defining Ord model structure # # # ------------------------------------------------------------------- # ord.family <- list( name = "ord", print = function(self) { cat("Ord model family\n") }, eval = function(X, U, EqualPairs, pot, pars, ...) { # # This auxiliary function is not meant to be called by the user. # It computes the distances between points, # evaluates the pair potential and applies edge corrections. # # Arguments: # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function function(d, p) # pars auxiliary parameters for pot list(......) # ... IGNORED # # Value: # matrix of values of the potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # # Note: # The potential function 'pot' will be called as # pot(M, pars) where M is a vector of tile areas. # It must return a vector of the same length as M # or a matrix with number of rows equal to the length of M ########################################################################## nX <- npoints(X) nU <- length(U$x) # number of data + dummy points seqX <- seq_len(nX) seqU <- seq_len(nU) # determine which points in the combined list are data points if(length(EqualPairs) > 0) is.data <- seqU %in% EqualPairs[,2] else is.data <- rep.int(FALSE, nU) ############################################################################# # First compute Dirichlet tessellation of data # and its total potential (which could be vector-valued) ############################################################################# marks(X) <- NULL Wdata <- dirichletWeights(X) # sic - these are the tile areas. 
Pdata <- pot(Wdata, pars) summa <- function(P) { if(is.matrix(P)) matrowsum(P) else if(is.vector(P) || length(dim(P))==1 ) sum(P) else stop("Don't know how to take row sums of this object") } total.data.potential <- summa(Pdata) # Initialise V dimpot <- dim(Pdata)[-1] # dimension of each value of the potential function # (= numeric(0) if potential is a scalar) dimV <- c(nU, dimpot) if(length(dimV) == 1) dimV <- c(dimV, 1) V <- array(0, dim=dimV) rowV <- array(seqU, dim=dimV) #################### Next, evaluate V for the data points. ############### # For each data point, compute Dirichlet tessellation # of the data with this point removed. # Compute difference of total potential. ############################################################################# for(j in seq_len(nX)) { # Dirichlet tessellation of data without point j Wminus <- dirichletWeights(X[-j]) # regressor is the difference in total potential V[rowV == j] <- total.data.potential - summa(pot(Wminus, pars)) } #################### Next, evaluate V for the dummy points ################ # For each dummy point, compute Dirichlet tessellation # of (data points together with this dummy point) only. # Take difference of total potential. ############################################################################# for(j in seqU[!is.data]) { Xplus <- superimpose(X, list(x=U$x[j], y=U$y[j]), W=X$window) # compute Dirichlet tessellation (of these points only!) Wplus <- dirichletWeights(Xplus) # regressor is difference in total potential V[rowV == j] <- summa(pot(Wplus, pars)) - total.data.potential } cat("dim(V) = \n") print(dim(V)) return(V) } ######### end of function $eval ) ######### end of list class(ord.family) <- "isf" spatstat.core/R/nnclean.R0000644000176200001440000002051414144333463015015 0ustar liggesusers# # nnclean.R # # Nearest-neighbour clutter removal # # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # # $Revision: 1.20 $ $Date: 2020/12/19 05:25:06 $ # nnclean <- function(X, k, ...) 
{ UseMethod("nnclean") } nnclean.pp3 <- function(X, k, ..., convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) stopifnot(inherits(X, "pp3")) validposint(k, "nnclean.pp3") kthNND <- nndist(X, k=k) dont.complain.about(kthNND) # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(quote(kthNND), k=k), list(...), list(d=3, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # tack results onto point pattern as marks pp <- em$probs zz <- factor(em$z, levels=c(0,1)) levels(zz) <- c("noise", "feature") mm <- hyperframe(prob=pp, label=zz) marks(X) <- cbind(marks(X), mm) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nnclean.ppp <- function(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) validposint(k, "nnclean.ppp") if(!edge.correct) { # compute vector of k-th nearest neighbour distances kthNND <- nndist(X, k=k) } else { # replicate data periodically # (ensuring original points are listed first) Xbox <- X[as.rectangle(X)] Xpand <- periodify(Xbox, ix=c(0,-1,1), iy=c(0,-1,1), check=FALSE) # trim to margin W <- expand.owin(X$window, (1+2*wrap)^2) Xpand <- Xpand[W] kthNND <- nndist(Xpand, k=k) } dont.complain.about(kthNND) # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(quote(kthNND), k=k), list(...), list(d=2, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # extract results pp <- em$probs zz <- em$z zz <- factor(zz, levels=c(0,1)) levels(zz) <- c("noise", "feature") df <- data.frame(class=zz,prob=pp) if(edge.correct) { # trim back to original point pattern df <- df[seq_len(X$n), ] } # tack on marx <- marks(X, dfok=TRUE) if(is.null(marx)) marks(X, dfok=TRUE) <- df else marks(X, dfok=TRUE) <- cbind(df, marx) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nncleanEngine <- function(kthNND, k, d, ..., tol = 0.001, maxit = 50, plothist = FALSE, lineargs = list(), verbose=TRUE, Xname="X") { ## Adapted from statlib file NNclean.q ## Authors: Simon Byers and Adrian Raftery ## Adapted for spatstat by Adrian Baddeley n <- length(kthNND) ## Error handler by Adrian if(k >= n) { if(verbose) cat(paste("Cannot compute neighbours of order k =", k, "for a pattern of", n, "data points;", "treating all points as noise"), call.=FALSE) return(list(z = rep(0, n), probs = rep(0, n), lambda1 = NA, lambda2 = NA, p = 0, kthNND = kthNND, d=d, n=n, k=k, niter = 0, maxit = maxit, converged = TRUE, hist=NULL)) } ## Undocumented extension by Adrian Baddeley 2014 ## Allow different dimensions in feature and noise. ## d[1] is cluster dimension. d <- ensure2vector(d) alpha.d <- (2. * pi^(d/2.))/(d * gamma(d/2.)) # raise to power d for efficiency kNNDpowd1 <- kthNND^(d[1]) kNNDpowd2 <- kthNND^(d[2]) # # Now use kthNND in E-M algorithm # First set up starting guesses. # # probs <- numeric(n) thresh <- (min(kthNND) + diff(range(kthNND))/3.) 
high <- (kthNND > thresh) delta <- as.integer(high) p <- 0.5 lambda1 <- k/(alpha.d[1] * mean(kNNDpowd1[!high])) lambda2 <- k/(alpha.d[2] * mean(kNNDpowd2[ high])) loglik.old <- 0. loglik.new <- 1. # # Iterator starts here, # Z <- !kthNND niter <- 0 while(abs(loglik.new - loglik.old)/(1 + abs(loglik.new)) > tol) { if(niter >= maxit) { warning(paste("E-M algorithm failed to converge in", maxit, ngettext(maxit, "iteration", "iterations")), call.=FALSE) break } niter <- niter + 1 # E - step f1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) f2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) delta[!Z] <- (p * f1)/(p * f1 + (1 - p) * f2) delta[Z] <- 0 # M - step sumdelta <- sum(delta) negdelta <- 1. - delta p <- sumdelta/n lambda1 <- (k * sumdelta)/(alpha.d[1] * sum(kNNDpowd1 * delta)) lambda2 <- (k * (n - sumdelta))/(alpha.d[2] * sum(kNNDpowd2 * negdelta)) # evaluate marginal loglikelihood loglik.old <- loglik.new loglik.new <- sum( - p * lambda1 * alpha.d[1] * (kNNDpowd1 * delta) - (1. - p) * lambda2 * alpha.d[2] * (kNNDpowd2 * negdelta) + delta * k * log(lambda1 * alpha.d[1]) + negdelta * k * log(lambda2 * alpha.d[2])) if(verbose) cat(paste("Iteration", niter, "\tlogLik =", loglik.new, "\tp =", signif(p,4), "\n")) } if(plothist) { dotargs <- list(...) if(spatstat.options('monochrome')) dotargs <- col.args.to.grey(dotargs) ## compute plot limits to include both histogram and density xlim <- c(0, max(kthNND)) H <- do.call(hist, resolve.defaults(list(quote(kthNND), plot=FALSE, warn.unused=FALSE), dotargs, list(nclass=40))) barheights <- H$density support <- seq(from=xlim[1], to=xlim[2], length.out = 200) fittedy <- p * dknn(support, lambda=lambda1, k = k, d = d[1]) + (1 - p) * dknn(support, lambda=lambda2, k = k, d = d[2]) ylim <- range(c(0, barheights, fittedy)) xlab <- paste("Distance to", ordinal(k), "nearest neighbour") ## now plot it (unless overridden by plot=FALSE) reallyplot <- resolve.1.default("plot", list(...), list(plot=TRUE)) H <- do.call(hist, resolve.defaults(list(quote(kthNND), probability=TRUE), dotargs, list(plot=TRUE, warn.unused=reallyplot, nclass=40, xlim = xlim, ylim=ylim, xlab = xlab, ylab = "Probability density", axes = TRUE, main=""))) H$xname <- xlab if(reallyplot) { box() lineargs <- resolve.defaults(lineargs, list(col="green", lwd=2)) if(spatstat.options("monochrome")) lineargs <- col.args.to.grey(lineargs) do.call(lines, append(list(x=support, y=fittedy), lineargs)) } } # delta1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) delta2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) probs[!Z] <- delta1/(delta1 + delta2) probs[Z] <- 1 # if(verbose) { cat("Estimated parameters:\n") cat(paste("p [cluster] =", signif(p, 5), "\n")) cat(paste("lambda [cluster] =", signif(lambda1, 5), "\n")) cat(paste("lambda [noise] =", signif(lambda2, 5), "\n")) } # # z will be the classifications. 1= in cluster. 0= in noise. 
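  #
  # Typical use of the wrapper (a minimal sketch, assuming X is an
  # unmarked planar point pattern):
  #     Y <- nnclean(X, k=10, plothist=TRUE)
  #     plot(Y, which.marks="class")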
  #
  return(list(z = round(probs), probs = probs,
              lambda1 = lambda1, lambda2 = lambda2, p = p,
              kthNND = kthNND, d=d, n=n, k=k,
              niter = niter, maxit = maxit,
              converged = (niter < maxit),
              hist=if(plothist) H else NULL))
}
spatstat.core/R/bw.ppl.R0000644000176200001440000000263714144333461014603 0ustar liggesusers#'
#' bw.ppl.R
#'
#' Likelihood cross-validation for kernel smoother of point pattern
#'
#' $Revision: 1.12 $ $Date: 2020/04/08 04:27:12 $
#'

bw.ppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, weights=NULL,
                   shortcut=FALSE, warn=TRUE) {
  stopifnot(is.ppp(X))
  if(!is.null(sigma)) {
    stopifnot(is.numeric(sigma) && is.vector(sigma))
    ns <- length(sigma)
  } else {
    if(!is.null(srange)) check.range(srange) else {
      nnd <- nndist(X)
      srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2)
    }
    sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns)
  }
  cv <- numeric(ns)
  if(shortcut) {
    for(i in 1:ns) {
      si <- sigma[i]
      lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE,
                      weights=weights, ...)
      cv[i] <- sum(log(lamx))
    }
  } else {
    for(i in 1:ns) {
      si <- sigma[i]
      lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE,
                      weights=weights, ...)
      lam <- density(X, sigma=si, weights=weights, ...)
      cv[i] <- sum(log(lamx)) - integral.im(lam)
    }
  }
  result <- bw.optim(cv, sigma, iopt=which.max(cv),
                     creator="bw.ppl",
                     criterion="Likelihood Cross-Validation",
                     warnextreme=warn, hargnames="srange",
                     unitname=unitname(X))
  return(result)
}
spatstat.core/R/derivfv.R0000644000176200001440000001040014144333462015036 0ustar liggesusers#
# derivfv.R
#
#  differentiation for fv objects
#
#  $Revision: 1.7 $ $Date: 2018/09/28 05:12:08 $
#

deriv.fv <- local({

  derivative <- function(y, r, ...) {
    ss <- smooth.spline(r, y, ...)
    predict(ss, r, deriv=1)$y
  }

  deriv.fv <- function(expr, which="*", ...,
                       method=c("spline", "numeric"),
                       kinks=NULL,
                       periodic=FALSE,
                       Dperiodic=periodic) {
    f <- expr
    method <- match.arg(method)
    ## select columns
    ## if(length(which) == 1L && which %in% .Spatstat.FvAbbrev) {
    if(length(which) == 1L) {
      if(which == ".x")
        stop("Cannot smooth the function argument")
      which <- fvnames(f, which)
    }
    if(any(nbg <- !(which %in% names(f))))
      stop(paste("Unrecognised column",
                 ngettext(sum(nbg), "name", "names"),
                 commasep(sQuote(which[nbg])),
                 "in argument", sQuote("which")))
    relevant <- names(f) %in% which
    ## get
    rname <- fvnames(f, ".x")
    df <- as.data.frame(f)
    rpos <- which(colnames(df) == rname)
    rvals <- df[,rpos]
    yvals <- df[,relevant,drop=FALSE]
    nr <- length(rvals)
    ##
    if(Dperiodic) {
      ## Derivative should be periodic
      ## Recycle data to imitate periodicity
      DR <- diff(range(rvals))
      rvals <- c(rvals[-nr] - DR, rvals, rvals[-1L] + DR)
      yleft <- yvals[-nr, , drop=FALSE]
      yright <- yvals[-1L, , drop=FALSE]
      if(!periodic) {
        ## original data are not periodic (e.g. cdf of angular variable)
        ## but derivative must be periodic
        jump <- matrix(as.numeric(yvals[nr,] - yvals[1L, ]),
                       nr-1L, ncol(yvals), byrow=TRUE)
        yleft <- yleft - jump
        yright <- yright + jump
      }
      yvals <- rbind(yleft, yvals, yright)
      actual <- nr:(2*nr - 1L)
      NR <- length(rvals)
    } else {
      NR <- nr
      actual <- 1:nr
    }
    ## cut x axis into intervals?
if(is.null(kinks)) { cutx <- factor(rep(1, NR)) } else { rr <- range(rvals) if(periodic) kinks <- c(kinks-DR, kinks, kinks+DR) breaks <- sortunique(kinks) if(breaks[1L] > rr[1L]) breaks <- c(rr[1L], breaks) if(max(breaks) < rr[2L]) breaks <- c(breaks, rr[2L]) cutx <- cut(rvals, breaks=breaks, include.lowest=TRUE) } ## process for(segment in levels(cutx)) { ii <- (cutx == segment) yy <- yvals[ii, , drop=FALSE] switch(method, numeric = { dydx <- apply(yy, 2, diff)/diff(rvals[ii]) nd <- nrow(dydx) dydx <- rbind(dydx, dydx[nd, ]) }, spline = { dydx <- apply(yy, 2, derivative, r=rvals[ii], ...) }) df[ii[actual], relevant] <- dydx[ actual, ] } ## pack up result <- f result[,] <- df ## tweak name of function if(!is.null(yl <- attr(f, "ylab"))) attr(result, "ylab") <- substitute(bold(D)~Fx, list(Fx=yl)) if(!is.null(ye <- attr(f, "yexp"))) attr(result, "yexp") <- substitute(bold(D)~Fx, list(Fx=ye)) ## tweak mathematical labels attr(result, "labl")[relevant] <- paste0("bold(D)~", attr(f, "labl")[relevant]) return(result) } deriv.fv }) increment.fv <- function(f, delta) { stopifnot(is.fv(f)) check.1.real(delta) stopifnot(delta > 0) half <- delta/2 xx <- with(f, .x) ynames <- fvnames(f, ".") yy <- as.data.frame(lapply(ynames, function(a, xx, f, h) { g <- as.function(f, value=a) g(xx+h)-g(xx-h) }, xx=xx, f=f, h=half)) Y <- f Y[,ynames] <- yy ## tweak name of function if(!is.null(yl <- attr(f, "ylab"))) attr(Y, "ylab") <- substitute(Delta~Fx, list(Fx=yl)) if(!is.null(ye <- attr(f, "yexp"))) attr(Y, "yexp") <- substitute(Delta~Fx, list(Fx=ye)) ## tweak mathematical labels relevant <- colnames(Y) %in% ynames attr(Y, "labl")[relevant] <- paste0("Delta~", attr(f, "labl")[relevant]) ## tweak recommended range attr(Y, "alim") <- intersect.ranges(attr(f, "alim"), range(xx) + c(1,-1)*half) return(Y) } spatstat.core/R/densityVoronoi.R0000644000176200001440000000476514144333462016443 0ustar liggesusers#' #' densityVoronoi.R #' #' $Revision: 1.18 $ $Date: 2019/04/25 04:03:11 $ #' densityVoronoi <- function(X, ...) { UseMethod("densityVoronoi") } densityVoronoi.ppp <- function(X, f=1, ..., counting=FALSE, fixed=FALSE, nrep=1, verbose=TRUE) { stopifnot(is.ppp(X)) nX <- npoints(X) check.1.real(f) if(badprobability(f)) stop("f should be a probability between 0 and 1") check.1.integer(nrep) stopifnot(nrep >= 1) duped <- anyDuplicated(X) ## ntess <- floor(f * nX) if(ntess == 0) { ## naive estimate of intensity if(f > 0 && verbose) splat("Tiny threshold: returning uniform intensity estimate") W <- X$window lam <- nX/area(W) return(as.im(lam, W, ...)) } if(ntess == nX) { ## Voronoi/Dirichlet estimate if(!duped) { tes <- dirichlet(X) tesim <- nnmap(X, what="which", ...) num <- 1 } else { UX <- unique(X) tes <- dirichlet(UX) tesim <- nnmap(UX, what="which", ...) 
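      ## duplicated points: each tile of the tessellation of the unique
      ## pattern receives mass equal to the number of data points at
      ## its nucleus, computed from 'idx' below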
idx <- nncross(X, UX, what="which") num <- as.integer(table(factor(idx, levels=seq_len(npoints(UX))))) } lam <- num/tile.areas(tes) out <- eval.im(lam[tesim]) return(out) } if(nrep > 1) { ## estimate is the average of nrep randomised estimates total <- 0 if(verbose) cat(paste("Computing", nrep, "intensity estimates...")) state <- list() for(i in seq_len(nrep)) { estimate <- densityVoronoi(X, f, ..., counting=counting, fixed=fixed, nrep=1) total <- eval.im(total + estimate) if(verbose) state <- progressreport(i, nrep, state=state) } if(verbose) cat("Done.\n") average <- eval.im(total/nrep) return(average) } ## perform thinning if(!fixed) { itess <- thinjump(nX, f) tessfrac <- f } else { itess <- sample(seq_len(nX), ntess, replace=FALSE) tessfrac <- as.numeric(ntess)/nX } Xtess <- X[itess] if(duped) Xtess <- unique(Xtess) ## make tessellation tes <- dirichlet(Xtess) ## estimate intensity in each tile if(!counting) { tilemass <- 1 expansion <- 1/tessfrac } else { Xcount <- X[-itess] tilemap <- tileindex(Xcount$x, Xcount$y, tes) tilemass <- as.numeric(table(tilemap)) expansion <- 1/(1-tessfrac) } lam <- expansion * tilemass/tile.areas(tes) ## estimate of intensity at each location tesim <- nnmap(Xtess, what="which", ...) out <- eval.im(lam[tesim]) return(out) } spatstat.core/R/rppm.R0000644000176200001440000000740514144333463014361 0ustar liggesusers#' #' rppm.R #' #' Recursive Partitioning for Point Process Models #' #' $Revision: 1.14 $ $Date: 2020/12/19 05:25:06 $ rppm <- function(..., rpargs=list()) { ## do the equivalent of ppm(...) cl <- match.call() cl[[1]] <- as.name('ppm') if("rpargs" %in% names(cl)) cl$rpargs <- NULL cl$forcefit <- TRUE pfit <- eval(cl, envir=parent.frame()) ## if(!is.poisson(pfit)) warning("Interpoint interaction will be ignored", call.=FALSE) df <- getglmdata(pfit) gf <- getglmfit(pfit) sf <- getglmsubset(pfit) rp <- do.call(rpart, resolve.defaults(list(formula=formula(gf), data=df, subset=sf, weights=df$.mpl.W), rpargs, list(method="poisson"))) result <- list(pfit=pfit, rp=rp) class(result) <- c("rppm", class(result)) return(result) } # undocumented as.ppm.rppm <- function(object) { object$pfit } print.rppm <- function(x, ...) { splat("Point process model with recursive partitioning") splat("Data:", sQuote(x$pfit$Qname)) splat("Covariates:", commasep(sQuote(variablesinformula(formula(x$pfit))))) splat("Regression tree:") print(x$rp) invisible(NULL) } plot.rppm <- local({ argsPlotRpart <- c("x", "uniform", "branch", "compress", "margin", "minbranch") argsTextRpart <- c("splits", "label", "FUN", "all", "pretty", "digits", "use.n", "fancy", "fwidth", "fheight", "bg", "minlength") plot.rppm <- function(x, ..., what=c("tree", "spatial"), treeplot=NULL) { xname <- short.deparse(substitute(x)) what <- match.arg(what) switch(what, tree = { xrp <- x$rp if(is.function(treeplot)) return(treeplot(xrp, ...)) dont.complain.about(xrp) out <- do.call.matched(plot, list(x=quote(xrp), ...), funargs=argsPlotRpart, extrargs=graphicsPars("plot")) # note: plot.rpart does not pass arguments to 'lines' do.call.matched(text, list(x=quote(xrp), ...), funargs=argsTextRpart, extrargs=graphicsPars("text")) }, spatial = { p <- predict(x) dont.complain.about(p) out <- do.call("plot", resolve.defaults(list(x=quote(p)), list(...), list(main=xname))) }) return(invisible(out)) } plot.rppm }) #' prune method prune.rppm <- function(tree, ...) { tree$rp <- rpart::prune(tree$rp, ...) return(tree) } #' predict method predict.rppm <- function(object, ...) 
{ model <- object$pfit tree <- object$rp #' assemble covariates for prediction, using rules of predict.ppm co <- predict(model, ..., type="covariates", check=FALSE, repair=FALSE) newdata <- co$newdata masque <- co$mask #' perform prediction using the tree pred <- predict(tree, newdata=co$newdata) #' pack up appropriately if(is.null(masque)) return(pred) imago <- as.im(masque, value=1.0) if(!is.marked(model)) { out <- imago out[] <- pred } else { lev <- levels(marks(data.ppm(model))) nlev <- length(lev) out <- rep(list(imago), nlev) names(out) <- lev splitpred <- split(pred, newdata$marks) for(i in seq_len(nlev)) out[[i]][] <- splitpred[[i]] out <- as.solist(out) } return(out) } fitted.rppm <- function(object, ...) { predict(object, locations=data.ppm(object$pfit)) } spatstat.core/R/rmhexpand.R0000644000176200001440000001454314144333463015372 0ustar liggesusers# # rmhexpand.R # # Rules/data for expanding the simulation window in rmh # # $Revision: 1.8 $ $Date: 2016/02/11 10:17:12 $ # # Establish names and rules for each type of expansion RmhExpandRule <- local({ .RmhExpandTable <- list(area=list(descrip ="Area expansion factor", minval = 1, expands = function(x) { unname(x) > 1 }), length=list(descrip ="Length expansion factor", minval = 1, expands = function(x) { unname(x) > 1 }), distance=list(descrip="Expansion buffer distance", minval = 0, expands = function(x) { unname(x) > 0 })) RmhExpandRule <- function(nama) { if(length(nama) == 0) nama <- "area" if(length(nama) > 1) stop("Internal error: too many names in RmhExpandRule", call.=FALSE) if(!(nama %in% names(.RmhExpandTable))) stop(paste("Internal error: unrecognised expansion type", sQuote(nama)), call.=FALSE) return(.RmhExpandTable[[nama]]) } RmhExpandRule }) rmhexpand <- function(x=NULL, ..., area=NULL, length=NULL, distance=NULL) { trap.extra.arguments(..., .Context="In rmhexpand") # check for incompatibility n <- (!is.null(x)) + (!is.null(area)) + (!is.null(length)) + (!is.null(distance)) if(n > 1) stop("Only one argument should be given") # absorb other arguments into 'x' if(is.null(x) && n > 0) { if(!is.null(area)) x <- c(area=area) if(!is.null(length)) x <- c(length=length) if(!is.null(distance)) x <- c(distance=distance) } if(is.null(x)) { # No expansion rule supplied. # Use spatstat default, indicating that the user did not choose it. force.exp <- force.noexp <- FALSE x <- spatstat.options("expand") x <- rmhexpand(x)$expand } else { # process x if(inherits(x, "rmhexpand")) return(x) if(is.owin(x)) { force.exp <- TRUE force.noexp <- FALSE } else { # expecting c(name=value) or list(name=value) if(is.list(x)) x <- unlist(x) if(!is.numeric(x)) stop(paste("Expansion argument must be either", "a number, a window, or NULL.\n")) # x is numeric check.1.real(x, "In rmhexpand(x)") explain.ifnot(is.finite(x), "In rmhexpand(x)") # an unlabelled numeric value is interpreted as an area expansion factor if(!any(nzchar(names(x)))) names(x) <- "area" # validate rule <- RmhExpandRule(names(x)) if(x < rule$minval) { warning(paste(rule$descrip, "<", rule$minval, "has been reset to", rule$minval), call.=FALSE) x[] <- rule$minval } force.exp <- rule$expands(x) force.noexp <- !force.exp } } result <- list(expand=x, force.exp=force.exp, force.noexp=force.noexp) class(result) <- "rmhexpand" return(result) } .no.expansion <- list(expand=c(area=1), force.exp=FALSE, force.noexp=TRUE) class(.no.expansion) <- "rmhexpand" print.rmhexpand <- function(x, ..., prefix=TRUE) { if(prefix) cat("Expand the simulation window? 
") if(x$force.noexp) { cat("No.\n") } else { if(x$force.exp) cat("Yes:\n") else cat("Not determined. Default is:\n") y <- x$expand if(is.null(y)) { print(rmhexpand(spatstat.options("expand")), prefix=FALSE) } else if(is.numeric(y)) { descrip <- RmhExpandRule(names(y))$descrip cat(paste("\t", descrip, unname(y), "\n")) } else { print(y) } } return(invisible(NULL)) } summary.rmhexpand <- function(object, ...) { decided <- with(object, force.exp || force.noexp) ex <- object$expand if(is.null(ex)) ex <- rmhexpand(spatstat.options("expand"))$expand if(is.owin(ex)) { willexpand <- TRUE descrip <- "Window" } else if(is.numeric(ex)) { rule <- RmhExpandRule(names(ex)) descrip <- rule$descrip willexpand <- if(object$force.exp) TRUE else if(object$force.noexp) FALSE else (unname(ex) > rule$minval) } else stop("Internal error: unrecognised format in summary.rmhexpand", call.=FALSE) out <- list(rule.decided=decided, window.decided=decided && is.owin(ex), expand=ex, descrip=descrip, willexpand=willexpand) class(out) <- "summary.rmhexpand" return(out) } print.summary.rmhexpand <- function(x, ...) { cat("Expansion rule\n") ex <- x$expand if(x$window.decided) { cat("Window is decided.\n") print(ex) } else { if(x$rule.decided) { cat("Rule is decided.\n") } else { cat("Rule is not decided.\nDefault is:\n") } if(!x$willexpand) { cat("No expansion\n") } else { if(is.numeric(ex)) cat(paste(x$descrip, ex, "\n")) else print(ex) } } return(invisible(NULL)) } expand.owin <- function(W, ...) { ex <- list(...) if(length(ex) > 1) stop("Too many arguments") # get an rmhexpand object if(inherits(ex[[1]], "rmhexpand")) { ex <- ex[[1]] } else ex <- do.call(rmhexpand, ex) f <- ex$expand if(is.null(f)) return(W) if(is.owin(f)) return(f) if(!is.numeric(f)) stop("Format not understood") switch(names(f), area = { if(f == 1) return(W) bb <- boundingbox(W) xr <- bb$xrange yr <- bb$yrange fff <- (sqrt(f) - 1)/2 Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr)) }, length = { if(f == 1) return(W) bb <- boundingbox(W) xr <- bb$xrange yr <- bb$yrange fff <- (f - 1)/2 Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr)) }, distance = { if(f == 0) return(W) Wexp <- if(is.rectangle(W)) grow.rectangle(W, f) else dilation(W, f) }, stop("Internal error: unrecognised type") ) return(Wexp) } will.expand <- function(x) { stopifnot(inherits(x, "rmhexpand")) if(x$force.exp) return(TRUE) if(x$force.noexp) return(FALSE) return(summary(x)$willexpand) } is.expandable <- function(x) { UseMethod("is.expandable") } change.default.expand <- function(x, newdefault) { stopifnot(inherits(x, "rmhexpand")) decided <- with(x, force.exp || force.noexp) if(!decided) x$expand <- rmhexpand(newdefault)$expand return(x) } spatstat.core/R/detpointprocfamilyfun.R0000644000176200001440000004521614144333462020031 0ustar liggesusers## detpointprocfamilyfun.R ## ## $Revision: 1.8 $ $Date: 2021/07/14 09:44:04 $ ## ## This file contains the function `detpointprocfamilyfun' ## to define new DPP model family functions ## and a print method for class `detpointprocfamilyfun' ## as well as the currently defined ## - dppBessel ## - dppCauchy ## - dppGauss ## - dppMatern ## - dppPowerExp detpointprocfamilyfun <- local({ names_formals <- function(f, dots = FALSE){ nam <- names(formals(f)) if(!dots) nam <- nam[nam!="..."] return(nam) } detpointprocfamilyfun <- function(kernel=NULL, specden=NULL, basis="fourierbasis", convkernel=NULL, Kfun=NULL, valid=NULL, intensity=NULL, dim=2, name="User-defined", isotropic=TRUE, range=NULL, parbounds=NULL, specdenrange=NULL, 
startpar=NULL, ...) { ## Check which functions are given, check them for sanity and ## extract argument names and other stuff given <- NULL if(!is.null(kernel)){ if(!is.function(kernel)) stop("If kernel is given it must be a function.") given <- "kernel" kernelnames <- names_formals(kernel) if(length(kernelnames)<1L) stop("kernel function must have at least one argument") kernelnames <- kernelnames[-1L] } if(!is.null(specden)){ if(!is.function(specden)) stop("If specden is given it must be a function.") given <- c(given, "specden") specdennames <- names_formals(specden) if(length(specdennames)<1L) stop("specden function must have at least one argument") specdennames <- specdennames[-1L] } if(is.null(given)) stop("At least one of kernel or specden must be provided.") if(length(given)==2){ if(!setequal(kernelnames,specdennames)) stop("argument names of kernel and specden must match.") } if(is.element("kernel",given)){ parnames <- kernelnames } else{ parnames <- specdennames } if(!is.null(convkernel)){ given <- c(given,"convkernel") if(!is.function(convkernel)||length(formals(convkernel))<2) stop("If convkernel is given it must be a function with at least two arguments.") if(!setequal(parnames,names_formals(convkernel)[-(1:2)])) stop("argument names of convkernel must match argument names of kernel and/or specden.") } if(!is.null(Kfun)){ given <- c(given,"Kfun") if(!is.function(Kfun)||length(formals(Kfun))<1L) stop("If Kfun is given it must be a function with at least one argument.") if(!setequal(parnames,names_formals(Kfun)[-1L])) stop("argument names of Kfun must match argument names of kernel and/or specden.") } if(!is.null(valid)){ if(!(is.function(valid)&&setequal(parnames,names_formals(valid)))) stop("argument names of valid must match argument names of kernel and/or specden.") } else{ warning("No function for checking parameter validity provided. ANY numerical value for the parameters will be accepted.") } if(!is.null(intensity)&&!(is.character(intensity)&&length(intensity)==1L&&is.element(intensity, parnames))) stop("argument intensity must be NULL or have length one, be of class character and match a parameter name") if(!(is.character(dim)|is.numeric(dim))|length(dim)!=1L) stop("argument dim must have length one and be of class character or numeric") if(is.character(dim)){ if(!is.element(dim, parnames)) stop("When dim is a character it must agree with one of the parameter names of the model") } else{ dim <- round(dim) if(dim<1L) stop("When dim is a numeric it must be a positive integer") } ## Catch extra unknown args (will be appended to output object below). dots <- list(...) ## Create output object. out <- function(...){ caller <- match.call()[[1L]] caller <- eval(substitute(caller), parent.frame()) fixedpar <- list(...) nam <- names(fixedpar) if(length(fixedpar)>0&&is.null(nam)) stop(paste("Named arguments are required. Please supply parameter values in a", sQuote("tag=value"), "form")) match <- is.element(nam, parnames) if(sum(!match)>0) warning(paste("Not all supplied argument(s) make sense. Valid arguments are: ", paste(parnames, collapse = ", "), ". 
The following supplied argument(s) will be ignored: ", paste(nam[!match], collapse = ", "), sep = "")) fixedpar <- fixedpar[match] ## Code to always fix the dimension to a numeric when calling the function ####### if(is.character(dim) && !is.element(dim,names(fixedpar))){ dimpar <- structure(list(2), .Names=dim) fixedpar <- c(fixedpar, dimpar) } ## Detect inhomogeneous intensity (an image), and replace by max and an image for thinning thin <- NULL if(!is.null(intensity)){ lambda <- getElement(fixedpar, intensity) if(is.im(lambda)){ lambdamax <- max(lambda) thin <- lambda/lambdamax fixedpar[[intensity]] <- lambdamax } } obj <- list(fixedpar = fixedpar, freepar = parnames[!is.element(parnames,names(fixedpar))], kernel = kernel, specden = specden, convkernel = convkernel, intensity = intensity, thin = thin, dim = dim, name = name, range = range, valid = valid, parbounds = parbounds, specdenrange = specdenrange, startpar = startpar, isotropic = isotropic, caller = caller, basis = basis ) obj <- append(obj, dots) class(obj) <- "detpointprocfamily" return(obj) } class(out) <- c("detpointprocfamilyfun", "pointprocfamilyfun", class(out)) attr(out, "parnames") <- parnames attr(out, "name") <- name return(out) } detpointprocfamilyfun } ) print.detpointprocfamilyfun <- function(x, ...){ cat(paste(attr(x, "name"), "determinantal point process model family\n")) cat("The parameters of the family are:\n") cat(attr(x, "parnames"), sep = ", ") cat("\n") invisible(NULL) } dppBessel <- detpointprocfamilyfun( name="Bessel", kernel=function(x, lambda, alpha, sigma, d){ a <- 0.5*(sigma+d) y <- abs(x/alpha) # Kernel: lambda*2^a*gamma(a+1)*besselJ(2*y*sqrt(a),a) / (2*y*sqrt(a))^a logrslt <- log(lambda) + a*log(2) + lgamma(a+1) - a*log(2*y*sqrt(a)) rslt <- exp(logrslt) * besselJ(2*y*sqrt(a), a) rslt[x==0] <- lambda return(rslt) }, Dpcf=function(x, lambda, alpha, sigma, d){ a <- 0.5*(sigma+d) z <- sqrt(a)*x/alpha dalpha <- ifelse(x==0, 0, -2*gamma(a+1)^2*besselJ(x = 2*z, nu=a)*alpha^{2*a-2}*(x*sqrt(a))^(-2*a)*( a*alpha*besselJ(x=2*z, nu=a) - x*sqrt(a)*(besselJ(x = 2*z, nu = a - 1) - besselJ(x = 2*z, nu = a + 1)))) dsigma <- ifelse(x == 0, 0, -2 * besselJ(x = 2*z, nu=a) * gamma(a+1)^2/z^a * ((0.5 * besselJ(x = 2*z, nu=a)*digamma(a+1) + 0.5 * x * 0.5 * (besselJ(x = 2*z, nu = a-1) - besselJ(x=2*z, nu=a+1))/(alpha*sqrt(a)))/z^a - (0.5 * z^a * (0.5 * log(a) + log(x) - log(alpha)) + x * 2*a * z^(a-1)/(8 * alpha * sqrt(a))) * besselJ(x = 2*z, nu=a)/z^(2*a))) return(c(lambda=0, alpha=dalpha, sigma=dsigma)) }, specden=function(x, lambda, alpha, sigma, d){ a <- sigma+d # specden: lambda*(2*pi)^(d/2)*alpha^d*gamma(0.5*a+1)/a^(d/2)/gamma(sigma/2+1)*(1-2*pi^2*alpha^2*x^2/a)^(sigma/2) logrslt <- log(lambda) + (d/2)*log(2*pi) + d*log(alpha) + lgamma(0.5*a+1) logrslt <- logrslt - (d/2)*log(a) - lgamma(sigma/2+1) tmp <- 1-2*pi^2*alpha^2*x^2/a suppressWarnings({ logrslt <- logrslt + ifelse(tmp<0, -Inf, (sigma/2)*log(tmp)) }) return(exp(logrslt)) }, specdenrange=function(model){ p <- model$fixedpar sqrt((p$sigma+p$d)/(2*pi^2*p$alpha^2)) }, valid=function(lambda, alpha, sigma, d){ a <- sigma+d OK <- lambda>0 && alpha>0 && d>=1 && sigma>=0 if(!OK) return(FALSE) ## Upper bound for alpha (using log-scale) lognum <- log(a^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log( lambda*(2*pi^(0.5*d))) + lgamma(0.5*a+1) logalphamax <- (1/d) * (lognum - logdenom) return(OK && log(alpha) <= logalphamax) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, sigma, d){ lognum <- log((sigma+d)^(0.5*d)) + 
lgamma(0.5*sigma+1) logdenom <- log(2*pi^(0.5*d)) + lgamma(0.5*(sigma+d)+1) switch(name, lambda = c(0, exp(lognum - log( alpha^d) - logdenom)) , alpha = c(0, exp((1/d) * (lognum - log(lambda) - logdenom))), sigma = c(0, switch(as.character(d), "2"=Inf, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("d" %in% model$freepar){ model <- update(model, d=spatdim(X)) } if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("sigma" %in% model$freepar){ sigma <- 2 while(!is.na(OK <- valid(model <- update(model, sigma=sigma)))&&!OK) sigma <- sigma/2 rslt <- c(rslt, "sigma" = sigma) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppCauchy <- detpointprocfamilyfun( name="Cauchy", kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda * (1+(x/alpha)^2)^(-nu-d/2) rslt[x==0] <- lambda return(rslt) }, Dpcf=function(x, lambda, alpha, nu, d){ dalpha <- (-4*nu-2*d)*x^2*alpha^(-3)*((x/alpha)^2 + 1)^(-2*nu - d - 1) dnu <- 2*log1p((x/alpha)^2)*((x/alpha)^2 + 1)^(-2*nu - d) return(c(lambda=0, alpha=dalpha, nu=dnu)) }, specden=function(x, lambda, alpha, nu, d){ y <- 2*x*alpha*pi rslt <- lambda * y^nu * besselK(x = y, nu = nu) * (sqrt(pi)*alpha)^d * exp((1-nu)*log(2) - lgamma(nu+d/2)) rslt[x==0] <- lambda * exp(lgamma(nu) - lgamma(nu+d/2)) * (sqrt(pi)*alpha)^d return(rslt) }, Kfun = function(x, lambda, alpha, nu, d){ rslt <- pi*x^2 - pi*alpha^2/(2*nu+1) * (1 - (alpha^2/(alpha^2+x^2))^(2*nu+1)) rslt[rslt<0] <- 0 return(rslt) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha * sqrt((1-bound)^(-1/(2*nu+d))-1)) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu+d/2)-lgamma(nu))/lambda)^(1/d)/sqrt(pi)), ## nu bound only implemented for d = 2. 
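## (for d = 2 the existence condition lambda <= nu/(pi*alpha^2)
## rearranges to the lower bound nu >= pi*lambda*alpha^2 used here)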
nu = c(switch(as.character(d), "2"=pi*lambda*alpha^2, NA), Inf), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ rslt <- lambda*exp(-(x/alpha)^2) return(rslt) }, Dpcf=function(x, lambda, alpha, d){ dalpha <- -4*x^2/alpha^3*exp(-(x/alpha)^2)^2 return(c(lambda=0, alpha=dalpha)) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppMatern <- detpointprocfamilyfun( name="Whittle-Matern", kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda*2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) rslt[x==0] <- lambda return(rslt) }, Dpcf=function(x, lambda, alpha, nu, d){ s <- besselK(x = x/alpha, nu = nu) dalpha <- 2 * (2^(1 - nu) * x * s * (x/alpha)^nu/(alpha^2 * gamma(nu)^2) * (-2^(1 - nu) * (0.5 * (besselK(x = x/alpha, nu = nu + 1) + besselK(x = x/alpha, nu = nu - 1))) * (x/alpha)^nu + 2^(1 - nu) * nu * s * (x/alpha)^(nu - 1))) dnu <- -2/gamma(nu)^2 * (2^(1-nu)*besselK(x = x/alpha, nu = nu)*(x/alpha)^nu)^2*(log(x/(2*alpha)) + digamma(nu)) return(c(lambda=0, alpha=dalpha, nu=dnu)) }, specden=function(x, lambda, alpha, nu, d){ lambda * exp(lgamma(nu+d/2) - lgamma(nu)) * (2*sqrt(pi)*alpha)^d * (1+(2*x*alpha*pi)^2)^(-nu-d/2) }, convkernel=function(x, k, lambda, alpha, nu, d){ nu2 <- k*(nu+d/2)-d/2 logres <- (nu2)*log(x/alpha) + log(besselK(x = x/alpha, nu = nu2, expon.scaled = TRUE)) - x/alpha logres[x == 0] <- (nu2-1)*log(2) + lgamma(nu2) logres <- logres + k*log(lambda) + k*(lgamma(nu+d/2)-lgamma(nu)) + (d*k-d+1-nu2)*log(2) + d*(k-1)*log(sqrt(pi)*alpha) - lgamma(nu2+d/2) index <- which(logres == Inf) logres[index] <- -Inf return(exp(logres)) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! 
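## The last inequality is the DPP existence condition: the spectral
## density at the origin, lambda * Gamma(nu+d/2)/Gamma(nu) * (2*sqrt(pi)*alpha)^d,
## must not exceed 1.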
lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99, exact = FALSE){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!is.logical(exact)) stop("Argument exact must be a logical.") if(!exact&&d==2) return(alpha * sqrt(8*nu)) ## range suggested by Haavard Rue et al. if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") fun <- function(x) sqrt(1-bound)-2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) return(uniroot(fun, c(sqrt(.Machine$double.eps),1e3*alpha*sqrt(nu)))$root) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu)-lgamma(nu+d/2))/lambda)^(1/d)/2/sqrt(pi)), ## nu bound only implemented for d = 2 and d = 4. nu = c(0, switch(as.character(d), "2"=1/(4*pi*lambda*alpha^2), "4"=sqrt(1/4+1/(lambda*16*pi*pi*alpha^4))-1/2, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppPowerExp <- detpointprocfamilyfun( name="Power Exponential Spectral", specden=function(x, lambda, alpha, nu, d){ lambda * gamma(d/2+1) * alpha^d / (pi^(d/2)*gamma(d/nu+1)) * exp(-(alpha*x)^nu) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=20 && d>=1 && lambda <= pi^(d/2)*gamma(d/nu+1) / (gamma(1+d/2)*alpha^d) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, pi^(d/2)*gamma(d/nu+1) / (gamma(d/2+1)*alpha^d)), alpha = c(0, (pi^(d/2)*gamma(d/nu+1) / (lambda * gamma(d/2+1)))^(1/d)), nu = c(NA, NA), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) spatstat.core/R/fgk3.R0000644000176200001440000003770714144333462014244 0ustar liggesusers# # $Revision: 1.28 $ $Date: 2021/01/07 03:08:41 $ # # Estimates of F, G and K for three-dimensional point patterns # # # ............ user interface ............................. 
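#
# Example usage (a minimal sketch, not part of the package code):
#   X <- rpoispp3(100)        # Poisson pattern in the unit box
#   K <- K3est(X)             # translation & isotropic corrected estimates
#   G <- G3est(X)
#   plot(K)
#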
# K3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), ratio=FALSE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In K3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) np <- npoints(X) denom <- np * (np-1)/volume(B) # this will be the output data frame K <- data.frame(r=r, theo= (4/3) * pi * r^3) desc <- c("distance argument r", "theoretical Poisson %s") K <- ratfv(K, NULL, denom, "r", quote(K[3](r)), "theo", NULL, c(0,rmax/2), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "3"), ratio=ratio) # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation") K <- bind.ratfv(K, data.frame(trans=u$num), u$denom, "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction %in% "isotropic")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic") K <- bind.ratfv(K, data.frame(iso=u$num), u$denom, "{hat(%s)[%s]^{iso}}(r)", "isotropic-corrected estimate of %s", "iso", ratio=ratio) } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) return(K) } G3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("rs", "km", "Hanisch")) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Hanisch="han", hanisch="han", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In G3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) lambda <- nrow(coo)/volume(B) # this will be the output data frame G <- data.frame(r=r, theo= 1 - exp( - lambda * (4/3) * pi * r^3)) desc <- c("distance argument r", "theoretical Poisson %s") G <- fv(G, "r", substitute(G3(r), NULL), "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="G3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # collect four histograms for censored data u <- g3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval) if("rs" %in% correction) G <- bind.fv(G, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) G <- bind.fv(G, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("han" %in% correction) G <- bind.fv(G, data.frame(han=u$han), "%s[han](r)", "Normalised Hanisch estimate of %s", "han") # default is to display them all formula(G) <- . 
~ r unitname(G) <- unitname(X) return(G) } F3est <- function(X, ..., rmax=NULL, nrval=128, vside=NULL, correction=c("rs", "km", "cs"), sphere=c("fudge", "ideal", "digital")) { stopifnot(inherits(X, "pp3")) sphere <- match.arg(sphere) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Kaplan="km", cs="cs", CS="cs", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In F3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) vol <- volume(B) lambda <- nrow(coo)/vol # determine voxel size if(missing(vside)) { voxvol <- vol/spatstat.options("nvoxel") vside <- voxvol^(1/3) # ensure the shortest side is a whole number of voxels s <- shortside(B) m <- ceiling(s/vside) vside <- s/m } # compute theoretical value switch(sphere, ideal = { volsph <- (4/3) * pi * r^3 spherename <- "ideal sphere" }, fudge = { volsph <- 0.78 * (4/3) * pi * r^3 spherename <- "approximate sphere" }, digital = { volsph <- digital.volume(c(0, rmax), nrval, vside) spherename <- "digital sphere" }) theo.desc <- paste("theoretical Poisson %s using", spherename) # this will be the output data frame FF <- data.frame(r = r, theo = 1 - exp( - lambda * volsph)) desc <- c("distance argument r", theo.desc) labl <- c("r","%s[pois](r)") FF <- fv(FF, "r", substitute(F3(r), NULL), "theo", , c(0,rmax/2), labl, desc, fname="F3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # go u <- f3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, vside=vside) if("rs" %in% correction) FF <- bind.fv(FF, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) FF <- bind.fv(FF, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("cs" %in% correction) FF <- bind.fv(FF, data.frame(cs=u$cs), "%s[cs](r)", "Chiu-Stoyan estimate of %s", "cs") # default is to display them all formula(FF) <- . 
~ r unitname(FF) <- unitname(X) return(FF) } pcf3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In pcf3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) if(is.null(delta)) { lambda <- npoints(X)/volume(B) delta <- adjust * 0.26/lambda^(1/3) } if(biascorrect) { # bias correction rondel <- r/delta biasbit <- ifelseAX(rondel > 1, 1, (3/4)*(rondel + 2/3 - (1/3)*rondel^3)) } # this will be the output data frame g <- data.frame(r=r, theo=rep.int(1, length(r))) desc <- c("distance argument r", "theoretical Poisson %s") g <- fv(g, "r", quote(g[3](r)), "theo", , c(0,rmax/2), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("g", "3")) # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation", delta=delta) gt <- u$f if(biascorrect) gt <- gt/biasbit g <- bind.fv(g, data.frame(trans=gt), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction %in% "isotropic")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic", delta=delta) gi <- u$f if(biascorrect) gi <- gi/biasbit g <- bind.fv(g, data.frame(iso=gi), "{hat(%s)[%s]^{iso}}(r)", "isotropic-corrected estimate of %s", "iso") } # default is to display them all formula(g) <- . ~ r unitname(g) <- unitname(X) attr(g, "delta") <- delta return(g) } # ............ low level code .............................. 
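#
# Each engine below wraps a native routine (prefix "SC_") via .C and
# returns 'num' and 'denom' separately, so that callers such as K3est
# can construct ratio objects with bind.ratfv.
#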
# k3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation") { code <- switch(correction, translation=0, isotropic=1) res <- .C(SC_RcallK3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), PACKAGE="spatstat.core") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # # g3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=10, correction="Hanisch G3") { code <- switch(correction, "minus sampling"=1, "Hanisch G3"=3) res <- .C(SC_RcallG3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), PACKAGE="spatstat.core") return(list(range = c(0, rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # f3engine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, range=c(0,1.414), nval=25, correction="minus sampling") { # code <- switch(correction, "minus sampling"=1, no=0) res <- .C(SC_RcallF3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(vside), as.double(range[1L]), as.double(range[2L]), m=as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(code), PACKAGE="spatstat.core") r <- seq(from=range[1L], to=range[2L], length.out=nval) f <- with(res, ifelseXB(denom > 0, num/denom, 1)) return(list(r = r, f = f, num=res$num, denom=res$denom, correction=correction)) } f3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, rmax=1, nrval=25) { # res <- .C(SC_RcallF3cen, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(vside), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1L)), uppercen = as.integer(integer(1L)), PACKAGE="spatstat.core") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) cs <- H/max(H[is.finite(H)]) # return(list(rs=rs, km=km$km, hazard=km$lambda, cs=cs, r=r)) } g3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=25) { # res <- .C(SC_RcallG3cen, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = 
as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1L)), uppercen = as.integer(integer(1L)), PACKAGE="spatstat.core") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) han <- H/max(H[is.finite(H)]) return(list(rs=rs, km=km$km, hazard=km$lambda, han=han, r=r)) } pcf3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation", delta=rmax/10) { code <- switch(correction, translation=0, isotropic=1) res <- .C(SC_Rcallpcf3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), method=as.integer(code), delta=as.double(delta), PACKAGE="spatstat.core") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # ------------------------------------------------------------ # volume of a sphere (exact and approximate) # sphere.volume <- function(range=c(0,1.414), nval=10) { rr <- seq(from=range[1L], to=range[2L], length.out=nval) return( (4/3) * pi * rr^3) } digital.volume <- function(range=c(0, 1.414), nval=25, vside= 0.05) { # Calculate number of points in digital sphere # by performing distance transform for a single point # in the middle of a suitably large box # # This takes EIGHT TIMES AS LONG as the corresponding empirical F-hat !!! # w <- 2 * range[2L] + 2 * vside # dvol <- .C(SC_RcallF3, as.double(w/2), as.double(w/2), as.double(w/2), as.integer(1L), as.double(0), as.double(w), as.double(0), as.double(w), as.double(0), as.double(w), as.double(vside), as.double(range[1L]), as.double(range[2L]), as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(0), PACKAGE="spatstat.core")$num # (vside^3) * dvol } spatstat.core/R/aaaa.R0000644000176200001440000000251214144333461014256 0ustar liggesusers#' #' aaaa.R #' #' Code that must be read before the rest of the R code in spatstat #' #' $Revision: 1.6 $ $Date: 2020/11/30 13:17:22 $ #' #' ................................................................... #' intermaker: #' Class structure for functions like 'Strauss' #' so they print a nice description. #' intermaker <- function(f, blank) { # f is the creator function like 'Strauss' class(f) <- c("intermaker", class(f)) # blank is the prototype interaction object: extract some fields desired <- c("creator", "name", "par", "parnames", "pardesc") avail <- desired[desired %in% names(blank)] attr(f, "b") <- blank[avail] return(f) } print.intermaker <- function(x, ...) 
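#' prints a one-sentence description assembled from the prototype
#' interaction object stored by intermaker()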
{ b <- attr(x, "b") argh <- names(formals(x)) explain <- NULL if(length(argh) > 0) { desc <- b$pardesc %orifnull% b$parnames namep <- names(b$par) if(length(desc) == length(namep) && all(argh %in% namep)) { names(desc) <- namep explain <- paste(", where", commasep(paste(sQuote(argh), "is the", desc[argh]))) } } blah <- paste0("Function ", b$creator, paren(paste(argh, collapse=", ")), ": creates the interpoint interaction of the ", b$name, explain) splat(blah) return(invisible(NULL)) } spatstat.core/R/is.R0000644000176200001440000000013014141452520013773 0ustar liggesusers## is.R ## original for spatstat.core is.lppm <- function(x) { inherits(x, "lppm") } spatstat.core/R/envelope3.R0000644000176200001440000000560514144333462015302 0ustar liggesusers# # envelope3.R # # simulation envelopes for pp3 # # $Revision: 1.13 $ $Date: 2016/04/25 02:34:40 $ # envelope.pp3 <- function(Y, fun=K3est, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- K3est if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y) Yintens <- sY$intensity Ydomain <- Y$domain # expression that will be evaluated simexpr <- if(!is.marked(Y)) { # unmarked point pattern expression(rpoispp3(Yintens, domain=Ydomain)) } else { stop("Sorry, simulation of marked 3D point patterns is not yet implemented") } # suppress warnings from code checkers dont.complain.about(Yintens, Ydomain) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... 
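# (the 'simulate' argument is typically an expression, e.g.
#  simulate=expression(rpoispp3(100)), or a list of pre-computed
#  pp3 patterns; the accepted formats are those handled by
#  envelopeEngine)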
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, expected.arg=c("rmax", "nrval"), do.pwrong=do.pwrong) } spatstat.core/R/markcorr.R0000644000176200001440000007017414144333462015225 0ustar liggesusers## ## ## markcorr.R ## ## $Revision: 1.86 $ $Date: 2021/07/10 08:53:29 $ ## ## Estimate the mark correlation function ## and related functions ## ## ------------------------------------------------------------------------ markvario <- local({ halfsquarediff <- function(m1, m2) { ((m1-m2)^2)/2 } assigntheo <- function(x, value) { x$theo <- value; return(x) } markvario <- function(X, correction=c("isotropic", "Ripley", "translate"), r=NULL, method="density", ..., normalise=FALSE) { m <- onecolumn(marks(X)) if(!is.numeric(m)) stop("Marks are not numeric") if(missing(correction)) correction <- NULL ## compute reference value Ef weights <- pointweights(X, ..., parent=parent.frame()) Ef <- if(is.null(weights)) var(m) else weighted.var(m, weights) ## Compute estimates v <- markcorr(X, f=halfsquarediff, r=r, correction=correction, method=method, normalise=normalise, ..., internal=list(Ef=Ef)) if(is.fv(v)) v <- anylist(v) ## adjust theoretical value and fix labels theoval <- if(normalise) 1 else var(m) for(i in seq_len(length(v))) { v[[i]]$theo <- theoval v[[i]] <- rebadge.fv(v[[i]], quote(gamma(r)), "gamma") } if(length(v) == 1) v <- v[[1]] return(v) } markvario }) markconnect <- local({ indicateij <- function(m1, m2, i, j) { (m1 == i) & (m2 == j) } markconnect <- function(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { stopifnot(is.ppp(X) && is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] if(missing(j)) j <- lev[2] ## compute reference value Ef weights <- pointweights(X, ..., parent=parent.frame()) Ef <- if(is.null(weights)) mean(marx == i) * mean(marx == j) else mean(weights * (marx == i)) * mean(weights * (marx == j)) ## compute estimates p <- markcorr(X, f=indicateij, r=r, correction=correction, method=method, ..., fargs=list(i=i, j=j), normalise=normalise, internal=list(Ef=Ef)) ## alter theoretical value and fix labels if(!normalise) { pipj <- mean(marx==i) * mean(marx==j) p$theo <- pipj } else { p$theo <- 1 } p <- rebadge.fv(p, new.ylab=substitute(p[i,j](r), list(i=paste(i),j=paste(j))), new.fname=c("p", paste0("list(", i, ",", j, ")")), new.yexp=substitute(p[list(i,j)](r), list(i=paste(i),j=paste(j)))) return(p) } markconnect }) Emark <- local({ f1 <- function(m1, m2) { m1 } Emark <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { stopifnot(is.ppp(X) && is.marked(X)) marx <- marks(X) isvec <- is.vector(marx) && is.numeric(marx) isdf <- is.data.frame(marx) && all(sapply(as.list(marx), is.numeric)) if(!(isvec || isdf)) stop("All marks of X should be numeric") if(missing(correction)) correction <- NULL E <- markcorr(X, f1, r=r, correction=correction, 
method=method, ..., normalise=normalise) if(isvec) { E <- rebadge.fv(E, quote(E(r)), "E") } else { E[] <- lapply(E, rebadge.fv, new.ylab=quote(E(r)), new.fname="E") } return(E) } Emark }) Vmark <- local({ f2 <- function(m1, m2) { m1^2 } Vmark <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { if(missing(correction)) correction <- NULL E <- Emark(X, r=r, correction=correction, method=method, ..., normalise=FALSE) E2 <- markcorr(X, f2, r=E$r, correction=correction, method=method, ..., normalise=FALSE) if(normalise) { sig2 <- var(marks(X)) if(is.matrix(sig2)) sig2 <- diag(sig2) } if(is.fv(E)) { E <- list(E) E2 <- list(E2) } V <- list() for(i in seq_along(E)) { Ei <- E[[i]] E2i <- E2[[i]] Vi <- eval.fv(E2i - Ei^2) if(normalise) Vi <- eval.fv(Vi/sig2[i]) Vi <- rebadge.fv(Vi, quote(V(r)), "V") attr(Vi, "labl") <- attr(Ei, "labl") V[[i]] <- Vi } if(length(V) == 1) return(V[[1]]) V <- as.anylist(V) names(V) <- colnames(marks(X)) return(V) } Vmark }) ############## workhorses 'markcorr' and 'markcorrint' #################### markcorrint <- Kmark <- function(X, f=NULL, r=NULL, correction=c("isotropic", "Ripley", "translate"), ..., f1=NULL, normalise=TRUE, returnL=FALSE, fargs=NULL) { ## Computes the analogue of Kest(X) ## where each pair (x_i,x_j) is weighted by w(m_i,m_j) ## ## If multiplicative=TRUE then w(u,v) = f(u) f(v) ## If multiplicative=FALSE then w(u,v) = f(u, v) ## stopifnot(is.ppp(X) && is.marked(X)) is.marked(X, dfok=FALSE) W <- Window(X) ## if(identical(sys.call()[[1]], as.name('markcorrint'))) warn.once('markcorrint', "markcorrint will be deprecated in future versions of spatstat;", "use the equivalent function Kmark") ## validate test function h <- check.testfun(f, f1, X) f <- h$f f1 <- h$f1 ftype <- h$ftype multiplicative <- ftype %in% c("mul", "product") ## ## check corrections correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) isborder <- correction %in% c("border", "bord.modif") if(any(isborder) && !multiplicative) { whinge <- paste("Border correction is not valid unless", "test function is of the form f(u,v) = f1(u)*f1(v)") correction <- correction[!isborder] if(length(correction) == 0) stop(whinge) else warning(whinge) } ## estimated intensity lambda <- intensity(X) mX <- marks(X) switch(ftype, mul={ wt <- mX/lambda K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(mX)^2 }, equ={ fXX <- outer(mX, mX, "==") wt <- fXX/lambda^2 K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ..., renormalise=FALSE) mtable <- table(mX) Ef2 <- sum(mtable^2)/length(mX)^2 }, product={ f1X <- do.call(f1, append(list(mX), fargs)) wt <- f1X/lambda K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(f1X)^2 }, general={ fXX <- do.call(outer, append(list(mX, mX, f), fargs)) wt <- fXX/lambda^2 K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(fXX) }) K$theo <- K$theo * Ef2 labl <- attr(K, "labl") if(normalise) K <- eval.fv(K/Ef2) if(returnL) K <- eval.fv(sqrt(K/pi)) attr(K, "labl") <- labl if(normalise 
&& !returnL) { ylab <- quote(K[f](r)) fnam <- c("K", "f") } else if(normalise && returnL) { ylab <- quote(L[f](r)) fnam <- c("L", "f") } else if(!normalise && !returnL) { ylab <- quote(C[f](r)) fnam <- c("C", "f") } else { ylab <- quote(sqrt(C[f](r)/pi)) fnam <- "sqrt(C[f]/pi)" } K <- rebadge.fv(K, ylab, fnam) return(K) } markcorr <- function(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL, internal=NULL) { ## mark correlation function with test function f stopifnot(is.ppp(X) && is.marked(X)) nX <- npoints(X) ## set defaults to NULL if(missing(f)) f <- NULL if(missing(correction)) correction <- NULL ## handle data frame of marks marx <- marks(X, dfok=TRUE) if(is.data.frame(marx)) { nc <- ncol(marx) result <- list() for(j in 1:nc) { Xj <- X %mark% marx[,j] result[[j]] <- markcorr(Xj, f=f, r=r, correction=correction, method=method, ..., weights=weights, f1=f1, normalise=normalise, fargs=fargs) } result <- as.anylist(result) names(result) <- colnames(marx) return(result) } ## weights if(unweighted <- is.null(weights)) { weights <- rep(1, nX) } else { weights <- pointweights(X, weights=weights, parent=parent.frame()) stopifnot(all(weights > 0)) } ## validate test function h <- check.testfun(f, f1, X) f <- h$f f1 <- h$f1 ftype <- h$ftype ## ## npts <- npoints(X) W <- X$window ## determine r values rmaxdefault <- rmax.rule("K", W, npts/area(W)) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max if(length(method) > 1) stop("Select only one method, please") if(method=="density" && !breaks$even) stop(paste("Evenly spaced r values are required if method=", sQuote("density"), sep="")) ## available selection of edge corrections depends on window correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) ## Denominator ## Ef = Ef(M,M') when M, M' are independent ## Optionally provided by other code Ef <- internal$Ef if(is.null(Ef)) { ## Apply f to every possible pair of marks, and average Ef <- switch(ftype, mul = { mean(marx * weights)^2 }, equ = { if(unweighted) { mtable <- table(marx) } else { mtable <- tapply(weights, marx, sum) mtable[is.na(mtable)] <- 0 } sum(mtable^2)/nX^2 }, product={ f1m <- do.call(f1, append(list(marx), fargs)) mean(f1m * weights)^2 }, general = { mcross <- if(is.null(fargs)) { outer(marx, marx, f) } else { do.call(outer, append(list(marx,marx,f),fargs)) } if(unweighted) { mean(mcross) } else { wcross <- outer(weights, weights, "*") mean(mcross * wcross) } }, stop("Internal error: invalid ftype")) } if(normalise) { theory <- 1 Efdenom <- Ef } else { theory <- Ef Efdenom <- 1 } if(normalise) { ## check validity of denominator if(Efdenom == 0) stop("Cannot normalise the mark correlation; the denominator is zero") else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation:", "the denominator is negative")) } ## this will be the output data frame result <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) ## 
determine conventional name of function if(ftype %in% c("mul", "equ")) { if(normalise) { ylab <- quote(k[mm](r)) fnam <- c("k", "mm") } else { ylab <- quote(c[mm](r)) fnam <- c("c", "mm") } } else { if(normalise) { ylab <- quote(k[f](r)) fnam <- c("k", "f") } else { ylab <- quote(c[f](r)) fnam <- c("c", "f") } } result <- fv(result, "r", ylab, "theo", , alim, c("r","{%s[%s]^{iid}}(r)"), desc, fname=fnam) ## find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) ## apply f to marks of close pairs of points ## mI <- marx[I] mJ <- marx[J] ff <- switch(ftype, mul = mI * mJ, equ = (mI == mJ), product={ if(is.null(fargs)) { fI <- f1(mI) fJ <- f1(mJ) } else { fI <- do.call(f1, append(list(mI), fargs)) fJ <- do.call(f1, append(list(mJ), fargs)) } fI * fJ }, general={ if(is.null(fargs)) f(marx[I], marx[J]) else do.call(f, append(list(marx[I], marx[J]), fargs)) }) ## check values of f(M1, M2) if(is.logical(ff)) ff <- as.numeric(ff) else if(!is.numeric(ff)) stop("function f did not return numeric values") if(anyNA(ff)) switch(ftype, mul=, equ=stop("some marks were NA"), product=, general=stop("function f returned some NA values")) if(any(ff < 0)) switch(ftype, mul=, equ=stop("negative marks are not permitted"), product=, general=stop("negative values of function f are not permitted")) ## weights if(!unweighted) ff <- ff * weights[I] * weights[J] #### Compute estimates ############## if(any(correction == "none")) { ## uncorrected estimate edgewt <- rep.int(1, length(dIJ)) ## get smoothed estimate of mark covariance Mnone <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(un=Mnone), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) ## get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) ## get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## which corrections have been computed? nama2 <- names(result) corrxns <- rev(nama2[nama2 != "r"]) ## default is to display them all formula(result) <- (. 
~ r) fvnames(result, ".") <- corrxns ## unitname(result) <- unitname(X) return(result) } ## mark cross-correlation function markcrosscorr <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=TRUE, Xname=NULL) { if(missing(Xname)) Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X) && is.marked(X)) npts <- npoints(X) W <- Window(X) ## available selection of edge corrections depends on window correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) ## determine r values rmaxdefault <- rmax.rule("K", W, npts/area(W)) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max ## find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) ## determine estimation method if(length(method) > 1) stop("Select only one method, please") if(method=="density" && !breaks$even) stop(paste("Evenly spaced r values are required if method=", sQuote("density"), sep="")) ## ensure marks are a data frame marx <- marks(X, dfok=TRUE) if(!is.data.frame(marx)) marx <- data.frame(marks=marx) ## convert factor marks to dummy variables while(any(isfac <- sapply(marx, is.factor))) { i <- min(which(isfac)) mari <- marx[,i] levi <- levels(mari) nami <- colnames(marx)[i] dumi <- 1 * outer(mari, levi, "==") colnames(dumi) <- paste0(nami, levi) marx <- as.data.frame(append(marx[,-i,drop=FALSE], list(dumi), after=i-1)) } nc <- ncol(marx) nama <- colnames(marx) ## loop over all pairs of columns funs <- list() for(i in 1:nc) { marxi <- marx[,i] namei <- nama[i] for(j in 1:nc) { marxj <- marx[,j] namej <- nama[j] ## Denominator ## Ef = E M M' = EM EM' ## when M, M' are independent from the respective columns Ef <- mean(marxi) * mean(marxj) if(normalise) { theory <- 1 Efdenom <- Ef ## check validity of denominator if(Efdenom == 0) stop(paste("Cannot normalise the mark correlation for", namei, "x", namej, "because the denominator is zero"), call.=FALSE) else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation for", namei, "x", namej, "- the denominator is negative"), call.=FALSE) } else { theory <- Ef Efdenom <- 1 } ## this will be the output data frame df.ij <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) ## determine conventional name of function mimj <- as.name(paste0(namei,".",namej)) if(normalise) { ylab <- substitute(k[mm](r), list(mm=mimj)) fnam <- c("k", as.character(mimj)) } else { ylab <- substitute(c[mm](r), list(mm=mimj)) fnam <- c("c", as.character(mimj)) } fun.ij <- fv(df.ij, "r", ylab, "theo", , alim, c("r","{%s[%s]^{ind}}(r)"), desc, fname=fnam) mI <- marxi[I] mJ <- marxj[J] ff <- mI * mJ ## check values of f(M1, M2) if(anyNA(ff)) stop("some marks were NA", call.=FALSE) if(any(ff < 0)) stop("negative marks are not permitted") ## Compute estimates ############## if(any(correction == "none")) { ## uncorrected estimate edgewt <- rep.int(1, length(dIJ)) ## get smoothed estimate of mark covariance Mnone <- sewsmod(dIJ, 
ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(un=Mnone), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) ## get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) ## get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## which corrections have been computed? nama2 <- names(fun.ij) corrxns <- rev(nama2[nama2 != "r"]) ## default is to display them all formula(fun.ij) <- (. ~ r) fvnames(fun.ij, ".") <- corrxns ## unitname(fun.ij) <- unitname(X) funs <- append(funs, list(fun.ij)) } } # matrix mapping array entries to list positions in 'funs' witch <- matrix(1:(nc^2), nc, nc, byrow=TRUE) header <- paste("Mark cross-correlation functions for", Xname) answer <- fasp(funs, witch, rowNames=nama, colNames=nama, title=header, dataname=Xname) return(answer) } sewsmod <- function(d, ff, wt, Ef, rvals, method="smrep", ..., nwtsteps=500) { ## Smooth Estimate of Weighted Second Moment Density ## (engine for computing mark correlations, etc) ## ------ ## Vectors containing one entry for each (close) pair of points ## d = interpoint distance ## ff = f(M1, M2) where M1, M2 are marks at the two points ## wt = edge correction weight ## ----- ## Ef = E[f(M, M')] where M, M' are independent random marks ## d <- as.vector(d) ff <- as.vector(ff) wt <- as.vector(wt) switch(method, density={ fw <- ff * wt sum.fw <- sum(fw) sum.wt <- sum(wt) ## smooth estimate of kappa_f est <- density(d, weights=fw/sum.fw, from=min(rvals), to=max(rvals), n=length(rvals), ...)$y numerator <- est * sum.fw ## smooth estimate of kappa_1 est0 <- density(d, weights=wt/sum.wt, from=min(rvals), to=max(rvals), n=length(rvals), ...)$y denominator <- est0 * Ef * sum.wt result <- numerator/denominator }, sm={ ## This is slow! 
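## (the default method="density" above is much faster: it forms the
## ratio of two weighted kernel density estimates)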
suppressWarnings(smok <- requireNamespace("sm")) if(!smok) stop(paste("Option method=sm requires package sm,", "which is not available")) ## smooth estimate of kappa_f fw <- ff * wt est <- sm::sm.density(d, weights=fw, eval.points=rvals, display="none", nbins=0, ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(d, weights=wt, eval.points=rvals, display="none", nbins=0, ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, smrep={ suppressWarnings(smok <- requireNamespace("sm")) if(!smok) stop(paste("Option method=smrep requires package sm,", "which is not available")) hstuff <- resolve.defaults(list(...), list(hmult=1, h.weights=NA)) if(hstuff$hmult == 1 && all(is.na(hstuff$h.weights))) warning("default smoothing parameter may be inappropriate") ## use replication to effect the weights (it's faster) nw <- round(nwtsteps * wt/max(wt)) drep.w <- rep.int(d, nw) fw <- ff * wt nfw <- round(nwtsteps * fw/max(fw)) drep.fw <- rep.int(d, nfw) ## smooth estimate of kappa_f est <- sm::sm.density(drep.fw, eval.points=rvals, display="none", ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(drep.w, eval.points=rvals, display="none", ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, loess = { ## set up data frame df <- data.frame(d=d, ff=ff, wt=wt) ## fit curve to numerator using loess fitobj <- loess(ff ~ d, data=df, weights=wt, ...) ## evaluate fitted curve at desired r values Eff <- predict(fitobj, newdata=data.frame(d=rvals)) ## normalise: ## denominator is the sample mean of all ff[i,j], ## an estimate of E(ff(M1,M2)) for M1,M2 independent marks result <- Eff/Ef }, ) return(result) } ############## user interface bits ################################## check.testfun <- local({ fmul <- function(m1, m2) { m1 * m2 } fequ <- function(m1, m2) { m1 == m2 } f1id <- function(m) { m } check.testfun <- function(f=NULL, f1=NULL, X) { ## Validate f or f1 as a test function for point pattern X ## Determine function type 'ftype' ## ("mul", "equ", "product" or "general") if(is.null(f) && is.null(f1)) { ## no functions given ## default depends on kind of marks if(is.multitype(X)) { f <- fequ ftype <- "equ" } else { f1 <- f1id ftype <- "mul" } } else if(!is.null(f1)) { ## f1 given ## specifies test function of the form f(u,v) = f1(u) f1(v) if(!is.null(f)) warning("argument f ignored (overridden by f1)") stopifnot(is.function(f1)) ftype <- "product" } else { ## f given if(is.character(fname <- f)) { switch(fname, "mul" = { f1 <- f1id ftype <- "mul" }, "equ" = { f <- fequ ftype <- "equ" }, { f <- get(fname) ftype <- "general" }) } else if(is.function(f)) { ftype <- if(isTRUE(all.equal(f, fmul))) "mul" else if(isTRUE(all.equal(f, fequ))) "equ" else "general" if(ftype == "mul" && is.multitype(X)) stop(paste("Inappropriate choice of function f;", "point pattern is multitype;", "types cannot be multiplied.")) } else stop("Argument f must be a function or the name of a function") } return(list(f=f, f1=f1, ftype=ftype)) } check.testfun }) spatstat.core/R/dgs.R0000644000176200001440000000725314144333462014160 0ustar liggesusers# # # dgs.R # # $Revision: 1.13 $ $Date: 2021/01/07 03:08:41 $ # # Diggle-Gates-Stibbard process # # # ------------------------------------------------------------------- # DiggleGatesStibbard <- local({ # .......... auxiliary functions ................ 
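# dgsTerms(X, Y, ...) evaluates, for each point of X, the total log pair
# potential against the points of Y (pairs with matching ids excluded),
# via the C routine SC_Ediggatsti. The DGS interaction function is
#     e(d) = sin^2( (pi/2) * d / rho )  for d <= rho,   e(d) = 1 otherwise,
# and log e(d) carries no free coefficient, so it enters the conditional
# intensity as an offset (IsOffset = TRUE).
# Minimal usage sketch of the resulting interaction (assuming the 'cells'
# dataset from spatstat.data):
#     fit <- ppm(cells ~ 1, DiggleGatesStibbard(rho=0.1))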
dgsTerms <- function(X, Y, idX, idY, rho) { stopifnot(is.numeric(rho)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C(SC_Ediggatsti, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE="spatstat.core") answer <- numeric(nX) answer[oX] <- out$values return(answer) } # ...... template object ...................... BlankDGS <- list( name = "Diggle-Gates-Stibbard process", creator = "DiggleGatesStibbard", family = "pairwise.family", # evaluated later pot = function(d, par) { rho <- par$rho v <- log(sin((pi/2) * d/rho)^2) v[ d > par$rho ] <- 0 attr(v, "IsOffset") <- TRUE v }, par = list(rho = NULL), # to be filled in later parnames = "interaction range", hasInf = TRUE, init = function(self) { rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L || rho <= 0) stop("interaction range rho must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { rho <- self$par$rho return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) 
{ # fast evaluator for DiggleGatesStibbard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGatesStibbard") dont.complain.about(splitInf) rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] v <- dgsTerms(U, X, idU, idX, rho) v <- matrix(v, ncol=1L) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho return((pi/2 - 2/pi) * rho^2) } ) class(BlankDGS) <- "interact" DiggleGatesStibbard <- function(rho) { instantiate.interact(BlankDGS, list(rho = rho)) } DiggleGatesStibbard <- intermaker(DiggleGatesStibbard, BlankDGS) DiggleGatesStibbard }) spatstat.core/R/hardcore.R0000644000176200001440000001000614144333462015160 0ustar liggesusers# # # hardcore.S # # $Revision: 1.15 $ $Date: 2018/03/19 14:44:53 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hardcore <- local({ BlankHardcore <- list( name = "Hard core process", creator = "Hardcore", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 0 * d v[ d <= par$hc ] <- (-Inf) attr(v, "IsOffset") <- TRUE v }, par = list(hc = NULL), # filled in later parnames = "hard core distance", hasInf = TRUE, selfstart = function(X, self) { # self starter for Hardcore nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Hardcore model")) return(self) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Hardcore(hc = hcX) }, init = function(self) { hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc) && !(is.numeric(hc) && hc > 0)) stop("hard core distance hc must be a positive number, or NA") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { hc <- self$par$hc return(hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { # fast evaluator for Hardcore interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Hardcore") hc <- potpars$hc # call evaluator for Strauss process counts <- strausscounts(U, X, hc, EqualPairs) forbid <- (counts != 0) if(!splitInf) { ## usual case v <- matrix(ifelseAB(forbid, -Inf, 0), ncol=1L) } else { ## separate hard core v <- matrix(0, nrow=npoints(U), ncol=1L) attr(v, "-Inf") <- forbid } attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral hc <- self$par$hc return(pi * hc^2) }, Percy=function(d, coeffs, par, ...) 
{ ## term used in Percus-Yevick type approximation H <- par$hc t <- abs(d/(2*H)) t <- pmin.int(t, 1) y <- 2 * H^2 * (pi - acos(t) + t * sqrt(1 - t^2)) return(y) } ) class(BlankHardcore) <- "interact" Hardcore <- function(hc=NA) { instantiate.interact(BlankHardcore, list(hc=hc)) } Hardcore <- intermaker(Hardcore, BlankHardcore) Hardcore }) spatstat.core/R/compileK.R0000644000176200001440000000766614144333462015156 0ustar liggesusers# compileK # # Function to take a matrix of pairwise distances # and compile a 'K' function in the format required by spatstat. # # $Revision: 1.10 $ $Date: 2018/07/21 04:05:36 $ # ------------------------------------------------------------------- compileK <- function(D, r, weights=NULL, denom=1, check=TRUE, ratio=FALSE, fname="K") { # process r values breaks <- breakpts.from.r(r) rmax <- breaks$max r <- breaks$r # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] } else wvalues <- NULL # count the number of D values in each interval (r[k], r[k+1L]] counts <- whist(Dvalues, breaks=breaks$val, weights=wvalues) # cumulative counts: number of D values in [0, r[k]) Kcount <- cumsum(counts) # divide by appropriate denominator Kratio <- Kcount/denom # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=Kratio) if(!ratio) { K <- fv(df, "r", quote(K(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } else { num <- data.frame(r=r, est=Kcount) den <- data.frame(r=r, est=denom) K <- ratfv(df=NULL, numer=num, denom=den, "r", quote(K(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } return(K) } compilepcf <- function(D, r, weights=NULL, denom=1, check=TRUE, endcorrect=TRUE, ratio=FALSE, ..., fname="g") { # process r values breaks <- breakpts.from.r(r) if(!breaks$even) stop("compilepcf: r values must be evenly spaced", call.=FALSE) r <- breaks$r rmax <- breaks$max # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] totwt <- sum(wvalues) normwvalues <- wvalues/totwt } else { nv <- length(Dvalues) normwvalues <- rep.int(1/nv, nv) totwt <- nv } # form kernel estimate rmin <- min(r) rmax <- max(r) nr <- length(r) den <- density(Dvalues, weights=normwvalues, from=rmin, to=rmax, n=nr, ...) gval <- den$y * totwt # normalise gval <- gval/denom # edge effect correction at r = 0 if(endcorrect) { one <- do.call(density, resolve.defaults( list(seq(rmin,rmax,length=512)), list(bw=den$bw, adjust=1), list(from=rmin, to=rmax, n=nr), list(...))) onefun <- approxfun(one$x, one$y, rule=2) gval <- gval /((rmax-rmin) * onefun(den$x)) } # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=gval) if(!ratio) { g <- fv(df, "r", quote(g(r)), "est", . 
~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } else { num <- data.frame(r=r, est=gval * denom) den <- data.frame(r=r, est=denom) g <- ratfv(df=NULL, numer=num, denom=den, "r", quote(g(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } attr(g, "bw") <- den$bw return(g) } spatstat.core/R/model.depends.R0000644000176200001440000000730314144333462016120 0ustar liggesusers# # Determine which 'canonical variables' depend on a supplied covariate # # $Revision: 1.11 $ $Date: 2021/03/29 08:11:44 $ # model.depends <- function(object) { ## supplied covariates fo <- formula(object) if(length(as.list(fo)) == 3) { ## formula has a response: strip it fo <- fo[-2] } covars <- variablesinformula(fo) mm <- model.matrix(object) depends <- matrix(FALSE, ncol(mm), length(covars), dimnames=list(colnames(mm), covars)) ## model term labels tt <- terms(object) lab <- attr(tt, "term.labels") ## map from canonical covariates to term labels ass <- attr(mm, "assign") %orifnull% object[["assign"]] ## determine which canonical covariate depends on which supplied covariate if(length(ass) == ncol(mm)) { for(i in seq(along=ass)) { if(ass[i] == 0) # 0 is the intercept term depends[i,] <- FALSE else { turm <- lab[ass[i]] depends[i, ] <- covars %in% all.vars(parse(text=turm)) } } } else { warning("model.depends: unable to determine the dependence structure", call.=FALSE) } ## detect offsets if(!is.null(oo <- attr(tt, "offset")) && ((noo <- length(oo)) > 0)) { # entries of 'oo' index the list of variables in terms object vv <- attr(tt, "variables") offdep <- matrix(FALSE, noo, length(covars)) offnms <- character(noo) for(i in seq_len(noo)) { offseti <- languageEl(vv, oo[i] + 1) offdep[i, ] <- covars %in% all.vars(offseti) offnms[i] <- deparse(offseti) } rownames(offdep) <- offnms colnames(offdep) <- covars attr(depends, "offset") <- offdep } return(depends) } model.is.additive <- function(object) { dep <- model.depends(object) hit <- t(dep) %*% dep diag(hit) <- 0 ok <- all(hit == 0) return(ok) } model.covariates <- function(object, fitted=TRUE, offset=TRUE) { md <- model.depends(object) nm <- colnames(md) keep <- rep.int(FALSE, length(nm)) # variables used in formula with coefficients if(fitted) keep <- apply(md, 2, any) # variables used in offset if(offset) { oo <- attr(md, "offset") if(!is.null(oo)) keep <- keep | apply(oo, 2, any) } return(nm[keep]) } has.offset.term <- function(object) { # model terms tt <- terms(object) oo <- attr(tt, "offset") return(!is.null(oo) && (length(oo) > 0)) } has.offset <- function(object) { has.offset.term(object) || !is.null(model.offset(model.frame(object))) } check.separable <- function(dmat, covname, isconstant, fatal=TRUE) { #' Determine whether the effect of 'covname' is separable from other terms. 
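#' (Hypothetical example: for fit <- ppm(X ~ x + I(x^2) + y),
#' model.depends(fit) is a logical matrix with one row per canonical
#' coefficient and one column per supplied covariate; rows "x" and
#' "I(x^2)" are TRUE only in column "x", and row "y" only in column "y".
#' check.separable then reports whether, say, "x" can be separated from
#' the remaining terms.)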
#' dmat = model.depends(model) #' Find covariates entangled with 'covname' in the model relevant <- dmat[, covname] othercov <- (colnames(dmat) != covname) conflict <- dmat[relevant, othercov, drop=FALSE] if(!any(conflict)) return(TRUE) #' names of entangled covariates entangled <- colnames(conflict)[matcolany(conflict)] #' not problematic if constant if(is.null(names(isconstant))) names(isconstant) <- colnames(dmat) ok <- unlist(isconstant[entangled]) conflict[ , ok] <- FALSE if(!any(conflict)) return(TRUE) #' there are conflicts conflictterms <- matrowany(conflict) conflictcovs <- matcolany(conflict) whinge <- paste("The covariate", sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms]))) if(fatal) stop(whinge, call.=FALSE) warning(whinge, call.=FALSE) return(FALSE) } spatstat.core/R/varcount.R0000644000176200001440000001054114144333464015240 0ustar liggesusers#' #' varcount.R #' #' Variance of N(B) #' #' $Revision: 1.15 $ $Date: 2021/08/12 09:28:12 $ #' varcount <- function(model, B=Window(model), ..., dimyx=NULL) { stopifnot(is.owin(B) || is.im(B) || is.function(B)) if(is.owin(B)) { f <- NULL lambdaB <- predict(model, locations=B, ngrid=dimyx, type="intensity") } else { f <- if(is.im(B)) B else as.im(B, W=as.owin(model), ..., dimyx=dimyx) B <- as.owin(f) lambdaB <- predict(model, locations=B, type="intensity") } ## important range of distances ## need to integrate over [0, R] R <- reach(model, epsilon=0.001) if(!isTRUE(is.finite(R))) R <- NULL if(!is.null(R)) { ## detect very small cluster radius (or very large window) diam <- diameter(Frame(B)) if((R < 0.001 * diam) && (area(erosion(B, R))/area(B) > 0.999)) { #' integrate by parts and ignore edge effect K <- Kmodel(model) if(is.function(K)) { excess <- K(diam) - pi * diam^2 if(is.null(f)) { E <- integral(lambdaB) V <- integral(lambdaB^2) } else { E <- integral(lambdaB * f) V <- integral((lambdaB * f)^2) } v <- E + V * excess if(is.finite(v)) return(v) } } } g <- pcfmodel(model) if(!is.function(g)) stop("Pair correlation function is not available") v <- varcountEngine(g, B, lambdaB, f, R=R) return(v) } varcountEngine <- local({ varcountEngine <- function(g, B, lambdaB, f=1, R=NULL, what=c("variance","excess","pairs","squared")) { ## variance = var(N) ## excess = var(N) - E(N) ## pairs = E[ N(N-1) ] ## squared = E[N^2] what <- match.arg(what) g1 <- function(r) { g(r) - 1 } if(missing(f) || is.null(f) || identical(f, 1)) { v <- switch(what, variance = integral(lambdaB) + dublin(g1, B, lambdaB, R=R), excess = dublin(g1, B, lambdaB, R=R), pairs = dublin(g, B, lambdaB, R=R), squared = integral(lambdaB) + dublin(g, B, lambdaB, R=R)) } else if(min(f) >= 0) { ## nonnegative integrand v <- switch(what, variance = integral(lambdaB * f^2) + dublin(g1, B, lambdaB * f, R=R), excess = dublin(g1, B, lambdaB * f, R=R), pairs = dublin(g, B, lambdaB * f, R=R), squared = integral(lambdaB * f^2) + dublin(g, B, lambdaB * f, R=R)) } else if(max(f) <= 0) { ## nonpositive integrand v <- switch(what, variance=integral(lambdaB * f^2) + dublin(g1, B, lambdaB * (-f), R=R), excess = dublin(g1, B, lambdaB * (-f), R=R), pairs = dublin(g, B, lambdaB * (-f), R=R), squared =integral(lambdaB * f^2) + dublin(g, B, lambdaB * (-f), R=R)) } else { ## integrand has both positive and negative parts lamfplus <- eval.im(lambdaB * pmax(0, f)) lamfminus <- 
eval.im(lambdaB * pmax(0, -f)) h <- switch(what, variance = g1, excess = g1, pairs = g, squared = g) co <- (dublin(h, B, lamfplus, R=R) + dublin(h, B, lamfminus, R=R) - dublin(h, B, lamfplus, lamfminus, R=R) - dublin(h, B, lamfminus, lamfplus, R=R)) v <- switch(what, variance = integral(lambdaB * f^2) + co, excess = co, pairs = co, squared = integral(lambdaB * f^2) + co) } return(v) } dublin <- function(h, B, f, f2, R=NULL) { ## Double integral ## \int_B \int_B h(|u-v|) f(u) f(v) du dv ## or \int_B \int_B h(|u-v|) f(u) f2(v) du dv ## Assume h, f, f2 are nonnegative ## R = reach of model dr <- R/100 if(missing(f2)) { ## \int_B \int_B h(|u-v|) f(u) f(v) du dv M <- distcdf(B, dW=f, nr=NULL, delta=dr) ## integrate a <- integral(f)^2 * as.numeric(stieltjes(h, M)) } else { ## \int_B \int_B h(|u-v|) f(u) f2(v) du dv M <- distcdf(B, dW=f, dV=f2, nr=NULL, delta=dr) ## integrate a <- integral(f) * integral(f2) * as.numeric(stieltjes(h, M)) } return(a) } varcountEngine }) spatstat.core/R/Tstat.R0000644000176200001440000002107314144333461014475 0ustar liggesusers# # tstat.R Estimation of T function # # $Revision: 1.12 $ $Date: 2018/07/02 15:45:48 $ # Tstat <- local({ # helper functions diffrange <- function(z) diff(range(z, na.rm=TRUE)) edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) { triid <- as.matrix(triid) ntri <- nrow(triid) if(ntri == 0) return(numeric(0)) W <- rescue.rectangle(as.owin(X)) if(W$type != "rectangle") stop("Translation correction is only implemented for rectangular windows") x <- matrix(X$x[triid], nrow=ntri) y <- matrix(X$y[triid], nrow=ntri) dx <- apply(x, 1, diffrange) dy <- apply(y, 1, diffrange) wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high/((wide - dx) * (high - dy)) weight <- pmin.int(trim, weight) return(weight) } # helper function implemented.for.T <- function(correction, windowtype, explicit) { rect <- (windowtype == "rectangle") if(any(correction == "best")) { # select best available correction correction <- if(rect) "translate" else "border" } else { # available selection of edge corrections depends on window if(!rect) { tra <- (correction == "translate") if(any(tra)) { whinge <- "Translation correction is only implemented for rectangular windows" if(explicit) { if(all(tra)) stop(whinge) else warning(whinge) } correction <- correction[!tra] } } } return(correction) } # .......... main function .................... 
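# Tstat estimates the third-order summary function T(r), an analogue of
# the K-function based on counting triples of points that form a triangle
# of diameter <= r; for a homogeneous Poisson process
#     T(r) = (pi/2) * (pi - 3*sqrt(3)/4) * r^4.
# Minimal usage sketch (assuming the 'redwood' dataset from spatstat.data):
#     TT <- Tstat(redwood)
#     plot(TT)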
Tstat <- function(X, ..., r=NULL, rmax=NULL, correction=c("border", "translate"), ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") # rfixed <- !is.null(r) npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) lambda3 <- (npts * (npts - 1) * (npts - 2))/(areaW^3) rmaxdefault <- if(!is.null(rmax)) rmax else rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.T(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame TT <- data.frame(r=r, theo= (pi/2) * (pi - 3 * sqrt(3)/4) * r^4) desc <- c("distance argument r", "theoretical Poisson %s") TT <- fv(TT, "r", quote(T(r)), "theo", , alim, c("r","%s[pois](r)"), desc, fname="T") # save numerator and denominator? if(ratio) { denom <- lambda2 * areaW numT <- eval.fv(denom * TT) denT <- eval.fv(denom + TT * 0) attributes(numT) <- attributes(denT) <- attributes(TT) attr(numT, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denT, "desc")[2] <- "denominator for theoretical Poisson %s" } # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE) I <- close$i J <- close$j DIJ <- close$d nI <- length(I) # estimate computation time if(verbose) { nTmax <- nI * (nI-1) /2 esttime <- exp(1.25 * log(nTmax) - 21.5) message(paste("Searching", nTmax, "potential triangles;", "estimated time", codetime(esttime))) } # find triangles with their diameters tri <- trianglediameters(I, J, DIJ, nvert=npts) stopifnot(identical(colnames(tri), c("i", "j", "k", "diam"))) # reassemble so each triangle appears 3 times, once for each vertex II <- with(tri, c(i, j, k)) DD <- with(tri, rep.int(diam, 3)) if(any(correction == "none")) { # uncorrected! For demonstration purposes only! 
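# Each triangle contributes three times to DD (once per vertex), so the
# cumulative histogram of DD counts (vertex, triangle) pairs; dividing
# by lambda3 * |W| then gives the unadjusted estimate of T(r).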
wh <- whist(DD, breaks$val) # no weights numTun <- cumsum(wh) denTun <- lambda3 * areaW # uncorrected estimate of T Tun <- numTun/denTun TT <- bind.fv(TT, data.frame(un=Tun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(un=numTun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denT <- bind.fv(denT, data.frame(un=denTun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[II] # apply reduced sample algorithm RS <- Kount(DD, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r) numTbm <- RS$numerator denTbm <- lambda3 * denom.area Tbm <- numTbm/denTbm TT <- bind.fv(TT, data.frame(bord.modif=Tbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(bord.modif=numTbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denT <- bind.fv(denT, data.frame(bord.modif=denTbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numTb <- RS$numerator denTb <- lambda2 * RS$denom.count Tb <- numTb/denTb TT <- bind.fv(TT, data.frame(border=Tb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numT <- bind.fv(numT, data.frame(border=numTb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denT <- bind.fv(denT, data.frame(border=denTb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction # apply to triangle list edgewt <- edgetri.Trans(X, tri[, 1:3]) wh <- whist(tri$diam, breaks$val, edgewt) numTtrans <- 3 * cumsum(wh) denTtrans <- lambda3 * areaW Ttrans <- numTtrans/denTtrans h <- diameter(W)/2 Ttrans[r >= h] <- NA TT <- bind.fv(TT, data.frame(trans=Ttrans), "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans") if(ratio) { numT <- bind.fv(numT, data.frame(trans=numTtrans), "hat(%s)[trans](r)", "numerator of translation-corrected estimate of %s", "trans") denT <- bind.fv(denT, data.frame(trans=denTtrans), "hat(%s)[trans](r)", "denominator of translation-corrected estimate of %s", "trans") } } # default plot will display all edge corrections formula(TT) <- . ~ r unitname(TT) <- unitname(X) # if(ratio) { # finish up numerator & denominator formula(numT) <- formula(denT) <- . 
~ r unitname(numT) <- unitname(denT) <- unitname(TT) # tack on to result TT <- rat(TT, numT, denT, check=FALSE) } return(TT) } Tstat }) spatstat.core/R/rLGCP.R0000644000176200001440000001015414144333463014305 0ustar liggesusers#' #' rLGCP.R #' #' simulation of log-Gaussian Cox process #' #' original code by Abdollah Jalilian #' #' modifications by Adrian Baddeley, Ege Rubak and Tilman Davies #' #' $Revision: 1.23 $ $Date: 2021/04/07 01:17:38 $ #' rLGCP <- local({ rLGCP <- function(model="exp", mu = 0, param = NULL, ..., win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE) { ## validate if (is.numeric(mu)) { check.1.real(mu, paste("if", sQuote("mu"), "is numeric,")) } else if(!is.function(mu) && !is.im(mu)) stop(paste(sQuote("mu"), "must be a constant, a function or an image")) check.1.integer(nsim) stopifnot(nsim >= 1) ## check for outdated usage if(!all(nzchar(names(param)))) stop("Outdated syntax of argument 'param' to rLGCP", call.=FALSE) ## do.rLGCP(model=model, mu=mu, param=param, ..., win=win, saveLambda=saveLambda, nsim=nsim, drop=drop) } do.rLGCP <- function(model="exp", mu = 0, param = NULL, ..., win=NULL, saveLambda=TRUE, eps = NULL, dimyx = NULL, xy = NULL, modelonly=FALSE, Lambdaonly=FALSE, nsim=1, drop=TRUE) { ## make RF model object from RandomFields package ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) ## now create a RandomFields 'model' object rfmodel <- do.call(modgen, append(as.list(param), list(...))) if(!inherits(rfmodel, "RMmodel")) stop("Unable to create RandomFields model object", call.=FALSE) ## undocumented exit - return the RandomFields model object only if(modelonly) return(rfmodel) ## simulation window win.given <- !is.null(win) mu.image <- is.im(mu) win <- if(win.given) as.owin(win) else if(mu.image) as.owin(mu) else owin() if(win.given && mu.image && !is.subset.owin(win, as.owin(mu))) stop(paste("The spatial domain of the pixel image", sQuote("mu"), "does not cover the simulation window", sQuote("win"))) ## convert win to a mask w <- as.mask(w=win, eps=eps, dimyx=dimyx, xy=xy) xcol <- w$xcol yrow <- w$yrow dimw <- w$dim ## evaluate 'mu' at pixels of mask if(is.numeric(mu)) { muxy <- mu } else { xy <- rasterxy.mask(w, drop=FALSE) xx <- xy$x yy <- xy$y muxy <- if (is.function(mu)) mu(xx,yy) else lookup.im(mu, xx, yy, naok=TRUE, strict=TRUE) muxy[is.na(muxy)] <- -Inf } ## corresponding image template Lambda <- as.im(w) ## generate 'nsim' realisations of a zero-mean Gaussian random field Z spc <- RandomFields::RFoptions()$general$spConform if(spc) RandomFields::RFoptions(spConform=FALSE) z <- RandomFields::RFsimulate(rfmodel, xcol, yrow, grid = TRUE, n=nsim) if(spc) RandomFields::RFoptions(spConform=TRUE) if(is.null(dim(z))) stop("RFsimulate did not return a matrix or array", call.=FALSE) ## ensure 3D array if(length(dim(z)) == 2) z <- array(z, dim=c(dim(z), 1)) ## transform to spatstat convention z <- aperm(z, c(2,1,3)) ## safety checks if(!all(dim(z)[1:2] == dim(Lambda))) stop("Internal error: wrong matrix dimensions in rLGCP", call.=FALSE) if(Lambdaonly) { ## undocumented exit - return Lambda only Lambdalist <- vector(mode="list", length=nsim) for(i in 1:nsim) { ## Extract i-th realisation of Z; convert to log-Gaussian image Lambda$v[] <- exp(muxy + z[,,i]) ## save as i-th realisation of Lambda Lambdalist[[i]] <- Lambda } return(simulationresult(Lambdalist, nsim, drop)) } ## generate realisations of LGCP result <- vector(mode="list", length=nsim) for(i in 1:nsim) { ## Extract i-th realisation of Z; convert to log-Gaussian image Lambda$v[] <- 
exp(muxy + z[,,i]) ## generate Poisson points X <- rpoispp(Lambda)[win] ## if(saveLambda) attr(X, "Lambda") <- Lambda result[[i]] <- X } if(drop && nsim == 1) return(result[[1]]) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } rLGCP }) spatstat.core/R/poisson.R0000644000176200001440000000172714144333463015076 0ustar liggesusers# # # poisson.S # # $Revision: 1.8 $ $Date: 2015/10/21 09:06:57 $ # # The Poisson process # # Poisson() create an object of class 'interact' describing # the (null) interpoint interaction structure # of the Poisson process. # # # ------------------------------------------------------------------- # Poisson <- local({ BlankPoisson <- list( name = "Poisson process", creator = "Poisson", family = NULL, pot = NULL, par = NULL, parnames = NULL, init = function(...) { }, update = function(...) { }, print = function(self) { cat("Poisson process\n") invisible() }, valid = function(...) { TRUE }, project = function(...) NULL, irange = function(...) { 0 }, version=NULL ) class(BlankPoisson) <- "interact" Poisson <- function() { BlankPoisson } Poisson <- intermaker(Poisson, BlankPoisson) Poisson }) spatstat.core/R/clusterfunctions.R0000644000176200001440000000710414144333461017007 0ustar liggesusers## clusterfunctions.R ## ## Contains the generic functions: ## - clusterkernel ## - clusterfield ## - clusterradius. ## ## $Revision: 1.7 $ $Date: 2021/08/08 08:54:55 $ ## clusterkernel <- function(model, ...) { UseMethod("clusterkernel") } clusterkernel.kppm <- function(model, ...) { kernelR <- Kpcf.kppm(model, what = "kernel") f <- function(x, y = 0, ...){ kernelR(sqrt(x^2+y^2)) } return(f) } clusterkernel.character <- function(model, ...){ info <- spatstatClusterModelInfo(model, onlyPCP = TRUE) internalkernel <- info$kernel dots <- list(...) par <- c(kappa = 1, scale = dots$scale) par <- info$checkpar(par, old = TRUE) nam <- info$clustargsnames margs <- NULL if(!is.null(nam)) margs <- dots[nam] f <- function(x, y = 0, ...){ internalkernel(par = par, rvals = sqrt(x^2+y^2), margs = margs) } return(f) } clusterfield <- function(model, locations = NULL, ...) { UseMethod("clusterfield") } clusterfield.kppm <- function(model, locations = NULL, ...) { f <- clusterkernel(model) if(is.null(locations)){ if(!is.stationary(model)) stop("The model is non-stationary. The argument ", sQuote("locations"), " must be given.") locations <- centroid.owin(Window(model), as.ppp = TRUE) } clusterfield.function(f, locations, ..., mu = model$mu) } clusterfield.character <- function(model, locations = NULL, ...){ f <- clusterkernel(model, ...) clusterfield.function(f, locations, ...) } clusterfield.function <- function(model, locations = NULL, ..., mu = NULL) { if(is.null(locations)){ locations <- ppp(.5, .5, window=square(1)) } else if(!is.ppp(locations)) stop("Argument ", sQuote("locations"), " must be a point pattern (ppp).") if("sigma" %in% names(list(...)) && "sigma" %in% names(formals(model))) warning("Currently ", sQuote("sigma"), "cannot be passed as an extra argument to the kernel function. 
", "Please redefine the kernel function to use another argument name.") rslt <- density(locations, kernel=model, ..., edge=FALSE) if(is.null(mu)) return(rslt) mu <- as.im(mu, W=rslt) if(min(mu)<0) stop("Cluster reference intensity ", sQuote("mu"), " is negative.") return(rslt*mu) } clusterradius <- function(model, ...){ UseMethod("clusterradius") } clusterradius.character <- function(model, ..., thresh = NULL, precision = FALSE){ info <- spatstatClusterModelInfo(model, onlyPCP=FALSE) if(!isTRUE(info$isPCP)) { warning("cluster radius is only defined for cluster processes", call.=FALSE) return(NA) } rmax <- info$range(..., thresh = thresh) if(precision && is.function(info$ddist)){ ddist <- function(r) info$ddist(r, ...) prec <- integrate(ddist, 0, rmax) attr(rmax, "prec") <- prec } return(rmax) } clusterradius.kppm <- function(model, ..., thresh = NULL, precision = FALSE){ a <- list(model = model$clusters, thresh = thresh, precision = precision) a <- append(a, as.list(c(model$clustpar, model$clustargs))) do.call(clusterradius.character, a) } reach.kppm <- function(x, ..., epsilon) { thresh <- if(missing(epsilon)) NULL else epsilon if(x$isPCP) return(2 * clusterradius.kppm(x, ..., thresh=thresh)) ## use pair correlation g <- pcfmodel(x) ## find upper bound if(is.null(thresh)) thresh <- 0.01 f <- function(r) { g(r) - 1 - thresh } scal <- as.list(x$par)$scale %orifnull% 1 for(a in scal * 2^(0:10)) { if(f(a) < 0) break; } if(f(a) > 0) return(Inf) ## solve g(r) = 1 + epsilon b <- uniroot(f, c(0, a))$root return(b) } spatstat.core/R/ordthresh.R0000644000176200001440000000330714144333463015402 0ustar liggesusers# # # ordthresh.S # # $Revision: 1.12 $ $Date: 2018/03/15 07:37:41 $ # # Ord process with threshold potential # # OrdThresh() create an instance of the Ord process # [an object of class 'interact'] # with threshold potential # # # ------------------------------------------------------------------- # OrdThresh <- local({ BlankOrdThresh <- list( name = "Ord process with threshold potential", creator = "OrdThresh", family = "ord.family", pot = function(d, par) { (d <= par$r) }, par = list(r = NULL), parnames = "threshold distance", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("threshold distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) is.finite(loggamma) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(...) 
{ return(Inf) }, version=NULL ) class(BlankOrdThresh) <- "interact" OrdThresh <- function(r) { instantiate.interact(BlankOrdThresh, list(r=r)) } OrdThresh <- intermaker(OrdThresh, BlankOrdThresh) OrdThresh }) spatstat.core/R/predictmppm.R0000644000176200001440000004110214144333464015720 0ustar liggesusers# # predictmppm.R # # $Revision: 1.17 $ $Date: 2020/11/01 00:36:17 $ # # # ------------------------------------------------------------------- predict.mppm <- local({ predict.mppm <- function(object, ..., newdata=NULL, type=c("trend", "cif"), ngrid=40, locations=NULL, verbose=FALSE) { ## ## 'object' is the output of mppm() ## model <- object verifyclass(model, "mppm") ## isMulti <- is.multitype(model) modelsumry <- summary(model) depends.on.row <- modelsumry$depends.on.row fixedinteraction <- modelsumry$ikind$fixedinteraction ndata.old <- model$npat ## ## ...................................................................... if(verbose) cat("Inspecting arguments...") ## ...................................................................... ## ## hidden arguments selfcheck <- resolve.defaults(list(...), list(selfcheck=FALSE))$selfcheck ## ## Argument 'type' ## type <- pickoption("type", type, c(trend="trend", lambda="cif", cif="cif"), multi=TRUE) want.trend <- "trend" %in% type want.cif <- "cif" %in% type ## ## Argument 'newdata' ## use.olddata <- is.null(newdata) if(use.olddata) { newdata <- model$data newdataname <- "Original data" ndata.new <- ndata.old new.id <- NULL } else { stopifnot(is.data.frame(newdata) || is.hyperframe(newdata)) newdataname <- sQuote("newdata") ndata.new <- nrow(newdata) new.id <- NULL if(depends.on.row) { #' require row serial numbers 'id' new.id <- newdata$id if(is.null(new.id)) { #' no serial numbers given #' implicitly use the old serial numbers if(ndata.new != ndata.old) stop(paste("'newdata' must have the same number of rows", "as the original 'data' argument", paren(paste("namely", ndata.old)), "because the model depends on the row index"), call.=FALSE) } else { #' serial numbers given #' validate them if(!is.factor(new.id) && !is.integer(new.id)) stop("newdata$id should be a factor or integer vector", call.=FALSE) if(is.integer(new.id)) { new.id <- factor(new.id, levels=1:ndata.old) } else if(!identical(levels(new.id), as.character(1:ndata.old))) { stop(paste0("Levels of newdata$id must be 1:", ndata.old), call.=FALSE) } } } } ## ## Argument 'locations' ## if(is.hyperframe(locations)) locations <- locations[,1,drop=TRUE] if(is.list(locations)) cls <- unique(sapply(locations, class)) loctype <- if(is.null(locations)) "null" else if(is.data.frame(locations)) "data.frame" else if(is.list(locations)) { if(any(c("ppp", "quad") %in% cls)) "points" else if("owin" %in% cls) { if(all(sapply(locations, is.mask))) "mask" else "window" } else "unknown" } else "unknown" ## ...................................................................... if(verbose) cat("done.\nDeciding type of locations for prediction...") ## ...................................................................... 
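## loctype summarises the 'locations' argument: "null" and "window" force
## prediction on a pixel grid; "points", "mask" and "data.frame" supply
## explicit prediction locations; "unknown" is rejected in the switch below.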
need.grid <- switch(loctype, null =TRUE, data.frame=FALSE, points =FALSE, mask =FALSE, window =TRUE, unknown =stop("Unrecognised format for locations")) make.image <- need.grid || (loctype == "mask") ## locationvars <- c("x", "y", "id", if(isMulti) "marks" else NULL) ## ## if(need.grid) { ## prediction on a grid is required if(is.data.frame(newdata)) stop(paste("Cannot predict model on a grid;", newdataname, "are a data frame")) } else { ## prediction at `locations' is required if(is.hyperframe(newdata)) { ## check consistency between locations and newdata nloc <- length(locations) if(nloc != ndata.new) stop(paste("Length of argument", sQuote("locations"), paren(nloc), "does not match number of rows in", newdataname, paren(ndata.new))) } else { ## newdata is a data frame if(!is.data.frame(locations)) stop(paste(newdataname, "is a data frame; locations must be a data frame")) else { stopifnot(nrow(locations) == nrow(newdata)) dup <- names(newdata) %in% names(locations) if(any(dup)) for(nam in names(newdata)[dup]) if(!isTRUE(all.equal(newdata[,nam], locations[,nam]))) stop(paste("The data frames newdata and locations", "both have a column called", sQuote(nam), "but the entries differ")) nbg <- !(locationvars %in% c(names(newdata),names(locations))) if(any(nbg)) stop(paste(ngettext(sum(nbg), "Variable", "Variables"), commasep(locationvars[nbg]), "not provided")) ## merge the two data frames newdata <- cbind(newdata[,!dup], locations) locations <- NULL } } } ## ...................................................................... if(verbose) cat("done.\nExtracting details of point process model...") ## ...................................................................... ## extract fitted glm/gam/glmm object FIT <- model$Fit$FIT MOADF <- model$Fit$moadf ## extract names of interaction variables Vnamelist <- model$Fit$Vnamelist vnames <- unlist(Vnamelist) ## determine which interaction is applicable on each row interactions <- model$Inter$interaction hyperinter <- is.hyperframe(interactions) ninter <- if(hyperinter) nrow(interactions) else 1L if(hyperinter && ninter > 1) { if(fixedinteraction) { interactions <- interactions[1L, ] } else { ## interaction depends on row if(!is.null(new.id)) { ## row sequence specified; extract the relevant rows interactions <- interactions[as.integer(new.id), ] } else { ## rows of newdata implicitly correspond to rows of original data if(ninter != ndata.new) stop(paste("Number of rows of newdata", paren(ndata.new), "does not match number of interactions in model", paren(ninter))) } } } ## extract possible types, if model is multitype if(isMulti) { levlist <- unique(lapply(data.mppm(model), levelsofmarks)) if(length(levlist) > 1) stop("Internal error: the different point patterns have inconsistent marks", call.=FALSE) marklevels <- levlist[[1L]] } else marklevels <- list(NULL) # sic ## ...................................................................... if(verbose) { cat("done.\n") if(use.olddata) splat("Using original hyperframe of data") else splat("newdata is a", if(is.data.frame(newdata)) "data frame" else "hyperframe") } ## ...................................................................... 
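## Two cases follow: if newdata is a data frame, its covariate values are
## used directly (with any interaction components set to zero for the
## trend); if newdata is a hyperframe, the prediction locations are packed
## into quadrature schemes further below.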
## if(is.data.frame(newdata)) { ## ## newdata is a DATA FRAME ## if(need.grid) stop("Cannot predict model on a grid; newdata is a data frame") if(verbose) cat("Computing prediction..") ## use newdata as covariates nbg <- !(locationvars %in% names(newdata)) if(any(nbg)) stop(paste(ngettext(sum(nbg), "variable", "variables"), commasep(locationvars[nbg]), "not provided")) ## create output data frame answer <- as.data.frame(matrix(, nrow=nrow(newdata), ncol=0), row.names=row.names(newdata)) if(want.trend) { ## add interaction components, set to zero (if any) if(length(vnames) > 0) newdata[, vnames] <- 0 ## compute fitted values answer$trend <- Predict(FIT, newdata=newdata, type="response") } if(want.cif) { if(is.poisson(object)) { ## cif = trend answer$cif <- if(want.trend) answer$trend else Predict(FIT, newdata=newdata, type="response") } else { warning("Computation of the cif is not yet implemented when newdata is a data frame") ## split data frame by 'id' ## compute interaction components using existing point patterns ## compute fitted values } } if(verbose) cat("done.\n") return(answer) } ## ...................................................................... ## newdata is a HYPERFRAME ## if(verbose) cat("Building data for prediction...") sumry.new <- summary(newdata) ndata.new <- sumry.new$ncases ## name of response point pattern in model Yname <- model$Info$Yname ## ## Determine response point patterns if known. ## Extract from newdata if available ## Otherwise from the original data if appropriate if(verbose) cat("(responses)...") Y <- if(Yname %in% sumry.new$col.names) newdata[, Yname, drop=TRUE, strip=FALSE] else if(ndata.new == ndata.old) model$data[, Yname, drop=TRUE, strip=FALSE] else NULL ## if(want.cif && is.null(Y)) stop(paste("Cannot compute cif:", "newdata does not contain column", dQuote(Yname), "of response point patterns")) ## ## Determine windows for prediction if(verbose) cat("(windows)...") Wins <- if(!need.grid) lapply(locations, as.owin, fatal=FALSE) else if(!is.null(Y)) lapply(Y, as.owin, fatal=FALSE) else NULL if(is.null(Wins) || any(sapply(Wins, is.null))) stop("Cannot determine windows where predictions should be made") ## ## if(is.null(Y)) { ## only want trend; empty patterns will do Y <- lapply(Wins, emptypattern) } ## ensure Y contains data points only if(is.quad(Y[[1]])) Y <- lapply(Y, getElement, name="data") ## Determine locations for prediction if(need.grid) { ## Generate grids of dummy locations if(verbose) cat("(grids)...") Gridded <- lapply(Wins, gridsample, ngrid=ngrid) Dummies <- lapply(Gridded, getElement, name="D") Templates <- lapply(Gridded, getElement, name="I") } else { ## locations are given somehow if(verbose) cat("(locations)...") switch(loctype, points = { Dummies <- locations }, mask = { Dummies <- lapply(locations, punctify) Templates <- lapply(locations, as.im) }, stop("Internal error: illegal loctype")) } ## .......................................... ## ............... PREDICTION ............... ## ..........................................
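## Strategy: for each possible mark level, attach the prediction locations
## to the responses as dummy points of a quadrature scheme, rebuild the
## Berman-Turner model frame via mppm(..., backdoor=TRUE), evaluate fitted
## values from the underlying glm/gam fit, and reshape the results into
## marked point patterns or pixel images, one per row of newdata.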
## initialise hyperframe of predicted values Answer <- newdata[,integer(0),drop=FALSE] if(depends.on.row) Answer$id <- factor(levels(MOADF$id)) ## Loop over possible types, or execute once: ## /////////////////////////////////////////// for(lev in marklevels) { ## Pack prediction locations into quadschemes if(verbose) { cat("Building quadschemes") if(isMulti) cat(paste("with mark", lev)) cat("...") } if(isMulti) { ## assign current mark level to all dummy points flev <- factor(lev, levels=marklevels) Dummies <- lapply(Dummies, "marks<-", value=flev) } Quads <- mapply(quad, data=Y, dummy=Dummies, SIMPLIFY=FALSE, USE.NAMES=FALSE) ## Insert quadschemes into newdata newdata[, Yname] <- Quads ## compute the Berman-Turner frame if(verbose) cat("done.\nStarting prediction...(Berman-Turner frame)...") moadf <- mppm(formula = model$formula, data = newdata, interaction = interactions, iformula = model$iformula, random = model$random, use.gam = model$Fit$use.gam, correction = model$Info$correction, rbord = model$Info$rbord, backdoor = TRUE) ## compute fitted values if(verbose) cat("(glm prediction)...") values <- moadf[, locationvars] if(want.cif) values$cif <- Predict(FIT, newdata=moadf, type="response") if(want.trend) { if(length(vnames) == 0) { ## Poisson model: trend = cif values$trend <- if(want.cif) values$cif else Predict(FIT, newdata=moadf, type="response") } else { ## zero the interaction components moadf[, vnames] <- 0 ## compute fitted values values$trend <- Predict(FIT, newdata=moadf, type="response") } } if(verbose) cat("done.\nReshaping results...") ## ## Reshape results ## separate answers for each image values <- split(values, values$id) ## Trends <- list() Lambdas <- list() if(!make.image) { if(verbose) cat("(marked point patterns)...") ## values become marks attached to locations for(i in seq(ndata.new)) { Val <- values[[i]] Loc <- Dummies[[i]] isdum <- !is.data(Quads[[i]]) if(selfcheck) if(length(isdum) != length(Val$trend)) stop("Internal error: mismatch between data frame and locations") if(want.trend) Trends[[i]] <- Loc %mark% (Val$trend[isdum]) if(want.cif) Lambdas[[i]] <- Loc %mark% (Val$cif[isdum]) } } else { if(verbose) cat("(pixel images)...") ## assign values to pixel images for(i in seq(ndata.new)) { values.i <- values[[i]] Q.i <- Quads[[i]] values.i <- values.i[!is.data(Q.i), ] Template.i <- Templates[[i]] ok.i <- !is.na(Template.i$v) if(sum(ok.i) != nrow(values.i)) stop("Internal error: mismatch between data frame and image") if(selfcheck) { dx <- rasterx.im(Template.i)[ok.i] - values.i$x dy <- rastery.im(Template.i)[ok.i] - values.i$y cat(paste("i=", i, "range(dx) =", paste(range(dx), collapse=", "), "range(dy) =", paste(range(dy), collapse=", "), "\n")) } if(want.trend) { Trend.i <- Template.i Trend.i$v[ok.i] <- values.i$trend Trends[[i]] <- Trend.i } if(want.cif) { Lambda.i <- Template.i Lambda.i$v[ok.i] <- values.i$cif Lambdas[[i]] <- Lambda.i } } } if(verbose) cat("done reshaping.\n") if(want.trend) { trendname <- paste0("trend", lev) Answer[,trendname] <- Trends } if(want.cif) { cifname <- paste0("cif", lev) Answer[,cifname] <- Lambdas } } ## /////////// end loop over possible types ////////////////// return(Answer) } ## helper functions emptypattern <- function(w) { ppp(numeric(0), numeric(0), window=w) } levelsofmarks <- function(X) { levels(marks(X)) } gridsample <- function(W, ngrid) { masque <- as.mask(W, dimyx=ngrid) xx <- raster.x(masque) yy <- raster.y(masque) xpredict <- xx[masque$m] ypredict <- yy[masque$m] Dummy <- ppp(xpredict, ypredict, window=W) 
Image <- as.im(masque) return(list(D=Dummy, I=Image)) } punctify <- function(M) { xx <- raster.x(M) yy <- raster.y(M) xpredict <- xx[M$m] ypredict <- yy[M$m] return(ppp(xpredict, ypredict, window=M)) } Predict <- function(object, newdata, type=c("link", "response")) { type <- match.arg(type) if(inherits(object, "glmmPQL")) { class(object) <- class(object)[-1L] pred <- predict(object, newdata=newdata) if(type == "response") pred <- object$family$linkinv(pred) } else { pred <- predict(object, newdata=newdata, type=type) } return(as.numeric(pred)) } predict.mppm }) spatstat.core/R/plot.mppm.R0000644000176200001440000000131314144333464015322 0ustar liggesusers# # plot.mppm.R # # $Revision: 1.6 $ $Date: 2020/12/19 05:25:06 $ # # plot.mppm <- function(x, ..., trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp")) { xname <- deparse(substitute(x)) how <- match.arg(how) subs <- subfits(x) dont.complain.about(subs) arglist <- resolve.defaults(list(x=quote(subs), how=how), list(...), list(main=xname)) if(trend) do.call(plot, c(arglist, list(trend=TRUE, cif=FALSE, se=FALSE))) if(cif) do.call(plot, c(arglist, list(trend=FALSE, cif=TRUE, se=FALSE))) if(se) do.call(plot, c(arglist, list(trend=FALSE, cif=FALSE, se=TRUE))) invisible(NULL) } spatstat.core/R/parres.R0000644000176200001440000005057414144333463014704 0ustar liggesusers# # parres.R # # code to plot transformation diagnostic # # $Revision: 1.16 $ $Date: 2020/11/17 03:47:24 $ # parres <- function(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw="nrd0", adjust=1, from=NULL,to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname) { callstring <- paste(deparse(sys.call()), collapse = "") modelname <- deparse(substitute(model)) stopifnot(is.ppm(model)) if(missing(covariate)) { mc <- model.covariates(model) if(length(mc) == 1) covariate <- mc else stop("covariate must be provided") } if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(!is.null(subregion)) stopifnot(is.owin(subregion)) if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- npoints(quadpoints) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Inverse lambda residuals rx <- residuals(model, type="inverse") resid <- with(rx, "increment") ################################################################# # identify the covariate # if(length(covariate) == 0) stop("No covariate specified") covtype <- "unknown" if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate covtype <- "external" beta <- 0 covvalues <- evalCovariate(covariate, quadpoints) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # 'original covariates' orig.covars <- variablesinformula(formula(model)) # 'canonical 
covariates' canon.covars <- names(coef(model)) # offsets offset.covars <- offsetsinformula(formula(model)) # if(covname %in% orig.covars) { # one of the original covariates covtype <- "original" covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } else if(covname %in% canon.covars) { # one of the canonical covariates covtype <- "canonical" mm <- model.matrix(model) covvalues <- mm[, covname] ## extract the corresponding coefficient beta <- coef(model)[[covname]] } else if(covname %in% offset.covars) { # an offset term only covtype <- "offset" mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(!(covname %in% colnames(mf))) stop(paste("Internal error: offset term", covname, "not found in model frame")) covvalues <- mf[, covname] ## fixed coefficient (not an estimated parameter) beta <- 1 } else{ # must be an external covariate (i.e. not used in fitted model) covtype <- "external" beta <- 0 covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } } # validate covvalues # if(is.null(covvalues)) stop("Unable to extract covariate values") if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) vtype <- typeof(covvalues) switch(vtype, real=, double = { }, integer = { warning("Covariate is integer-valued") }, stop(paste("Cannot handle covariate of type", sQuote(vtype)))) ################################################################# # Compute covariate effect if(covtype != "original") { effect <- beta * covvalues mediator <- covtype effectfundata <- list(beta=beta) effectFun <- function(x) { (effectfundata$beta) * x } isoffset <- (covtype == "offset") names(isoffset) <- covname } else { ## `original' covariate (passed as argument to ppm) ## may determine one or more canonical covariates and/or offsets origcovdf <- getppmOriginalCovariates(model)[insubregion, , drop=FALSE] isconstant <- lapply(origcovdf, function(z) { length(unique(z)) == 1 }) ## ## Initialise termnames <- character(0) termbetas <- numeric(0) isoffset <- logical(0) mediator <- character(0) effect <- 0 effectFun <- function(x) { effectFun.can(x) + effectFun.off(x) } effectFun.can <- effectFun.off <- function(x) { 0 * x } ## Identify relevant canonical covariates dmat <- model.depends(model) if(!(covname %in% colnames(dmat))) stop("Internal error: cannot match covariate names") relevant <- dmat[, covname] if(any(relevant)) { # original covariate determines one or more canonical covariates mediator <- "canonical" ## check whether covariate is separable check.separable(dmat, covname, isconstant) ## Extract information about relevant model terms termnames <- rownames(dmat)[relevant] isoffset <- rep.int(FALSE, length(termnames)) names(isoffset) <- termnames ## Extract relevant canonical covariates mm <- model.matrix(model) termvalues <- mm[, relevant, drop=FALSE] ## extract corresponding coefficients termbetas <- coef(model)[relevant] ## evaluate model effect effect <- as.numeric(termvalues %*% termbetas) ## check length if(length(effect) != npoints(quadpoints)) stop(paste("Internal error: number of values of fitted effect =", length(effect), "!=", npoints(quadpoints), "= number of quadrature points")) ## Trap loglinear case if(length(termnames) == 1 && identical(termnames, covname)) { covtype <- "canonical" beta <- termbetas } ## construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , 
drop=FALSE] #' set interaction terms to zero if(length(Vnames <- model$internal$Vnames)) defaultdata[,Vnames] <- 0 gf <- getglmfit(model) effectfundata.can <- list(covname=covname, fmla = rhs.of.formula(formula(gf)), termbetas = termbetas, defaultdata = defaultdata, relevant = relevant, termnames = termnames) effectFun.can <- function(x) { d <- effectfundata.can # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model matrix m <- model.matrix(d$fmla, df) # check it conforms to expected structure if(!identical(colnames(m)[d$relevant], d$termnames)) stop("Internal error: mismatch in term names in effectFun") me <- m[, d$relevant, drop=FALSE] y <- me %*% as.matrix(d$termbetas, ncol=1) return(y) } } if(!is.null(offmat <- attr(dmat, "offset")) && any(relevant <- offmat[, covname])) { ## covariate appears in a model offset term mediator <- c(mediator, "offset") ## check whether covariate is separable check.separable(offmat, covname, isconstant) ## collect information about relevant offset offnames <- rownames(offmat)[relevant] termnames <- c(termnames, offnames) noff <- length(offnames) termbetas <- c(termbetas, rep.int(1, noff)) isoffset <- c(isoffset, rep.int(TRUE, noff)) names(termbetas) <- names(isoffset) <- termnames ## extract values of relevant offset mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(any(nbg <- !(offnames %in% colnames(mf)))) stop(paste("Internal error:", ngettext(sum(nbg), "offset term", "offset terms"), offnames[nbg], "not found in model frame")) effex <- mf[, offnames, drop=FALSE] effect <- effect + rowSums(effex) # # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] #' set interaction terms to zero if(length(Vnames <- model$internal$Vnames)) defaultdata[,Vnames] <- 0 gf <- getglmfit(model) effectfundata.off <- list(covname=covname, fmla = rhs.of.formula(formula(gf)), defaultdata = defaultdata, offnames = offnames) effectFun.off <- function(x) { d <- effectfundata.off # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model FRAME mf <- model.frame(d$fmla, df) # check it conforms to expected structure if(!all(d$offnames %in% colnames(mf))) stop("Internal error: mismatch in term names in effectFun") moff <- mf[, d$offnames, drop=FALSE] y <- rowSums(moff) return(y) } } if(length(termnames) == 0) { # Sanity clause # (everyone knows there ain't no Sanity Clause...) warning(paste("Internal error: could not find any", "canonical covariates or offset terms", "that depended on the covariate", sQuote(covname))) # Assume it's an external covariate (i.e. 
not used in fitted model) covtype <- "external" beta <- 0 effect <- beta * covvalues effectFun <- function(x) { 0 * x } isoffset <- FALSE names(isoffset) <- covname } } #### Canonical covariates and coefficients switch(covtype, original={ cancovs <- termnames canbeta <- termbetas }, offset = , canonical={ cancovs <- covname canbeta <- beta }, external={ cancovs <- canbeta <- NA }) ################################################################# # Validate covariate values # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg.cov <- !is.finite(covvalues) if(any(offending <- nbg.cov & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } nbg.eff <- !is.finite(effect) if(any(offending <- nbg.eff & operative)) { warning(paste(sum(offending), "out of", length(offending), "values of fitted effect discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values nbg <- nbg.cov | nbg.eff ok <- !nbg & operative if(sum(ok) < 2) { warning("Not enough data; returning NULL") return(NULL) } if(!all(ok)) { Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] resid <- resid[ok] lam <- lam[ok] effect <- effect[ok] insubregion <- insubregion[ok] Z <- Z[ok] wts <- wts[ok] } #################################################### # assemble data for smoothing x <- covvalues y <- resid/wts if(smooth.effect) y <- y + effect w <- wts # if(makefrom <- is.null(from)) from <- min(x) if(maketo <- is.null(to)) to <- max(x) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(x, weights=w*y, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(x[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator if(sum(insubregion) < 2) { warning("Not enough useable data in subregion; returning NULL") return(NULL) } x <- x[insubregion] y <- y[insubregion] w <- w[insubregion] Z <- Z[insubregion] lam <- lam[insubregion] if(makefrom) from <- min(x) if(maketo) to <- max(x) numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(x, weights=w, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
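####################################################
# Usage sketch (not run). The computation above is the core of
# parres(); a minimal hedged example, assuming the 'cells' dataset
# from spatstat.data is available:
#   fit <- ppm(cells ~ x + y)
#   pr <- parres(fit, "x")    # partial residual diagnostic for 'x'
#   plot(pr)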
#################################################### # Determine recommended plot range alim <- c(from, to) nZ <- sum(Z) if(nZ > 5) { xr <- range(as.vector(x[Z]), finite=TRUE) alimx <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, alimx) } #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x yyy <- numfun(xxx)/denfun(xxx) # variance estimation # smooth 1/lambda(u) with smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(x, weights=w/lam, bw=tau, adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) varestxxx <- varnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sd <- sqrt(varestxxx) # alternative estimate of variance using data points only if(nZ > 1) { varXnumer <- unnormdensity(x[Z], weights=1/lam[Z]^2, bw=tau, adjust=1, n=n,from=from,to=to, ...) varXnumfun <- interpolate(varXnumer) varXestxxx <- varXnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sdX <- sqrt(varXestxxx) } else sdX <- rep(NA, length(xxx)) # fitted effect effxxx <- effectFun(xxx) # add fitted effect of covariate, if not added before smoothing if(!smooth.effect) yyy <- yyy + effxxx #################################################### # pack into fv object df <- data.frame(xxx=xxx, h =yyy, varh=varestxxx, hi=yyy+2*sd, lo=yyy-2*sd, hiX=yyy+2*sdX, loX=yyy-2*sdX, fit=effxxx) # remove any funny characters in name of covariate (e.g. if it is an offset) Covname <- make.names(covname) names(df)[1] <- Covname desc <- c(paste("covariate", sQuote(covname)), "Smoothed partial residual", "Variance", "Upper limit of pointwise 5%% significance band (integral)", "Lower limit of pointwise 5%% significance band (integral)", "Upper limit of pointwise 5%% significance band (sum)", "Lower limit of pointwise 5%% significance band (sum)", paste("Parametric fitted effect of", sQuote(covname))) rslt <- fv(df, argu=Covname, ylab=substitute(h(X), list(X=as.name(covname))), valu="h", fmla= as.formula(paste(". ~ ", Covname)), alim=alim, labl=c(covname, paste("%s", paren(covname), sep=""), paste("var", paren(covname), sep=""), paste("hi", paren(covname), sep=""), paste("lo", paren(covname), sep=""), paste("hiX", paren(covname), sep=""), paste("loX", paren(covname), sep=""), paste("fit", paren(covname), sep="")), desc=desc, fname="h", yexp=as.expression(substitute(hat(h)(X), list(X=covname)))) attr(rslt, "dotnames") <- c("h", "hi", "lo", "fit") fvnames(rslt, ".s") <- c("hi", "lo") # add special class data class(rslt) <- c("parres", class(rslt)) attr(rslt, "stuff") <- list(covname = paste(covname, collapse=""), covtype = covtype, mediator = mediator, cancovs = cancovs, canbeta = canbeta, isoffset = isoffset, modelname = modelname, modelcall = modelcall, callstring = callstring, sigma = sigma, smooth.effect = smooth.effect, restricted = !is.null(subregion), bw.input = bw.input) return(rslt) } print.parres <- function(x, ...) 
{ cat("Transformation diagnostic (class parres)\n") s <- attr(x, "stuff") cat(paste("for the", s$covtype, "covariate", sQuote(s$covname), if(s$covtype != "external") "in" else "for", "the fitted model", if(nchar(s$modelcall) < 30) "" else "\n\t", s$modelcall, "\n")) switch(s$covtype, original={ cancovs <- s$cancovs med <- s$mediator isoffset <- s$isoffset if(is.null(isoffset)) isoffset <- rep.int(FALSE, length(cancovs)) ncc <- length(cancovs) nfitted <- sum(!isoffset) noff <- sum(isoffset) explainfitted <- explainoff <- character(0) if(noff > 0) explainoff <- paste("offset", ngettext(noff, "term", "terms"), commasep(dQuote(cancovs[isoffset]))) if(nfitted > 0) explainfitted <- paste( paste(med[med != "offset"], collapse=" and "), ngettext(nfitted, "term", "terms"), commasep(dQuote(cancovs[!isoffset]))) splat("Fitted effect: ", if(ncc > 1) "sum of" else NULL, paste(c(explainfitted, explainoff), collapse=" and ")) }, external={ cat("Note: effect estimate not justified by delta method\n") }, offset={}, canonical={}) # earlier versions were equivalent to restricted=FALSE if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n")) # earlier versions were equivalent to smooth.effect=TRUE sme <- !identical(s$smooth.effect, FALSE) if(sme) { cat("Algorithm: smooth(effect + residual)\n\n") } else { cat("Algorithm: effect + smooth(residual)\n\n") } NextMethod("print") } plot.parres <- function(x, ...) { xname <- short.deparse(substitute(x)) force(x) do.call(plot.fv, resolve.defaults(list(quote(x)), list(...), list(main=xname, shade=c("hi", "lo")))) } spatstat.core/R/Ksector.R0000644000176200001440000001722614144333461015015 0ustar liggesusers# # Ksector.R Estimation of 'sector K function' # # $Revision: 1.5 $ $Date: 2014/11/10 10:41:14 $ # Ksector <- function(X, begin=0, end=360, ..., units=c("degrees", "radians"), r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), domain = NULL, ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") # rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) rmaxdefault <- rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max if(!is.null(domain)) { domain <- as.owin(domain) stopifnot(is.subset.owin(domain, Window(X))) areaW <- area(domain) } units <- match.arg(units) switch(units, radians = { if(missing(end)) end <- 2 * pi check.1.real(begin) check.1.real(end) check.in.range(begin, c(-pi, 2*pi)) check.in.range(end, c(0, 2*pi)) stopifnot(begin < end) stopifnot((end - begin) <= 2 * pi) BEGIN <- begin END <- end Bname <- simplenumber(begin/pi, "pi") %orifnull% signif(begin, 3) Ename <- simplenumber(end/pi, "pi") %orifnull% signif(end, 3) }, degrees = { check.1.real(begin) check.1.real(end) check.in.range(begin, c(-90, 360)) check.in.range(end, c(0, 360)) stopifnot(begin < end) stopifnot((end - begin) <= 360) if(verbose && (end - begin) <= 2 * pi) warning("Very small interval in degrees: did you mean radians?") BEGIN <- pi* (begin/180) END <- pi * (end/180) Bname <- signif(begin, 3) Ename <- signif(end, 3) }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", 
border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ## labels subscripts <- paste("sector", Bname, Ename, sep=",") ylabel <- paste("K[", subscripts, "]") ylab <- eval(parse(text=paste("quote(", ylabel, ")"))) # ylab <- parse(text=paste("K[sector,", Bname, ",", Ename, "]")) # yexp <- substitute(K[list(sector,B,E)](r), # list(B=Bname, E=Ename)) yexp <- parse(text=paste("K[list(", subscripts, ")]")) fname <- c("K", paste("list", paren(subscripts))) ## this will be the output data frame Kdf <- data.frame(r=r, theo = ((END-BEGIN)/2) * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- lambda2 * areaW K <- ratfv(Kdf, NULL, denom, "r", ylab = ylab, valu = "theo", fmla = NULL, alim =alim, labl = c("r","{%s[%s]^{pois}}(r)"), desc = desc, fname=fname, yexp=yexp, ratio=ratio) ## identify all close pairs rmax <- max(r) close <- as.data.frame(closepairs(X, rmax)) if(!is.null(domain)) { ## restrict to pairs with first point in 'domain' indom <- with(close, inside.owin(xi, yi, domain)) close <- close[indom, , drop=FALSE] } ## select pairs in angular range ang <- with(close, atan2(dy, dx)) %% (2*pi) if(BEGIN >= 0) { ## 0 <= begin < end ok <- (BEGIN <= ang) & (ang <= END) } else { ## begin < 0 <= end ok <- (ang >= 2 * pi + BEGIN) | (ang <= END) } close <- close[ok, , drop=FALSE] ## pairwise distances DIJ <- close$d if(any(correction == "none")) { # uncorrected! For demonstration purposes only! 
wh <- whist(DIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambda2 * areaW # uncorrected estimate of K K <- bind.ratfv(K, data.frame(un=numKun), denKun, "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] if(!is.null(domain)) b <- b[inside.owin(X, , w=domain)] # apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r, subset=domain) numKbm <- RS$numerator denKbm <- lambda2 * denom.area K <- bind.ratfv(K, data.frame(bord.modif=numKbm), data.frame(bord.modif=denKbm), "{hat(%s)[%s]^{bordm}}(r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambda * RS$denom.count K <- bind.ratfv(K, data.frame(border=numKb), data.frame(border=denKb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambda2 * areaW h <- diameter(as.rectangle(W))/2 numKtrans[r >= h] <- NA K <- bind.ratfv(K, data.frame(trans=numKtrans), denKtrans, "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambda2 * areaW h <- diameter(W)/2 numKiso[r >= h] <- NA K <- bind.ratfv(K, data.frame(iso=numKiso), denKiso, "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso", ratio=ratio) } # # default plot will display all edge corrections formula(K) <- . ~ r nama <- rev(colnames(K)) nama <- nama[!(nama %in% c("r", "rip", "ls"))] fvnames(K, ".") <- nama unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } spatstat.core/R/fitted.mppm.R0000644000176200001440000000346514144333464015635 0ustar liggesusers# # fitted.mppm.R # # method for 'fitted' for mppm objects # # $Revision: 1.3 $ $Date: 2020/01/01 04:43:21 $ # fitted.mppm <- function(object, ..., type="lambda", dataonly=FALSE) { # sumry <- summary(object) type <- pickoption("type", type, c(lambda="lambda", cif ="lambda", trend ="trend"), multi=FALSE, exact=FALSE) # extract fitted model object and data frame glmfit <- object$Fit$FIT glmdata <- object$Fit$moadf # interaction names Vnames <- unlist(object$Fit$Vnamelist) interacting <- (length(Vnames) > 0) # row identifier id <- glmdata$id # Modification of `glmdata' may be required if(interacting) switch(type, trend={ # zero the interaction statistics glmdata[ , Vnames] <- 0 }, lambda={ # Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) # exclude from predict.glm glmdata <- glmdata[!forbid, ] }) # Compute predicted [conditional] intensity values values <- predict(glmfit, newdata=glmdata, type="response") # Note: the `newdata' argument is necessary in order to obtain # predictions at all quadrature points. 
If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. if(interacting && type=="lambda") { # reinsert zeroes vals <- numeric(length(forbid)) vals[forbid] <- 0 vals[!forbid] <- values values <- vals } names(values) <- NULL if(dataonly) { # extract only data values isdata <- (glmdata$.mpl.Y != 0) values <- values[isdata] id <- id[isdata] } return(split(values, id)) } spatstat.core/R/smoothfv.R0000644000176200001440000000323014144333464015241 0ustar liggesusers# # smoothfv.R # # $Revision: 1.15 $ $Date: 2020/11/30 13:11:33 $ # # smooth.fv <- function(x, which="*", ..., # method=c("smooth.spline", "loess"), # xinterval=NULL) { # .Deprecated("Smooth.fv", package="spatstat", # msg="smooth.fv is deprecated: use the generic Smooth with a capital S") # Smooth(x, which=which, ..., method=method, xinterval=xinterval) # } Smooth.fv <- function(X, which="*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) { x <- X stopifnot(is.character(which)) method <- match.arg(method) if(!is.null(xinterval)) check.range(xinterval) if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) { if(which == ".x") stop("Cannot smooth the function argument") which <- fvnames(x, which) } if(any(nbg <- !(which %in% names(x)))) stop(paste("Unrecognised column", ngettext(sum(nbg), "name", "names"), commasep(sQuote(which[nbg])), "in argument", sQuote("which"))) xx <- x[[fvnames(x, ".x")]] # process each column of function values for(ynam in which) { yy <- x[[ynam]] ok <- is.finite(yy) if(!is.null(xinterval)) ok <- ok & inside.range(xx, xinterval) switch(method, smooth.spline = { ss <- smooth.spline(xx[ok], yy[ok], ...) yhat <- predict(ss, xx[ok])$y }, loess = { df <- data.frame(x=xx[ok], y=yy[ok]) lo <- loess(y ~ x, df, ...) yhat <- predict(lo, df[,"x", drop=FALSE]) }) yy[ok] <- yhat x[[ynam]] <- yy } return(x) } spatstat.core/R/randomImage.R0000644000176200001440000000057314144333463015625 0ustar liggesusers#' #' randomImage.R #' #' Functions for generating random images #' #' $Revision: 1.1 $ $Date: 2015/03/23 10:44:04 $ #' #' rnoise <- function(rgen=runif, w=square(1), ...) { a <- do.call.matched(as.mask, list(w=w, ...), sieve=TRUE) W <- a$result argh <- a$otherargs Z <- as.im(W) n <- sum(W$m) Z[] <- do.call(rgen, append(list(n=n), argh)) return(Z) } spatstat.core/R/dffit.R0000644000176200001440000000212214144333462014465 0ustar liggesusers#' #' dffit.R #' #' $Revision: 1.1 $ $Date: 2018/04/19 05:04:59 $ #' #' Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2018 dffit <- function(object, ...) UseMethod("dffit") dffit.ppm <- function(object, ..., collapse=FALSE, dfb=NULL) { db <- dfb %orifnull% dfbetas(object, ...) 
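#' DFFIT combines the coefficient sensitivities (dfbetas) with the
#' canonical covariates: each contribution is dbeta_k(u) * Z_k(u),
#' computed below as db * Z.
#' Usage sketch (not run), assuming the 'cells' dataset:
#'   fit <- ppm(cells ~ x)
#'   plot(dffit(fit))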
Z <- model.matrix(object, drop=FALSE, irregular=TRUE) if(!all(dim(db) == dim(Z))) { #' internal error - mismatch in quadrature points - fall back U <- db$loc Z <- sapply(model.images(object, irregular=TRUE), "[", i=U) } #' ensure 0 * (-Inf) = 0 if(any(a <- (db$val == 0) & (Z == -Inf))) Z[a] <- 0 #' smoothed density must be handled separately sm <- attr(db, "smoothdensity") attr(db, "smoothdensity") <- NULL #' do the main calculation Y <- db * Z #' also calculate the smoothed density if(!is.null(sm)) { ZZ <- model.images(object, irregular=TRUE) HH <- mapply(harmonise, ZZ=ZZ, sm=sm) sm <- mapply("*", e1=HH$sm, e2=HH$ZZ, SIMPLIFY=FALSE) attr(Y, "smoothdensity") <- as.solist(sm) } if(collapse) Y <- Reduce("+", unstack(Y)) return(Y) } spatstat.core/R/multihard.R0000644000176200001440000001450414144333463015372 0ustar liggesusers# # # multihard.R # # $Revision: 1.20 $ $Date: 2021/11/08 07:07:48 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # MultiHard <- local({ # .... multitype hard core potential MHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(h) <= col(h)) & (!is.na(h)) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] # corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { # apply the relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- rep.int(TRUE, npairs) return(z) } #### end of 'pot' function #### # ............ template object ................... 
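# Usage sketch (not run), assuming the multitype 'amacrine' dataset
# from spatstat.data:
#   ppm(amacrine ~ marks, MultiHard())  # hardcore radii self-started
#   MultiHard(hradii=matrix(0.04, 2, 2), types=c("off", "on"))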
BlankMH <- list( name = "Multitype Hardcore process", creator = "MultiHard", family = "pairwise.family", # evaluated later pot = MHpotential, par = list(types=NULL, hradii = NULL), # filled in later parnames = c("possible types", "hardcore distances"), pardesc = c("vector of possible types", "matrix of hardcore distances"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiHard(types=types,hradii=hradii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { h <- self$par$hradii nt <- length(types) if(!is.null(h)) MultiPair.checkmatrix(h, nt, sQuote("hradii")) if(length(types) == 0) stop(paste("The", sQuote("types"), "argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { h <- self$par$hradii if(waxlyrical('gory')) { if(!is.null(h)) splat(nrow(h), "types of points") types <- self$par$types if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } if(!is.null(h)) { splat("Hardcore radii:") print(signif(h, getOption("digits"))) } else splat("Hardcore radii:\t not yet determined") invisible() }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, version=NULL # fix later ) class(BlankMH) <- "interact" MultiHard <- function(hradii=NULL, types=NULL) { if((missing(hradii) || !is.matrix(hradii)) && is.matrix(types)) { ## old syntax: (types=NULL, hradii) hradii <- types types <- NULL } if(!is.null(hradii)) hradii[hradii == 0] <- NA out <- instantiate.interact(BlankMH, list(types=types, hradii = hradii)) if(!is.null(types)) dimnames(out$par$hradii) <- list(types, types) return(out) } MultiHard <- intermaker(MultiHard, BlankMH) MultiHard }) spatstat.core/R/pairwise.family.R0000644000176200001440000004753114144333463016512 0ustar liggesusers# # # pairwise.family.S # # $Revision: 1.72 $ $Date: 2020/11/17 03:47:24 $ # # The pairwise interaction family of point process models # # pairwise.family: object of class 'isf' defining pairwise interaction # # # ------------------------------------------------------------------- # pairwise.family <- list( name = "pairwise", print = function(self) { cat("Pairwise interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(is.null(inter) || is.null(inter$family) || inter$family$name != "pairwise") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) { # compute potential function as `fv' object dd <- matrix(d, ncol=1) p <- pairpot(dd, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(quote(fun)), list(...), list(ylim=ylim))) return(invisible(fun)) } else{ # compute each potential and store in `fasp' object if(!is.factor(types)) types <- factor(types, levels=types) m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { # relevant position in matrix 
ijpos <- i + (j-1) * m which[i,j] <- ijpos # extract values of potential yy <- y[tx == types[i], j] # make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) # } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(quote(funz)), list(...), list(ylim=ylim))) return(invisible(funz)) } }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { # # This is the eval function for the `pairwise' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairwise' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # # The pair potential function 'pairpot' should be either # pairpot(d, par) [for potentials that don't depend on marks] # or # pairpot(d, tx, tu, par) [for potentials that do depend on mark] # where d is a matrix of interpoint distances, # tx is the vector of types for the data points, # tu is the vector of types for all quadrature points # and # par is a list of parameters for the potential. # # The additional argument 'splitInf' is also permitted. # # It must return a matrix with the same dimensions as d # or an array with its first two dimensions the same as the dimensions of d. 
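# For instance, the unmarked Strauss potential defined elsewhere in
# this package has exactly this form:
#    pot = function(d, par) { d <= par$r }
# and returns a logical (0/1) matrix with the same dimensions as d.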
pt <- PairPotentialType(pairpot) # includes validation of pair potential ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) { POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) } else { POT <- do.call.matched(pairpot, list(d=M, tx=marks(X), tu=marks(U), par=potpars)) } # Determine whether each component of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) #' positive case if(splitInf) { IsNegInf <- (POT == -Inf) POT[IsNegInf] <- 0 } # handle corrections if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 0) { nplanes <- dim(POT)[3] for(k in 1:nplanes) { POT[cbind(EqualPairs, k)] <- 0 if(splitInf) IsNegInf[cbind(EqualPairs, k)] <- FALSE } } # reattach the negative infinity for re-use by special code if(splitInf) attr(POT, "IsNegInf") <- IsNegInf # Return just the pair potential? 
if(pot.only) return(POT) # Sum the pairwise potentials over data points for each quadrature point V <- apply(POT, c(2,3), sum) # Handle positive case if(splitInf) attr(V, "-Inf") <- apply(IsNegInf, 2, any) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairwise.family$suffstat") { # for pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"pairwise")) stop("Model is not a pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[numeric(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] # parameters order2 <- names(coef(model)) %in% model$internal$Vnames order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions can be determined from INxIN o1terms <- momINxIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { #' Sufficient statistic for second order conditional intensity #' for pairwise interaction processes #' Equivalent to evaluating pair potential. if(is.ppp(X)) { seqX <- seq_len(npoints(X)) E <- cbind(seqX, seqX) R <- reach(inte) result <- pairwise.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) M <- attr(result, "IsNegInf") if(sparseOK) result <- as.sparse3Darray(result) if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") #' collapse vector-valued potential, yielding a matrix M <- apply(M, c(1,2), any) if(!is.matrix(M)) M <- matrix(M, nrow=nX) #' count conflicts hits <- colSums(M) #' hits[j] == 1 implies that X[j] violates hard core with only one X[i] #' and therefore changes status if X[i] is deleted. 
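#' Conversely, hits[j] >= 2 means X[j] would remain forbidden after
#' deletion of any single X[i], so its status cannot change;
#' those columns are zeroed below.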
deltaInf <- M deltaInf[, hits != 1] <- FALSE if(sparseOK) deltaInf <- as(deltaInf, "sparseMatrix") #' attr(result, "deltaInf") <- deltaInf } } else if(is.quad(X)) { U <- union.quad(X) izdat <- is.data(X) nU <- npoints(U) nX <- npoints(X$data) seqU <- seq_len(nU) E <- cbind(seqU, seqU) R <- reach(inte) result <- pairwise.family$eval(U,U,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) M <- attr(result, "IsNegInf") if(sparseOK) result <- as.sparse3Darray(result) if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") #' consider conflicts with data points MXU <- M[izdat, , , drop=FALSE] #' collapse vector-valued potential, yielding a matrix MXU <- apply(MXU, c(1,2), any) if(!is.matrix(MXU)) MXU <- matrix(MXU, nrow=nX) #' count data points conflicting with each quadrature point nhitdata <- colSums(MXU) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], deltaInf <- apply(M, c(1,2), any) deltaInf[izdat, nhitdata != 1] <- FALSE #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. deltaInf[!izdat, nhitdata != 0] <- FALSE #' if(sparseOK) deltaInf <- as(deltaInf, "sparseMatrix") #' attr(result, "deltaInf") <- deltaInf } } return(result) } ######### end of function $delta2 ) ######### end of list class(pairwise.family) <- "isf" # externally visible PairPotentialType <- function(pairpot) { stopifnot(is.function(pairpot)) fop <- names(formals(pairpot)) v <- match(list(fop), list(c("d", "par"), c("d", "tx", "tu", "par"))) if(is.na(v)) stop("Formal arguments of pair potential function are not understood", call.=FALSE) marked <- (v == 2) return(list(marked=marked)) } evalPairPotential <- function(X, P, E, pairpot, potpars, R) { # Evaluate pair potential without edge correction weights nX <- npoints(X) nP <- npoints(P) pt <- PairPotentialType(pairpot) # includes validation # determine dimension of potential, etc fakePOT <- do.call.matched(pairpot, list(d=matrix(, 0, 0), tx=marks(X)[integer(0)], tu=marks(P)[integer(0)], par=potpars)) IsOffset <- attr(fakePOT, "IsOffset") fakePOT <- ensure3Darray(fakePOT) Vnames <- dimnames(fakePOT)[[3]] p <- dim(fakePOT)[3] # Identify close pairs X[i], P[j] cl <- crosspairs(X, P, R, what="ijd") I <- cl$i J <- cl$j D <- matrix(cl$d, ncol=1) # deal with empty cases if(nX == 0 || nP == 0 || length(I) == 0) { di <- c(nX, nP, p) dn <- list(NULL, NULL, Vnames) result <- array(0, dim=di, dimnames=dn) attr(result, "IsOffset") <- IsOffset return(result) } # evaluate potential for close pairs # POT is a 1-column matrix or array, with rows corresponding to close pairs if(!pt$marked) { # unmarked POT <- do.call.matched(pairpot, list(d=D, par=potpars)) IsOffset <- attr(POT, "IsOffset") } else { # marked marX <- marks(X) marP <- marks(P) if(!identical(levels(marX), levels(marP))) stop("Internal error: marks of X and P have different levels") types <- levels(marX) mI <- marX[I] mJ <- marP[J] POT <- NULL # split data by type of P[j] for(k in types) { relevant <- which(mJ == k) if(length(relevant) > 0) { fk <- factor(k, levels=types) POTk <- do.call.matched(pairpot, list(d=D[relevant, , drop=FALSE], tx=mI[relevant], tu=fk, par=potpars)) POTk <- ensure3Darray(POTk) if(is.null(POT)) { #' use first result of 'pairpot' to determine dimension POT <- array(, dim=c(length(I), 1, dim(POTk)[3])) #' capture information about offsets, and names of interaction terms IsOffset <- attr(POTk, 
"IsOffset") Vnames <- dimnames(POTk)[[3]] } # insert values just computed POT[relevant, , ] <- POTk } } } POT <- ensure3Darray(POT) p <- dim(POT)[3] # create result array result <- array(0, dim=c(npoints(X), npoints(P), p), dimnames=list(NULL, NULL, Vnames)) # insert results II <- rep(I, p) JJ <- rep(J, p) KK <- rep(1:p, each=length(I)) IJK <- cbind(II, JJ, KK) result[IJK] <- POT # finally identify identical pairs and set value to 0 if(length(E) > 0) { E.rep <- apply(E, 2, rep, times=p) p.rep <- rep(1:p, each=nrow(E)) result[cbind(E.rep, p.rep)] <- 0 } attr(result, "IsOffset") <- IsOffset return(result) } spatstat.core/R/rmhstart.R0000644000176200001440000000473414144333463015251 0ustar liggesusers# # # rmhstart.R # # $Revision: 1.12 $ $Date: 2016/02/11 10:17:12 $ # # rmhstart <- function(start, ...) { UseMethod("rmhstart") } rmhstart.rmhstart <- function(start, ...) { return(start) } rmhstart.list <- function(start, ...) { st <- do.call.matched(rmhstart.default, start) return(st) } rmhstart.default <- function(start=NULL, ..., n.start=NULL, x.start=NULL) { if(!is.null(start) || length(list(...)) > 0) stop("Syntax should be rmhstart(n.start) or rmhstart(x.start)") ngiven <- !is.null(n.start) xgiven <- !is.null(x.start) # n.start and x.start are incompatible if(ngiven && xgiven) stop("Give only one of the arguments n.start and x.start") given <- if(ngiven) "n" else if(xgiven) "x" else "none" # Validate arguments if(ngiven && !is.numeric(n.start)) stop("n.start should be numeric") if(xgiven) { # We can't check x.start properly because we don't have the relevant window # Just check that it is INTERPRETABLE as a point pattern xx <- as.ppp(x.start, W=ripras, fatal=FALSE) if(is.null(xx)) stop(paste("x.start should be a point pattern object,", "or coordinate data in a format recognised by as.ppp")) } else xx <- NULL ################################################################### # return augmented list out <- list(n.start=n.start, x.start=x.start, given=given, xx=xx) class(out) <- c("rmhstart", class(out)) return(out) } print.rmhstart <- function(x, ...) { verifyclass(x, "rmhstart") cat("Metropolis-Hastings algorithm starting parameters\n") cat("Initial state: ") switch(x$given, none={ cat("not given\n") }, x = { cat("given as x.start\n") if(is.ppp(x$x.start)) print(x$x.start) else cat(paste("(x,y) coordinates of", x$xx$n, "points (window unspecified)\n")) cat("\n") }, n = { n.start <- x$n.start nstring <- if(length(n.start) == 1) paste(n.start) else paste("(", paste(n.start, collapse=","), ")", sep="") cat(paste("number fixed at n.start =", nstring, "\n")) } ) } update.rmhstart <- function(object, ...) { do.call.matched(rmhstart.default, resolve.defaults(list(...), as.list(object), .StripNull=TRUE)) } spatstat.core/R/effectfun.R0000644000176200001440000001505014144333462015342 0ustar liggesusers# # effectfun.R # # $Revision: 1.25 $ $Date: 2020/01/23 05:36:51 $ # effectfun <- local({ okclasses <- c("ppm", "kppm", "lppm", "dppm", "rppm", "profilepl") effectfun <- function(model, covname, ..., se.fit=FALSE, nvalues=256) { if(!inherits(model, okclasses)) stop(paste("First argument 'model' should be a fitted model of class", commasep(sQuote(okclasses), " or ")), call.=FALSE) orig.model <- model model <- as.ppm(model) dotargs <- list(...) 
#' determine names of covariates involved intern.names <- if(is.marked.ppm(model)) c("x", "y", "marks") else c("x", "y") needed.names <- variablesinformula(rhs.of.formula(formula(model))) #' check for clashes/quirks if("lambda" %in% needed.names) { if(is.dppm(orig.model) && ( identical.formulae(formula(model), ~offset(log(lambda))-1) || identical.formulae(formula(model), ~log(lambda)-1) )) stop("effectfun is not defined for a DPP model with fixed intensity", call.=FALSE) intensityname <- setdiff(c("Lambda", "intensity"), needed.names)[1] } else intensityname <- "lambda" ## validate the relevant covariate if(missing(covname) || is.null(covname)) { mc <- model.covariates(model) if(length(mc) == 1) covname <- mc else stop("covname must be provided") } if(!is.character(covname)) stop("covname should be a character string", call.=FALSE) if(length(covname) != 1L) stop("covname should be a single character string", call.=FALSE) # check that fixed values for all other covariates are provided given.covs <- names(dotargs) if(any(uhoh <- !(needed.names %in% c(given.covs, covname)))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "A value for the covariate", "Values for the covariates"), commasep(dQuote(needed.names[uhoh])), "must be provided (as", ngettext(nuh, "an argument", "arguments"), "to effectfun)")) } #' establish type and range of covariate values check.1.integer(nvalues) stopifnot(nvalues >= 128) N0 <- nvalues if(covname == "x") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$xrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "y") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$yrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "marks") { covtype <- "factor" Zvals <- levels(marks(data.ppm(model))) } else { # covariate is external if(is.data.frame(covdf <- model$covariates) && (covname %in% names(covdf))) { Z <- covdf[,covname] covtype <- typeof(Z) if(covtype == "double") covtype <- "real" switch(covtype, real={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, integer={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], by=ceiling((diff(Zr)+1)/N0)) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } else { Z <- getdataobjects(covname, environment(formula(model)), model$covariates)[[1L]] if(is.null(Z)) stop(paste("Cannot find covariate", sQuote(covname)), call.=FALSE) # convert to image if(!is.im(Z)) Z <- as.im(Z, W=as.owin(model)) covtype <- Z$type switch(covtype, real={ Zr <- summary(Z)$range Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } } # set up data frames of fake data for predict method # First set up default, constant value for each covariate N <- length(Zvals) fakeloc <- resolve.defaults(dotargs, list(x=0, y=0))[c("x","y")] if(is.marked.ppm(model)) { if("marks" %in% names(dotargs)) { fakeloc$marks <- dotargs$marks dotargs <- dotargs[names(dotargs) != "marks"] } else { lev <- levels(marks(data.ppm(model))) fakeloc$marks <- lev[1L] } } fakeloc <- lapply(fakeloc, padout, N=N) fakecov <- lapply(dotargs, padout, N=N) # Overwrite value for covariate of interest if(covname %in% intern.names) fakeloc[[covname]] <- Zvals else fakecov[[covname]] <- Zvals # convert to data frame fakeloc <- do.call(data.frame, fakeloc) fakecov <- if(length(fakecov) > 0) do.call(data.frame, 
fakecov) else NULL # # Now predict pred <- predict(orig.model, locations=fakeloc, covariates=fakecov, se=se.fit) if(!se.fit) lambda <- pred else { lambda <- pred$estimate se <- pred$se sedf <- data.frame(se =se, hi = lambda + 2 * se, lo = lambda - 2 * se) } # dfin <- if(!is.null(fakecov)) cbind(fakeloc, fakecov) else fakeloc dfin <- dfin[covname] dflam <- data.frame(lambda=lambda) names(dflam) <- intensityname df <- cbind(dfin, dflam) # if(covtype != "real") { result <- df if(se.fit) result <- cbind(result, sedf) } else { bc <- paren(covname) result <- fv(df, argu=covname, ylab=substitute(lambda(X), list(X=as.name(covname), lambda=as.name(intensityname))), labl=c(covname, paste("hat(%s)", bc)), valu=intensityname, alim=Zr, desc=c(paste("value of covariate", covname), "fitted intensity"), fname=intensityname) if(se.fit) { result <- bind.fv(result, sedf, labl=c(paste("se[%s]", bc), paste("%s[hi]", bc), paste("%s[lo]", bc)), desc=c("standard error of fitted trend", "upper limit of pointwise 95%% CI for trend", "lower limit of pointwise 95%% CI for trend")) fvnames(result, ".") <- c(intensityname, "hi", "lo") fvnames(result, ".s") <- c("hi", "lo") formula(result) <- paste(". ~ ", covname) } } return(result) } padout <- function(x,N) { rep.int(x[1L],N) } effectfun }) spatstat.core/R/strauss.R0000644000176200001440000001577014144333464015114 0ustar liggesusers# # # strauss.R # # $Revision: 2.46 $ $Date: 2021/01/07 03:08:41 $ # # The Strauss process # # Strauss() create an instance of the Strauss process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Strauss <- local({ # create blank template object without family and pars BlankStrauss <- list( name = "Strauss process", creator = "Strauss", family = "pairwise.family", # evaluated later pot = function(d, par) { d <= par$r }, par = list(r = NULL), # to be filled in parnames = "interaction distance", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma) && (loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL, # to be filled in # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { #' fast evaluator for Strauss interaction dont.complain.about(splitInf) if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Strauss") r <- potpars$r answer <- strausscounts(U, X, r, EqualPairs) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r return((1-gamma) * pi * r^2) }, Percy=function(d, coeffs, par, ...) 
{ ## term used in Percus-Yevick type approximation gamma <- exp(as.numeric(coeffs[1])) R <- par$r t <- abs(d/(2*R)) t <- pmin.int(t, 1) y <- 2 * R^2 * (pi * (1-gamma) - (1-gamma)^2 * (acos(t) - t * sqrt(1 - t^2))) return(y) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { r <- inte$par$r X <- as.ppp(X) # algorithm is the same for data and dummy points nX <- npoints(X) cl <- weightedclosepairs(X, r, correction=correction, what="indices") if(is.null(cl)) return(NULL) v <- sparseMatrix(i=cl$i, j=cl$j, x=cl$weight, dims=c(nX, nX)) if(!sparseOK) v <- as.matrix(v) return(v) } ) class(BlankStrauss) <- "interact" # Finally define main function Strauss <- function(r) { instantiate.interact(BlankStrauss, list(r=r)) } Strauss <- intermaker(Strauss, BlankStrauss) Strauss }) # generally accessible functions strausscounts <- function(U, X, r, EqualPairs=NULL) { answer <- crosspaircounts(U,X,r) # subtract counts of identical pairs if(length(EqualPairs) > 0) { nU <- npoints(U) idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU))) answer <- answer - idcount } return(answer) } closepaircounts <- function(X, r) { stopifnot(is.ppp(X)) stopifnot(is.numeric(r) && length(r) == 1) stopifnot(is.finite(r)) stopifnot(r >= 0) # sort in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] nX <- npoints(X) # call C routine (defined in Estrauss.c) out <- .C(SC_Cclosepaircounts, nxy = as.integer(nX), x = as.double(Xsort$x), y = as.double(Xsort$y), rmaxi = as.double(r), counts = as.integer(integer(nX)), PACKAGE="spatstat.core") answer <- integer(nX) answer[oX] <- out$counts return(answer) } crosspaircounts <- function(X, Y, r) { stopifnot(is.ppp(X)) stopifnot(is.numeric(r) && length(r) == 1) stopifnot(is.finite(r)) stopifnot(r >= 0) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine (defined in Estrauss.c) out <- .C(SC_Ccrosspaircounts, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), counts = as.integer(integer(nX)), PACKAGE="spatstat.core") answer <- integer(nX) answer[oX] <- out$counts return(answer) } weightedclosepairs <- function(X, r, correction, what=c("all", "indices", "ijd")) { what <- match.arg(what) ## return list(i,j,..,weight) for all r-close pairs switch(correction, none = , border = { cl <- closepairs(X, r, what=what) weight <- rep(1, length(cl$i)) }, isotropic = , Ripley = { if(what == "indices") { cl <- closepairs(X, r, what="ijd") weight <- edge.Ripley(X[cl$i], cl$d) cl <- cl[c("i", "j")] } else { cl <- closepairs(X, r, what=what) weight <- edge.Ripley(X[cl$i], cl$d) } }, translate = { cl <- closepairs(X, r, what="all") weight <- edge.Trans(dx = cl$dx, dy = cl$dy, W = Window(X), paired=TRUE) switch(what, indices = { cl <- cl[c("i", "j")] }, ijd = { cl <- cl[c("i", "j", "d")] }, all = { }) }, periodic = { cl <- closepairs(X, r, what=what, periodic=TRUE) weight <- rep(1, length(cl$i)) }, { warning(paste("Unrecognised correction", sQuote(correction)), call.=FALSE) return(NULL) } ) result <- append(cl, list(weight=as.numeric(weight))) return(result) } spatstat.core/R/relrisk.ppm.R0000644000176200001440000003307414144333463015652 0ustar liggesusers## ## relrisk.ppm.R ## ## $Revision: 1.9 $ $Date: 2019/01/08 07:44:07 $ ## relrisk.ppm <- local({ relrisk.ppm <- function(X, ..., at=c("pixels", "points"), 
relative=FALSE, se=FALSE, casecontrol=TRUE, control=1, case, ngrid=NULL, window=NULL) { stopifnot(is.ppm(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) if(!relative && (control.given || case.given)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE")) } model <- X Y <- data.ppm(model) types <- levels(marks(Y)) ntypes <- length(types) # np <- length(coef(model)) ## compute probabilities or risks if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } switch(at, pixels= { ## estimate is a single image ## compute images of intensities of each mark lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities.. ## total intensity (image) lambda.all <- im.apply(lambda.each, sum, check=FALSE) ## WAS: lambda.all <- Reduce("+", lambda.each) if(!se) { result <- lambda.each[[icase]]/lambda.all result <- killglitches(result) } else { probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) estimate <- probs[[icase]] SE <- SEprobPixels(model, probs)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } else { ## relative risks lambda.ctrl <- lambda.each[[icontrol]] if(!se) { result <- lambda.each[[icase]]/lambda.ctrl result <- killglitches(result) } else { risks <- lapply(lambda.each, "/", e2=lambda.ctrl) risks <- as.solist(lapply(risks, killglitches)) estimate <- risks[[icase]] SE <- SErelriskPixels(model, risks, icontrol)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } }, points={ ## compute intensities of each type Ycase <- unmark(Y) %mark% factor(types[icase], levels=types) Yctrl <- unmark(Y) %mark% factor(types[icontrol], levels=types) lambda.case <- predict(model, locations=Ycase) lambda.ctrl <- predict(model, locations=Yctrl) if(!relative) { ## compute probabilities ## total intensity lambda.all <- lambda.case + lambda.ctrl prob.case <- lambda.case/lambda.all if(!se) { result <- prob.case } else { probs <- matrix(, length(prob.case), 2) probs[,icase] <- prob.case probs[,icontrol] <- 1 - prob.case SE <- SEprobPoints(model, probs)[,icase] result <- list(estimate=prob.case, SE=SE) } } else { ## compute relative risks risk.case <- lambda.case/lambda.ctrl if(!se) { result <- risk.case } else { risks <- matrix(, length(risk.case), 2) risks[,icase] <- risk.case risks[,icontrol] <- 1 SE <- SErelriskPoints(model, risks, icontrol)[,icase] result <- list(estimate=risk.case, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) 
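## The 'control' type is the reference category for relative risk:
## risks are the ratios lambda_m(u) / lambda_control(u).
## Usage sketch (not run), assuming the multitype 'amacrine' data:
##   fit <- ppm(amacrine ~ marks * x)
##   relrisk(fit)                                  # type probabilities
##   relrisk(fit, relative=TRUE, control="off", se=TRUE)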
if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ ## estimate is a list of images ## Compute images of intensities of each type lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities... ## image of total intensity lambda.all <- im.apply(lambda.each, sum, check=FALSE) ## WAS: lambda.all <- Reduce("+", lambda.each) probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) if(!se) { result <- probs } else { SE <- SEprobPixels(model, probs) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lapply(lambda.each, "/", e2=lambda.each[[icontrol]]) risks <- as.solist(lapply(risks, killglitches)) if(!se) { result <- risks } else { SE <- SErelriskPixels(model, risks, icontrol) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=risks, SE=SE) } } }, points = { ## matrix of intensities of each type at each point ## rows=locations, cols=types lambda.each <- sapply(types, predictfortype, loc=unmark(Y), model=model, types=types) if(!relative) { ## compute probabilities lambda.all <- rowSums(lambda.each) probs <- lambda.each/lambda.all if(!se) { result <- probs } else { SE <- SEprobPoints(model, probs) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lambda.each/lambda.each[,icontrol] if(!se) { result <- risks } else { SE <- SErelriskPoints(model, risks, icontrol) result <- list(estimate=risks, SE=SE) } } }) } return(result) } modmats <- function(model) { # model matrices for data locations for each possible mark QM <- quad.ppm(model) Y <- QM$data suppressWarnings({ QR <- quadscheme.replicated(Y, unmark(Y[FALSE])) }) sourceid <- QR$param$sourceid ## canonical covariates mm <- model.matrix(model, Q=QR) ## mm is a matrix with one column for canonical covariate ## and one row for each marked point in QR. 
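## (These per-type model matrices feed the delta-method variance
## computations in the SE* helpers: for a function g(beta) with
## gradient rows R, var(g) is approximated by R %*% vcov(model) %*% t(R).)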
mm <- cbind(data.frame(".s"=sourceid, ".m"=marks(QR)), mm) ## Split by marks ss <- split(mm, mm$.m) ## Reorganise into compatible matrices zz <- lapply(ss, reorg) return(zz) } reorg <- function(x) { z <- x rownames(z) <- NULL z[x$.s, ] <- z return(z[,-(1:2), drop=FALSE]) } SErelriskPoints <- function(model, riskvalues, icontrol) { ## riskvalues is a matrix with rows=data locations, cols=types types <- colnames(riskvalues) ntypes <- length(types) ## S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having one row per data location dS.um <- lapply(S.um, "-", e2=S.um[[icontrol]]) R.um <- mapply("*", dS.um, as.list(as.data.frame(riskvalues)), SIMPLIFY=FALSE) ## likewise R.um is a list of matrices ## vc <- vcov(model) VAR <- lapply(R.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } msubtract <- function(z1, z2) mapply("-", e1=z1, e2=z2, SIMPLIFY=FALSE) mmultiply <- function(z1, z2) solapply(z1, "*", e2=z2) SErelriskPixels <- function(model, riskvalues, icontrol) { ## riskvalues is an imlist types <- names(riskvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate dS.um <- lapply(S.um, msubtract, z2=S.um[,icontrol,drop=TRUE]) R.um <- mapply(mmultiply, z1=dS.um, z2=riskvalues, SIMPLIFY=FALSE) VAR <- vector(mode="list", length=ntypes) ntypes <- length(types) vc <- vcov(model) ncoef <- nrow(vc) for(type in 1:ntypes) { v <- 0 Rum <- R.um[[type]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Rum[[i]] * vc[i,j] * Rum[[j]] } } VAR[[type]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) return(SE) } SEprobPixels <- function(model, probvalues) { ## probvalues is an imlist types <- names(probvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate ncoef <- length(coef(model)) Sbar.u <- vector(mode="list", length=ncoef) for(k in 1:ncoef) { A <- mapply("*", e1=S.um[k,,drop=TRUE], e2=probvalues, SIMPLIFY=FALSE) Sbar.u[[k]] <- im.apply(A, sum) } ## Sbar.u is a list of images, one for each canonical covariate Sdif.um <- lapply(as.list(S.um), msubtract, z2=Sbar.u) ## Sdif.um is a list of lists of images. 
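## [Delta method: the gradient of the fitted probability p_m(u) with
##  respect to the canonical coefficients is p_m(u) * (S(u,m) - Sbar(u)),
##  so the variance computed below is the quadratic form of P.um in the
##  coefficient covariance matrix vcov(model).]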
## List of length ntypes, ## each entry being an imlist of length ncoef P.um <- mapply(mmultiply, Sdif.um, probvalues, SIMPLIFY=FALSE) ## P.um is same format as Sdif.um vc <- vcov(model) ncoef <- nrow(vc) VAR <- vector(mode="list", length=ntypes) for(m in 1:ntypes) { v <- 0 Pum <- P.um[[m]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Pum[[i]] * vc[i,j] * Pum[[j]] } } VAR[[m]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) } SEprobPoints <- function(model, probvalues) { ## probvalues is a matrix with row=location and column=type types <- colnames(probvalues) ntypes <- length(types) ## canonical covariates S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having rows=locations and cols=covariates ## Weight each matrix by its mark probabilities SW <- mapply("*", e1=S.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## average them Sbar.u <- Reduce("+", SW) ## Sbar.u is a matrix with rows=locations and cols=covariates Sdif.um <- lapply(S.um, "-", e2=Sbar.u) ## Sdif.um is a list of matrices like S.um P.um <- mapply("*", e1=Sdif.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## P.um likewise vc <- vcov(model) VAR <- lapply(P.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } predictfortype <- function(type, model, types, loc) { predict(model, locations=loc %mark% factor(type, levels=types)) } killglitches <- function(z, eps=.Machine$double.eps) { ra <- range(z, finite=TRUE) if(max(abs(ra)) < eps) { z[] <- 0 return(z) } if(diff(ra) < eps) z[] <- mean(z, na.rm=TRUE) return(z) } relrisk.ppm }) spatstat.core/R/edgeRipley.R0000644000176200001440000002004614144333462015467 0ustar liggesusers# # edgeRipley.R # # $Revision: 1.20 $ $Date: 2021/10/25 10:26:05 $ # # Ripley isotropic edge correction weights # # edge.Ripley(X, r, W) compute isotropic correction weights # for centres X[i], radii r[i,j], window W # # To estimate the K-function see the idiom in "Kest.S" # ####################################################################### edge.Ripley <- local({ small <- function(x) { abs(x) < .Machine$double.eps } hang <- function(d, r) { nr <- nrow(r) nc <- ncol(r) answer <- matrix(0, nrow=nr, ncol=nc) # replicate d[i] over j index d <- matrix(d, nrow=nr, ncol=nc) hit <- (d < r) answer[hit] <- acos(d[hit]/r[hit]) answer } edge.Ripley <- function(X, r, W=Window(X), method=c("C", "interpreted"), maxweight=100, internal=list()) { # X is a point pattern, or equivalent X <- as.ppp(X, W) W <- X$window method <- match.arg(method) debug <- resolve.1.default(list(debug=FALSE), internal) repair <- resolve.1.default(list(repair=TRUE), internal) switch(W$type, rectangle={}, polygonal={ if(method != "C") stop(paste("Ripley isotropic correction for polygonal windows", "requires method = ", dQuote("C"))) }, mask={ stop(paste("sorry, Ripley isotropic correction", "is not implemented for binary masks")) } ) n <- npoints(X) if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { if(length(r) != n) stop("length of r is incompatible with the number of points in X") r <- matrix(r, nrow=n) } # Nr <- nrow(r) Nc <- ncol(r) if(Nr * Nc == 0) return(r) ########## x <- X$x y <- X$y switch(method, interpreted = { ######## interpreted R code for rectangular case ######### # perpendicular distance from point to each edge of rectangle # L = left, R = right, D = down, U = up dL <- x - 
W$xrange[1L] dR <- W$xrange[2L] - x dD <- y - W$yrange[1L] dU <- W$yrange[2L] - y # detect whether any points are corners of the rectangle corner <- (small(dL) + small(dR) + small(dD) + small(dU) >= 2) # angle between (a) perpendicular to edge of rectangle # and (b) line from point to corner of rectangle bLU <- atan2(dU, dL) bLD <- atan2(dD, dL) bRU <- atan2(dU, dR) bRD <- atan2(dD, dR) bUL <- atan2(dL, dU) bUR <- atan2(dR, dU) bDL <- atan2(dL, dD) bDR <- atan2(dR, dD) # The above are all vectors [i] # Now we compute matrices [i,j] # half the angle subtended by the intersection between # the circle of radius r[i,j] centred on point i # and each edge of the rectangle (prolonged to an infinite line) aL <- hang(dL, r) aR <- hang(dR, r) aD <- hang(dD, r) aU <- hang(dU, r) # apply maxima # note: a* are matrices; b** are vectors; # b** are implicitly replicated over j index cL <- pmin.int(aL, bLU) + pmin.int(aL, bLD) cR <- pmin.int(aR, bRU) + pmin.int(aR, bRD) cU <- pmin.int(aU, bUL) + pmin.int(aU, bUR) cD <- pmin.int(aD, bDL) + pmin.int(aD, bDR) # total exterior angle ext <- cL + cR + cU + cD ext <- matrix(ext, Nr, Nc) # add pi/2 for corners if(any(corner)) ext[corner,] <- ext[corner,] + pi/2 # OK, now compute weight weight <- 1 / (1 - ext/(2 * pi)) }, C = { ############ C code ############################# switch(W$type, rectangle={ if(!debug) { z <- .C(SC_ripleybox, nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1L]), ymin=as.double(W$yrange[1L]), xmax=as.double(W$xrange[2L]), ymax=as.double(W$yrange[2L]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.core") } else { z <- .C(SC_ripboxDebug, nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1L]), ymin=as.double(W$yrange[1L]), xmax=as.double(W$xrange[2L]), ymax=as.double(W$yrange[2L]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.core") } weight <- matrix(z$out, nrow=Nr, ncol=Nc) }, polygonal={ Y <- edges(W) bd <- bdist.points(X) if(!debug) { z <- .C(SC_ripleypoly, nc=as.integer(n), xc=as.double(x), yc=as.double(y), bd=as.double(bd), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.core") } else { z <- .C(SC_rippolDebug, nc=as.integer(n), xc=as.double(x), yc=as.double(y), bd=as.double(bd), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.core") } angles <- matrix(z$out, nrow = Nr, ncol = Nc) weight <- 2 * pi/angles } ) } ) ## eliminate wild values if(repair) weight <- matrix(pmax.int(1, pmin.int(maxweight, weight)), nrow=Nr, ncol=Nc) return(weight) } edge.Ripley }) rmax.Ripley <- function(W) { W <- as.owin(W) if(is.rectangle(W)) return(boundingradius(W)) if(is.polygonal(W) && length(W$bdry) == 1L) return(boundingradius(W)) ## could have multiple connected components pieces <- tiles(tess(image=connected(W))) answer <- sapply(pieces, boundingradius) return(as.numeric(answer)) } spatstat.core/R/hierpair.family.R0000644000176200001440000003150714144333462016465 0ustar liggesusers# # # hierpair.family.R # # $Revision: 1.12 $ $Date: 2020/11/16 01:32:06 $ # # The family of hierarchical pairwise interactions # 
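#
# This family object is internal: it underpins hierarchical pairwise
# interactions such as HierStrauss() and HierHard().  A usage sketch
# (hypothetical interaction radii, chosen purely for illustration):
#
#   fit <- ppm(ants ~ marks, HierStrauss(radii=matrix(45, 2, 2)))
#   plot(fitin(fit))    # dispatches to hierpair.family$plot
#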
# # ------------------------------------------------------------------- # hierpair.family <- list( name = "hierpair", print = function(self) { splat("Hierarchical pairwise interaction family") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hierpair") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) stop("Unable to determine types of points") if(!is.factor(types)) types <- factor(types, levels=types) ## compute each potential and store in `fasp' object m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3L] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3L])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { ## relevant position in matrix ijpos <- i + (j-1L) * m which[i,j] <- ijpos ## extract values of potential yy <- y[tx == types[i], j] ## make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", quote(h(r)), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1")) } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(quote(funz)), list(...), list(ylim=ylim, ylab="Pairwise interaction", xlab="Distance"))) return(invisible(funz)) }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { ## ## This is the eval function for the `hierpair' family. 
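## It evaluates the pair potential between the data points X and the
## quadrature points U, applies the requested edge correction, and
## returns a matrix V with one row per point of U and one column per
## canonical statistic, with the raw pair potentials attached as
## attribute "POT" and offset columns flagged by attribute "IsOffset".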
##
  fop <- names(formals(pairpot))
  if(isTRUE(all.equal(fop, c("d", "par"))))
    marx <- FALSE
  else if(isTRUE(all.equal(fop, c("d", "tx", "tu", "par"))))
    marx <- TRUE
  else
    stop("Formal arguments of pair potential function are not understood")
  ## edge correction argument
  if(length(correction) > 1)
    stop("Only one edge correction allowed at a time!")
  if(!any(correction == c("periodic", "border", "translate", "translation",
                          "isotropic", "Ripley", "none")))
    stop(paste("Unrecognised edge correction", sQuote(correction)))
  #### Compute basic data
  # Decide whether to apply faster algorithm using 'closepairs'
  # (the leading FALSE deliberately disables this fast path)
  use.closepairs <- FALSE &&
    (correction %in% c("none", "border", "translate", "translation")) &&
    !is.null(Reach) && is.finite(Reach) &&
    is.null(precomputed) && !savecomputed
  if(!is.null(precomputed)) {
    # precomputed
    X <- precomputed$X
    U <- precomputed$U
    EqualPairs <- precomputed$E
    M <- precomputed$M
  } else {
    U <- as.ppp(U, X$window)   # i.e. X$window is DEFAULT window
    if(!use.closepairs)
      # Form the matrix of distances
      M <- crossdist(X, U, periodic=(correction=="periodic"))
  }
  nX <- npoints(X)
  nU <- npoints(U)
  dimM <- c(nX, nU)
  # Evaluate the pairwise potential without edge correction
  if(use.closepairs)
    POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach)
  else if(!marx)
    POT <- pairpot(M, potpars)
  else
    POT <- pairpot(M, marks(X), marks(U), potpars)
  # Determine whether each column of potential is an offset
  IsOffset <- attr(POT, "IsOffset")
  # Check errors and special cases
  if(!is.matrix(POT) && !is.array(POT)) {
    if(length(POT) == 0 && X$n == 0)  # empty pattern
      POT <- array(POT, dim=c(dimM,1))
    else
      stop("Pair potential did not return a matrix or array")
  }
  if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) {
    whinge <- paste0(
      "The pair potential function ", short.deparse(substitute(pairpot)),
      " must produce a matrix or array with its first two dimensions\n",
      "the same as the dimensions of its input.\n")
    stop(whinge)
  }
  # make it a 3D array
  if(length(dim(POT))==2)
    POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL)
  if(correction == "translate" || correction == "translation") {
    edgewt <- edge.Trans(X, U)
    # sanity check ("everybody knows there ain't no...")
    if(!is.matrix(edgewt))
      stop("internal error: edge.Trans() did not yield a matrix")
    if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x))
      stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions")
    POT <- c(edgewt) * POT
  } else if(correction == "isotropic" || correction == "Ripley") {
    # weights are required for contributions from QUADRATURE points
    edgewt <- t(edge.Ripley(U, t(M), X$window))
    if(!is.matrix(edgewt))
      stop("internal error: edge.Ripley() did not return a matrix")
    if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x))
      stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions")
    POT <- c(edgewt) * POT
  }
  # No pair potential term between a point and itself
  if(length(EqualPairs) > 0) {
    nplanes <- dim(POT)[3L]
    for(k in 1:nplanes)
      POT[cbind(EqualPairs, k)] <- 0
  }
  # Return just the pair potential?
if(pot.only)
    return(POT)
  # Sum the pairwise potentials
  V <- apply(POT, c(2,3), sum)
  # attach the original pair potentials
  attr(V, "POT") <- POT
  # attach the offset identifier
  attr(V, "IsOffset") <- IsOffset
  # pass computed information out the back door
  if(savecomputed)
    attr(V, "computed") <- list(E=EqualPairs, M=M)
  return(V)
  },
  ######### end of function $eval
  suffstat = function(model, X=NULL, callstring="hierpair.family$suffstat") {
    # for hierarchical pairwise models only (possibly nonstationary)
    verifyclass(model, "ppm")
    if(!identical(model$interaction$family$name,"hierpair"))
      stop("Model is not a hierarchical pairwise interaction process")
    if(is.null(X)) {
      X <- data.ppm(model)
      modelX <- model
    } else {
      verifyclass(X, "ppp")
      modelX <- update(model, X, method="mpl")
    }
    # find data points which do not contribute to pseudolikelihood
    mplsubset <- getglmdata(modelX)$.mpl.SUBSET
    mpldata <- is.data(quad.ppm(modelX))
    contribute <- mplsubset[mpldata]
    Xin <- X[contribute]
    Xout <- X[!contribute]
    # partial model matrix arising from ordered pairs of data points
    # which both contribute to the pseudolikelihood
    Empty <- X[integer(0)]
    momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat")
    # partial model matrix at data points which contribute to the pseudolikelihood
    momIN <- partialModelMatrix(X, Empty, model, "suffstat")[contribute, , drop=FALSE]
    # partial model matrix arising from ordered pairs of data points
    # the second of which does not contribute to the pseudolikelihood
    mom <- partialModelMatrix(Xout, Xin, model, "suffstat")
    indx <- Xout$n + seq_len(Xin$n)
    momINxOUT <- mom[indx, , drop=FALSE]
    ## determine which canonical covariates are true second-order terms
    ## eg 'mark1x1'
    typ <- levels(marks(X))
    vn <- paste0("mark", typ, "x", typ)
    order2 <- names(coef(model)) %in% vn
    order1 <- !order2
    result <- 0 * coef(model)
    if(any(order1)) {
      # first order contributions (including 'mark1x2' etc)
      o1terms <- momIN[ , order1, drop=FALSE]
      o1sum <- colSums(o1terms)
      result[order1] <- o1sum
    }
    if(any(order2)) {
      # adjust for double counting of ordered pairs in INxIN but not INxOUT
      o2termsINxIN <- momINxIN[, order2, drop=FALSE]
      o2termsINxOUT <- momINxOUT[, order2, drop=FALSE]
      o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT)
      result[order2] <- o2sum
    }
    return(result)
  },
  ######### end of function $suffstat
  delta2 = function(X, inte, correction, ...) {
    # Sufficient statistic for second order conditional intensity
    # for hierarchical pairwise interaction processes
    # Equivalent to evaluating pair potential.
    if(is.ppp(X)) {
      nX <- npoints(X)
      seqX <- seq_len(nX)
      E <- cbind(seqX, seqX)
      R <- reach(inte)
      POT <- hierpair.family$eval(X,X,E, inte$pot,inte$par, correction,
                                  pot.only=TRUE, Reach=R, splitInf=TRUE)
      result <- aperm(POT, c(2,1,3))
      M <- attr(POT, "IsNegInf")
      if(!is.null(M)) {
        #' validate
        if(length(dim(M)) != 3)
          stop("Internal error: IsNegInf is not a 3D array")
        M <- aperm(M, c(2,1,3))
        #' collapse vector-valued potential, yielding a matrix
        M <- apply(M, c(1,2), any)
        if(!is.matrix(M))
          M <- matrix(M, nrow=nX)
        #' count conflicts
        hits <- colSums(M)
        #' hits[j] == 1 implies that X[j] violates hard core with only one X[i]
        #' and therefore changes status if X[i] is deleted.
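#' (The 'deltaInf' attribute assembled below flags these pairs, so that
        #' downstream second-order calculations can treat the -Inf
        #' contributions of hard-core potentials explicitly.)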
deltaInf <- M deltaInf[, hits != 1] <- FALSE attr(result, "deltaInf") <- deltaInf } } else if(is.quad(X)) { U <- union.quad(X) izdat <- is.data(X) nU <- npoints(U) nX <- npoints(X$data) seqU <- seq_len(nU) E <- cbind(seqU, seqU) R <- reach(inte) POT <- hierpair.family$eval(U,U,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) result <- aperm(POT, c(2,1,3)) M <- attr(POT, "IsNegInf") if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") M <- aperm(M, c(2,1,3)) #' consider conflicts with data points MXU <- M[izdat, , , drop=FALSE] #' collapse vector-valued potential, yielding a matrix MXU <- apply(MXU, c(1,2), any) if(!is.matrix(MXU)) MXU <- matrix(MXU, nrow=nX) #' count data points conflicting with each quadrature point nhitdata <- colSums(MXU) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], deltaInf <- apply(M, c(1,2), any) deltaInf[izdat, nhitdata != 1] <- FALSE #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. deltaInf[!izdat, nhitdata != 0] <- FALSE attr(result, "deltaInf") <- deltaInf } } return(result) } ######### end of function $delta2 ) ######### end of list class(hierpair.family) <- "isf" spatstat.core/R/Kmodel.R0000644000176200001440000000032414144333461014605 0ustar liggesusers# # Kmodel.R # # Kmodel and pcfmodel # # $Revision: 1.1 $ $Date: 2011/05/30 14:02:21 $ # Kmodel <- function(model, ...) { UseMethod("Kmodel") } pcfmodel <- function(model, ...) { UseMethod("pcfmodel") } spatstat.core/R/hybrid.family.R0000644000176200001440000001565414144333462016150 0ustar liggesusers# # hybrid.family.R # # $Revision: 1.14 $ $Date: 2020/11/16 01:32:06 $ # # Hybrid interactions # # hybrid.family: object of class 'isf' defining pairwise interaction # # ------------------------------------------------------------------- # hybrid.family <- list( name = "hybrid", print = function(self) { cat("Hybrid interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE, separate=FALSE) { # plot hybrid interaction if possible verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hybrid") stop("Tried to plot the wrong kind of interaction") if(is.null(d)) { # compute reach and determine max distance for plots dmax <- 1.25 * reach(inter) if(!is.finite(dmax)) { # interaction has infinite reach # Are plot limits specified? xlim <- resolve.defaults(list(...), list(xlim=c(0, Inf))) if(all(is.finite(xlim))) dmax <- max(xlim) else stop("Interaction has infinite reach; need to specify xlim or d") } d <- seq(0, dmax, length=256) } # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # extract the component interactions interlist <- inter$par # check that they are all pairwise interactions families <- unlist(lapply(interlist, interactionfamilyname)) if(!separate && !all(families == "pairwise")) { warning(paste("Cannot compute the resultant function;", "not all components are pairwise interactions;", "plotting each component separately")) separate <- TRUE } # deal with each interaction ninter <- length(interlist) results <- list() for(i in 1:ninter) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. 
<- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. prefixlength <- nchar(nameI.) Vprefix <- substr(Vnames, 1, prefixlength) relevant <- (Vprefix == nameI.) # construct fii object for this component fitinI <- fii(interI, coeff[relevant], Vnames[relevant], IsOffset[relevant]) # convert to fv object a <- plot(fitinI, ..., d=d, plotit=FALSE) aa <- list(a) names(aa) <- nameI results <- append(results, aa) } # computation of resultant is only implemented for fv objects if(!separate && !all(unlist(lapply(results, is.fv)))) { warning(paste("Cannot compute the resultant function;", "not all interaction components yielded an fv object;", "plotting separate results for each component")) separate <- TRUE } # return separate 'fv' or 'fasp' objects if required results <- as.anylist(results) if(separate) { if(plotit) { main0 <- "Pairwise interaction components" do.call(plot, resolve.defaults(list(quote(results)), list(...), list(main=main0))) } return(invisible(results)) } # multiply together to obtain resultant pairwise interaction ans <- results[[1L]] if(ninter >= 2) { for(i in 2:ninter) { Fi <- results[[i]] ans <- eval.fv(ans * Fi) } copyover <- c("ylab", "yexp", "labl", "desc", "fname") attributes(ans)[copyover] <- attributes(results[[1L]])[copyover] } main0 <- "Resultant pairwise interaction" if(plotit) do.call(plot, resolve.defaults(list(quote(ans)), list(...), list(main=main0))) return(invisible(ans)) }, eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # `pot' is ignored; `pars' is a list of interactions nU <- length(U$x) V <- matrix(, nU, 0) IsOffset <- logical(0) for(i in 1:length(pars)) { # extract i-th component interaction interI <- pars[[i]] nameI <- names(pars)[[i]] # compute potential for i-th component VI <- evalInteraction(X, U, EqualPairs, interI, correction, ...) 
if(ncol(VI) > 0) { if(ncol(VI) > 1 && is.null(colnames(VI))) # make up names colnames(VI) <- paste("Interaction", seq(ncol(VI)), sep=".") # prefix label with name of i-th component colnames(VI) <- paste(nameI, dimnames(VI)[[2L]], sep=".") # handle IsOffset offI <- attr(VI, "IsOffset") if(is.null(offI)) offI <- rep.int(FALSE, ncol(VI)) # tack on IsOffset <- c(IsOffset, offI) # append to matrix V V <- cbind(V, VI) } } if(any(IsOffset)) attr(V, "IsOffset") <- IsOffset return(V) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { ## Sufficient statistic for second order conditional intensity result <- NULL deltaInf <- FALSE interlist <- inte$par for(ii in interlist) { v <- NULL ## look for 'delta2' in component interaction 'ii' if(!is.null(delta2 <- ii$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) ## look for 'delta2' in family of component 'ii' if(is.null(v) && !is.null(delta2 <- ii$family$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) if(is.null(v)) { ## no special algorithm available: generic algorithm needed return(NULL) } if(is.null(result)) { result <- v } else if(inherits(v, c("sparse3Darray", "sparseMatrix"))) { result <- bind.sparse3Darray(result, v, along=3) } else { result <- abind::abind(as.array(result), v, along=3) } deltaInf <- deltaInf | (attr(v, "deltaInf") %orifnull% FALSE) } if(length(dim(deltaInf))) attr(result, "deltaInf") <- deltaInf return(result) }, suffstat = NULL ) class(hybrid.family) <- "isf" spatstat.core/R/cdftest.R0000644000176200001440000003273214144333461015036 0ustar liggesusers# # cdftest.R # # $Revision: 2.26 $ $Date: 2021/04/08 03:45:49 $ # # cdf.test <- function(...) { UseMethod("cdf.test") } cdf.test.ppp <- function(X, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE) { Xname <- short.deparse(substitute(X)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) if(is.character(covariate)) covname <- covariate if(!is.marked(X, dfok=TRUE)) { # unmarked model <- ppm(X) modelname <- "CSR" } else if(is.multitype(X)) { # multitype mf <- summary(X)$marks$frequency if(all(mf > 0)) { model <- ppm(X ~marks) modelname <- "CSRI" } else { warning("Ignoring marks, because some mark values have zero frequency") X <- unmark(X) model <- ppm(X) modelname <- "CSR" } } else { # marked - general case X <- unmark(X) warning("marks ignored") model <- ppm(X) modelname <- "CSR" } dont.complain.about(model) do.call(spatialCDFtest, resolve.defaults(list(model=quote(model), covariate=quote(covariate), test=test), list(interpolate=interpolate, jitter=jitter), list(...), list(modelname=modelname, covname=covname, dataname=Xname))) } cdf.test.ppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) { modelname <- short.deparse(substitute(model)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) verifyclass(model, "ppm") if(is.character(covariate)) covname <- covariate if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(spatialCDFtest, resolve.defaults(list(model=quote(model), covariate=quote(covariate), test=test), list(interpolate=interpolate, jitter=jitter, nsim=nsim, verbose=verbose), list(...), list(modelname=modelname, covname=covname))) } cdf.test.slrm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., modelname=NULL, covname=NULL) { # get names if(is.null(modelname)) modelname <- short.deparse(substitute(model)) 
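# (display names default to deparsed call arguments, as in the
  #  ppp and ppm methods above)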
if(is.null(covname)) covname <- short.deparse(substitute(covariate)) dataname <- model$CallInfo$responsename test <- match.arg(test) # stopifnot(is.slrm(model)) stopifnot(is.im(covariate)) # extract data prob <- fitted(model) covim <- as.im(covariate, W=as.owin(prob)) probvalu <- as.matrix(prob) covvalu <- as.matrix(covim) ok <- !is.na(probvalu) & !is.na(covvalu) probvalu <- as.vector(probvalu[ok]) covvalu <- as.vector(covvalu[ok]) # compile weighted cdf's FZ <- ewcdf(covvalu, probvalu/sum(probvalu)) X <- model$Data$response ZX <- safelookup(covim, X) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } if(length(xxx) > 1) { #' non-degenerate cdf ## replace by piecewise linear approximation FZ <- approxfun(xxx, yyy, rule=2) } # now apply cdf U <- FZ(ZX) # Test uniformity of transformed values result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") # modify the 'htest' entries result$method <- paste("Spatial", testname, "test of", "inhomogeneous Poisson process", "in two dimensions") result$data.name <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n and transformed to uniform distribution under", sQuote(modelname)) # additional class 'cdftest' class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues=covvalu, ZX=ZX, FZ=FZ, FZX=ecdf(ZX), U=U) attr(result, "info") <- list(modelname=modelname, covname=covname, dataname=dataname, csr=FALSE) return(result) } #............. helper functions ........................# spatialCDFtest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., dimyx=NULL, eps=NULL, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE, modelname=NULL, covname=NULL, dataname=NULL) { ## conduct test based on comparison of CDF's of covariate values test <- match.arg(test) ## compute the essential data fra <- spatialCDFframe(model, covariate, dimyx=dimyx, eps=eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) ## calculate the test statistic result <- spatialCDFtestCalc(fra, test=test, ...) if(is.poisson(model)) return(result) ## Gibbs model: perform Monte Carlo test result$poisson.p.value <- pobs <- result$p.value result$poisson.statistic <- tobs <- result$statistic Xsim <- simulate(model, nsim=nsim, progress=verbose) sim.pvals <- sim.stats <- numeric(nsim) if(verbose) { cat("Processing.. 
") state <- list() } for(i in seq_len(nsim)) { model.i <- update(model, Xsim[[i]]) fra.i <- spatialCDFframe(model.i, covariate, dimyx=dimyx, eps=eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) res.i <- spatialCDFtestCalc(fra.i, test=test, ..., details=FALSE) sim.pvals[i] <- res.i$p.value sim.stats[i] <- res.i$statistic if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") result$sim.pvals <- sim.pvals result$sim.stats <- sim.stats ## Monte Carlo p-value ## For tied p-values, first compare values of test statistics ## (because p = 0 may occur due to rounding) ## otherwise resolve ties by randomisation nless <- sum(sim.pvals < pobs) nplus <- sum(sim.pvals == pobs & sim.stats > tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) ## modify the 'htest' entries testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Monte Carlo spatial", testname, "test", "of Gibbs process in", fra$info$spacename) return(result) } spatialCDFtestCalc <- function(fra, test=c("ks", "cvm", "ad"), ..., details=TRUE) { test <- match.arg(test) values <- fra$values info <- fra$info ## Test uniformity of transformed values U <- values$U result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) # shortcut for internal use only if(!details) return(result) ## add a full explanation, internal data, etc. ## modify the 'htest' entries csr <- info$csr ispois <- info$ispois modelname <- if(csr) "CSR" else if(ispois) "inhomogeneous Poisson process" else "Gibbs process" testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test of", modelname, "in", info$spacename) result$data.name <- paste("covariate", sQuote(singlestring(info$covname)), "evaluated at points of", sQuote(info$dataname), "\n and transformed to uniform distribution under", if(csr) info$modelname else sQuote(info$modelname)) ## include internal data attr(result, "frame") <- fra ## additional class 'cdftest' class(result) <- c("cdftest", class(result)) return(result) } spatialCDFframe <- function(model, covariate, ..., jitter=TRUE) { # evaluate CDF of covariate values at data points and at pixels stuff <- evalCovar(model, covariate, ..., jitter=jitter) # extract values <- stuff$values # info <- stuff$info Zvalues <- values$Zvalues lambda <- values$lambda weights <- values$weights ZX <- values$ZX # compute empirical cdf of Z values at points of X FZX <- ecdf(ZX) # form weighted cdf of Z values in window wts <- lambda * weights sumwts <- sum(wts) FZ <- ewcdf(Zvalues, wts/sumwts) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) minZX <- min(ZX, na.rm=TRUE) minxxx <- min(xxx, na.rm=TRUE) if(minxxx > minZX) { xxx <- c(minZX, xxx) yyy <- c(0, yyy) } maxZX <- max(ZX, na.rm=TRUE) maxxxx <- max(xxx, na.rm=TRUE) if(maxxxx < maxZX) { xxx <- c(xxx, maxZX) yyy <- c(yyy, 1) } if(length(xxx) > 1) { #' non-degenerate cdf ## replace by piecewise linear approximation FZ <- approxfun(xxx, yyy, rule=2) } # now apply cdf U <- FZ(ZX) if(jitter) { ## Z values have already been jittered, but this does not guarantee ## that U values are distinct nU <- length(U) U <- U + runif(nU, -1, 1)/max(100, 2*nU) U <- pmax(0, pmin(1, U)) } # pack up stuff$values$FZ <- FZ stuff$values$FZX <- FZX 
stuff$values$U <- U stuff$values$EN <- sumwts ## integral of intensity = expected number of pts class(stuff) <- "spatialCDFframe" return(stuff) } plot.cdftest <- function(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend=TRUE) { style <- match.arg(style) fram <- attr(x, "frame") if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style values <- attr(x, "prep") info <- attr(x, "info") } # cdf of covariate Z over window FZ <- values$FZ # cdf of covariate values at data points FZX <- values$FZX # blurb covname <- info$covname covdescrip <- switch(covname, x="x coordinate", y="y coordinate", paste("covariate", dQuote(covname))) # plot it switch(style, cdf={ # plot both cdf's superimposed qZ <- get("x", environment(FZ)) pZ <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of", covdescrip), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=qZ, y=pZ, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) if(do.legend) legend("topleft", c("observed", "expected"), lwd=c(lwd,lwd0), col=c(col2hex(col), col2hex(col0)), lty=c(lty2char(lty),lty2char(lty0))) }, PP={ # plot FZX o (FZ)^{-1} pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) p0 <- FZ(qX) do.call(plot.default, resolve.defaults( list(x=p0, y=pX), list(...), list(col=col), list(xlim=c(0,1), ylim=c(0,1), xlab="Theoretical probability", ylab="Observed probability", main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }, QQ={ # plot (FZX)^{-1} o FZ pZ <- get("y", environment(FZ)) qZ <- get("x", environment(FZ)) FZinverse <- approxfun(pZ, qZ, rule=2) pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) qZX <- FZinverse(pX) Zrange <- range(qZ, qX, qZX) xlab <- paste("Theoretical quantile of", covname) ylab <- paste("Observed quantile of", covname) do.call(plot.default, resolve.defaults( list(x=qZX, y=qX), list(...), list(col=col), list(xlim=Zrange, ylim=Zrange, xlab=xlab, ylab=ylab, main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }) return(invisible(NULL)) } spatstat.core/R/hierarchy.R0000644000176200001440000000257414144333462015362 0ustar liggesusers## hierarchy.R ## ## Support functions for hierarchical interactions ## ## $Revision: 1.1 $ $Date: 2015/05/26 08:39:56 $ hierarchicalordering <- function(i, s) { s <- as.character(s) if(inherits(i, "hierarchicalordering")) { ## already a hierarchical ordering if(length(s) != length(i$labels)) stop("Tried to change the number of types in the hierarchical order") i$labels <- s return(i) } n <- length(s) possible <- if(is.character(i)) s else seq_len(n) j <- match(i, possible) if(any(uhoh <- is.na(j))) stop(paste("Unrecognised", ngettext(sum(uhoh), "level", "levels"), commasep(sQuote(i[uhoh])), "amongst possible levels", commasep(sQuote(s)))) if(length(j) < n) stop("Ordering is incomplete") ord <- order(j) m <- matrix(, n, n) rel <- matrix(ord[row(m)] <= ord[col(m)], n, n) dimnames(rel) <- list(s, s) x <- list(indices=j, ordering=ord, labels=s, relation=rel) class(x) <- "hierarchicalordering" x } print.hierarchicalordering <- function(x, ...) 
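## prints the ordering as e.g. "a ~> b ~> c"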
{ splat(x$labels[x$indices], collapse=" ~> ") invisible(NULL) } hiermat <- function (x, h) { stopifnot(is.matrix(x)) isna <- is.na(x) x[] <- as.character(x) x[isna] <- "" if(inherits(h, "hierarchicalordering")) ## allows h to be NULL, etc x[!(h$relation)] <- "" return(noquote(x)) } spatstat.core/R/pairpiece.R0000644000176200001440000001046414144333463015343 0ustar liggesusers# # # pairpiece.S # # $Revision: 1.23 $ $Date: 2018/03/15 07:37:41 $ # # A pairwise interaction process with piecewise constant potential # # PairPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # PairPiece <- local({ # .... auxiliary functions ........ delP <- function(i, r) { r <- r[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Strauss(r)) return(PairPiece(r)) } # ..... template .......... BlankPairPiece <- list( name = "Piecewise constant pairwise interaction process", creator = "PairPiece", family = "pairwise.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL), # filled in later parnames = "interaction thresholds", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas if(!all(is.finite(gamma))) return(FALSE) return(all(gamma <= 1) || gamma[1] == 0) }, project = function(coeffs, self){ # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction thresholds r[i] r <- self$par$r # check for NA or Inf bad <- !is.finite(gamma) # gamma > 1 forbidden unless hard core ishard <- is.finite(gamma[1]) && (gamma[1] == 0) if(!ishard) bad <- bad | (gamma > 1) if(!any(bad)) return(NULL) if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delP(firstbad, r)) } else { # consider all candidate submodels subs <- lapply(which(bad), delP, r=r) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(all(is.na(coeffs))) return(max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) if(!any(active)) return(0) else return(max(r[active])) }, Mayer=function(coeffs, self) { # second Mayer cluster integral r <- self$par$r gamma <- (self$interpret)(coeffs, self)$param$gammas # areas of annuli between r[i-1], r[i] areas <- pi * diff(c(0,r)^2) return(sum(areas * (1-gamma))) }, version=NULL # filled in later ) class(BlankPairPiece) <- "interact" PairPiece <- function(r) { instantiate.interact(BlankPairPiece, list(r=r)) } PairPiece <- intermaker(PairPiece, BlankPairPiece) PairPiece }) spatstat.core/R/bw.CvLHeat.R0000644000176200001440000000146514141452520015271 0ustar liggesusers#' #' bw.CvLHeat #' #' Cronie-van Lieshout bandwidth selection for Diffusion smoothing #' #' Copyright (c) 2020 Adrian Baddeley, Tilman Davies and Suman Rakshit #' GNU Public Licence >= 2.0 bw.CvLHeat <- function(X, ..., srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose=TRUE) { #' compute intensity estimates b <- HeatEstimates.ppp(X, ..., srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) lambda <- b$lambda h <- b$h hname <- b$hname #' compute Cronie-van Lieshout criterion AW <- area.owin(Window(X)) CV <- (rowSums(1/lambda) - AW)^2 iopt <- which.min(CV) result <- bw.optim(CV, h, iopt, criterion="Cronie-van Lieshout criterion", hname=hname) return(result) } spatstat.core/R/thresholding.R0000644000176200001440000000763214144333464016100 0ustar liggesusers#' #' Selection of threshold #' #' Copyright (c) 2020 Adrian Baddeley, Warick Brown, Robin K. Milne, #' Gopalan Nair, Suman Rakshit, Tom Lawrence, Aloke Phatak, Shih Ching Fu #' #' GNU Public Licence >= 2 #' #' $Revision: 1.1 $ $Date: 2021/10/01 04:03:52 $ #' #' #' threshold selection #' inputs: #' X deposit locations #' Z covariate thresholdSelect <- function(X, Z, method=c("Y", "LL", "AR", "t", "C"), Zname) { if(!is.ppp(X)) stop("X should be a point pattern (class ppp)") if(missing(Zname)) Zname <- short.deparse(substitute(Z)) method <- match.arg(method) fit <- ppm(X) a <- evalCovar(fit, Z, jitter=FALSE)$values FF <- ecdf(a$ZX) GG <- ecdf(a$Zvalues) n <- npoints(X) A <- area(Window(a$Zimage)) zrange <- range(range(a$ZX), range(a$Zimage)) zz <- seq(zrange[1], zrange[2], length.out=1028) nz <- n * (pz <- FF(zz)) Az <- A * (sz <- GG(zz)) Cz <- log((nz/Az)/((n-nz)/(A-Az))) yy <- switch(method, C = Cz, t = Cz/sqrt(1/nz + 1/(n-nz)), LL = { n * log(nz/Az) - (n-nz) * Cz - n }, AR = { sqrt(sz * (1-sz)) * (nz/sz - (n-nz)/(1-sz)) }, Y = { pz - sz }) yy[!is.finite(yy)] <- -Inf critname <- switch(method, C = "WofE contrast", t = "studentised contrast", LL = "profile log likelihood", AR = "Akman-Raftery criterion", Y = "Youden criterion") bw.optim(yy, zz, optimum="max", cvname=method, hname=Zname, criterion=critname, unitname=if(inherits(Z, "distfun")) unitname(X) else NULL) } #' confidence interval for threshold thresholdCI <- local({ thresholdCI <- function(X, Z, confidence=0.95, nsim=1000, parametric=FALSE) { #' bootstrap confidence interval for Youden estimate only. 
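#' Sketch of the procedure: covariate values at the data points are
    #' resampled from either the empirical cdf (nonparametric) or the
    #' model-predicted cdf (parametric=TRUE); the Youden-optimal threshold
    #' is recomputed for each of nsim replicates, and quantile-based
    #' confidence intervals are returned for the threshold z and for the
    #' selected area fraction s.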
if(!is.ppp(X)) stop("X should be a point pattern (class ppp)") a <- evalCovar(ppm(X), Z, jitter=FALSE)$values FF <- ecdf(a$ZX) GG <- ecdf(a$Zvalues) est <- Youden(FF,GG) b <- simthresh(FF, GG, npoints(X), nsim, parametric) zCI <- quantCI(b$z, est[["z"]], confidence=confidence) sCI <- quantCI(b$s, est[["s"]], confidence=confidence) rbind(z=zCI, s=sCI) } #' Underlying code based on cumulative distribution functions #' inputs: #' F = ecdf of covariate values for data points #' G = ecdf of covariate values for study region Youden <- function(F, G) { zz <- get("x", envir=environment(F)) iopt <- which.max(F(zz) - G(zz)) zopt <- zz[iopt] sopt <- G(zopt) return(c(z=zopt, s=sopt)) } Fpredicted <- function(F, G, zest) { if(missing(zest)) zest <- Youden(F,G)[["z"]] plow <- F(zest) glow <- G(zest) #' mixture of unif[0, glow] and unif[glow, 1] with weights plow, 1-plow zz <- get("x", envir=environment(G)) pp <- get("y", envir=environment(G)) qq <- ifelse(pp < glow, plow*(pp/glow), plow + (1-plow)*(pp-glow)/(1-glow)) FF <- approxfun(zz, qq, rule=2) return(FF) } inversefunction <- function(F) { zz <- get("x", envir=environment(F)) pz <- get("y", envir=environment(F)) Finv <- approxfun(pz, zz, rule=2) return(Finv) } simthresh <- function(F, G, ndata, nsim=100, parametric) { if(parametric) F <- Fpredicted(F, G) Finv <- inversefunction(F) zout <- sout <- numeric(nsim) zz <- get("x", envir=environment(G)) for(isim in 1:nsim) { zsim <- Finv(runif(ndata)) Fhat <- ecdf(zsim) iopt <- which.max(Fhat(zz) - G(zz)) zopt <- zz[iopt] sopt <- G(zopt) zout[isim] <- zopt sout[isim] <- sopt } return(data.frame(z=zout, s=sout)) } quantCI <- function(x, xest, confidence=0.95) { xleft <- quantile(x[x<=xest], 1-confidence) xright <- quantile(x[x>=xest], confidence) achieved <- mean(x >= xleft & x <= xright) return(c(lo=unname(xleft), hi=unname(xright), conf=achieved)) } thresholdCI }) spatstat.core/R/rPSNCP.R0000644000176200001440000001554714144333463014456 0ustar liggesusers#' simulation of product shot-noise Cox process #' Original: (c) Abdollah Jalilian 2021 #' Adapted to spatstat by Adrian Baddeley #' $Revision: 1.5 $ $Date: 2021/05/16 02:24:17 $ rPSNCP <- local({ ## =================================================================== ## kernel functions ## =================================================================== bkernels <- list( ## Gaussian kernel with bandwidth omega Thomas = function(r, omega, ...){ exp(- r^2/(2 * omega^2)) / (2 * pi * omega^2) }, ## Variance-Gamma (Bessel) kernel ## with bandwidth omega and shape parameter nu.ker VarGamma = function(r, omega, nu.ker){ stopifnot(nu.ker > -1/2) sigma2 <- 1 / (4 * pi * nu.ker * omega^2) u <- r/omega u <- ifelse(u > 0, (u^nu.ker) * besselK(u, nu.ker) / (2^(nu.ker - 1) * gamma(nu.ker)), 1) return(abs(sigma2 * u)) }, ## Cauchy kernel with bandwith omega Cauchy = function(r, omega, ...){ ((1 + (r / omega)^2)^(-1.5)) / (2 * pi * omega^2) } ## end of 'bkernels' list ) ## =================================================================== ## simulating from the product shot-noise Cox processes ## =================================================================== ## simulation from the null model of independent shot-noise components rPSNCP0 <- function(lambda, kappa, omega, kernels=NULL, nu.ker=NULL, win=owin(), nsim=1, drop=TRUE, ..., cnames=NULL, epsth=0.001 # , mc.cores=1L ) { m <- length(lambda) if ((length(kappa) != m) || length(omega) != m ) stop("arguments kappa and omega must have the same length as lambda") if (is.null(kernels)) kernels <- rep("Thomas", m) 
else if(length(kernels) != m) stop("length of argument 'kernels' must equal the number of components") if(is.null(nu.ker)) nu.ker <- rep(-1/4, m) lambda <- as.list(lambda) if (is.null(cnames)) cnames <- 1:m ## simulation from the null model of independent shot-noise components corefun0 <- function(dumm) { xp <- yp <- numeric(0) mp <- integer(0) for (i in 1:m) { mui <- lambda[[i]]/kappa[i] Xi <- switch(kernels[i], Thomas = rThomas(kappa[i], scale=omega[i], mu=mui, win=win, ...), Cauchy = rCauchy(kappa[i], scale=omega[i], mu=mui, win=win, thresh=epsth, ...), VarGamma = rVarGamma(kappa[i], scale=omega[i], mu=mui, win=win, nu.ker=nu.ker[i], nu.pcf=NULL, thresh=epsth, ...)) xp <- c(xp, Xi$x) yp <- c(yp, Xi$y) mp <- c(mp, rep.int(i, Xi$n)) } mp <- factor(mp, labels=cnames) out <- ppp(xp, yp, window=win, marks=mp, check=FALSE) return(out) } ## outlist <- if (mc.cores == 1) lapply(1:nsim, corefun0) ## else parallel::mclapply(1:nsim, corefun0, mc.cores=mc.cores) outlist <- lapply(1:nsim, corefun0) outlist <- simulationresult(outlist, nsim, drop) return(outlist) } # =================================================================== # simulation from the model rPSNCP <- function(lambda=rep(100, 4), kappa=rep(25, 4), omega=rep(0.03, 4), alpha=matrix(runif(16, -1, 3), nrow=4, ncol=4), kernels=NULL, nu.ker=NULL, win=owin(), nsim=1, drop=TRUE, ..., cnames=NULL, epsth=0.001 # , mc.cores=1L ) { m <- length(lambda) if ((length(kappa) != m) || length(omega) != m ) stop("Arguments kappa and omega must have the same length as lambda") if (!all(dim(alpha) == c(m, m))) stop("Dimensions of matrix alpha are not correct") if (is.null(kernels)) kernels <- rep("Thomas", m) else if(length(kernels) != m) stop("Length of argument kernels must equal the number of components") if (is.null(nu.ker)) nu.ker <- rep(-1/4, m) diag(alpha) <- 0 if(all(alpha == 0)) return(rPSNCP0(lambda=lambda, kappa=kappa, omega=omega, kernels=kernels, nu.ker=nu.ker, win=win, nsim=nsim, cnames=cnames, ..., epsth=epsth # , mc.cores=mc.cores )) lambda <- as.list(lambda) frame <- boundingbox(win) dframe <- diameter(frame) W <- as.mask(win, ...) 
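## Kernels are evaluated on the pixel raster of W below.  For each
  ## component i, sigma[i] = kappa[i]/keri(0) normalises the shot-noise
  ## field, and rmax[i] (found by uniroot) is the radius at which the
  ## kernel falls to the fraction epsth of its maximum; the simulation
  ## frame is dilated by max(rmax) so that parent points just outside
  ## 'win' still contribute to the random field.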
Wdim <- dim(W) wx <- as.vector(raster.x(W)) wy <- as.vector(raster.y(W)) sigma <- rmax <- numeric(m) for (i in 1:m) { if(is.im(lambda[[i]])) lambda[[i]] <- as.im(lambda[[i]], dimyx=Wdim, W=W) keri <- function(r){ bkernels[[kernels[i]]](r, omega[i], nu.ker[i]) } keri0 <- keri(0) sigma[i] <- kappa[i] / keri0 kerithresh <- function(r){ keri(r) / keri0 - epsth} rmax[i] <- uniroot(kerithresh, lower = omega[i] / 2, upper = 5 * dframe)$root # 4 * omega[i] # } dilated <- grow.rectangle(frame, max(rmax)) corefun <- function(idumm) { Phi <- lapply(kappa, rpoispp, win=dilated) fr <- vector("list", length=m) for (i in 1:m) { keri <- function(r){ bkernels[[kernels[i]]](r, omega[i], nu.ker[i]) } keri0 <- keri(0) Phii <- Phi[[i]] fr[[i]] <- keri(crossdist.default(wx, wy, Phii$x, Phii$y)) / keri0 } if (is.null(cnames)) cnames <- 1:m xp <- yp <- numeric(0) mp <- integer(0) for (i in 1:m) { Si <- rowSums(fr[[i]]) / sigma[i] E <- matrix(1, nrow=length(wx), ncol=m) for (j in (1:m)[-i]) { E[, j] <- apply(1 + alpha[j, i] * fr[[j]], 1, prod) * exp(-alpha[j, i] * sigma[j]) } values <- Si * apply(E, 1, prod) Lam <- im(values, xcol=W$xcol, yrow=W$yrow, unitname = unitname(W)) rhoi <- lambda[[i]] Xi <- rpoispp(rhoi * Lam) xp <- c(xp, Xi$x) yp <- c(yp, Xi$y) mp <- c(mp, rep.int(i, Xi$n)) } mp <- factor(mp, labels=cnames) simout <- ppp(xp, yp, window=win, marks=mp, check=FALSE) # attr(simout, "parents") <- Phi return(simout) } ## outlist <- if (mc.cores == 1) lapply(1:nsim, corefun) ## else parallel::mclapply(1:nsim, corefun, mc.cores=mc.cores) outlist <- lapply(1:nsim, corefun) outlist <- simulationresult(outlist, nsim, drop) return(outlist) } rPSNCP }) spatstat.core/R/alltypes.R0000644000176200001440000001616414144333461015240 0ustar liggesusers# # alltypes.R # # $Revision: 1.37 $ $Date: 2019/03/18 09:22:24 $ # # alltypes <- function(X, fun="K", ..., dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) { # # Function 'alltypes' --- calculates a summary function for # each type, or each pair of types, in a multitype point pattern # if(is.ppp(X)) classname <- "ppp" else if(is.lpp(X)) classname <- "lpp" else stop("X should be a ppp or lpp object") if(is.null(dataname)) dataname <- short.deparse(substitute(X)) # -------------------------------------------------------------------- # First inspect marks if(!is.marked(X)) { nmarks <- 0 marklabels <- "" } else { if(!is.multitype(X)) stop("the marks must be a factor") # ensure type names are parseable (for mathematical labels) levels(marks(X)) <- make.parseable(levels(marks(X))) mks <- marks(X) ma <- levels(mks) nmarks <- length(ma) marklabels <- paste(ma) } # --------------------------------------------------------------------- # determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") # --------------------------------------------------------------------- # determine function to be called if(is.function(fun)) { estimator <- fun } else if(is.character(fun)) { # First try matching one of the standard abbreviations K, G etc estimator <- getSumFun(fun, classname, (nmarks > 0), fatal=FALSE) if(is.null(estimator)) estimator <- get(fun, mode="function") } else stop(paste(sQuote("fun"), "should be a function or a character string")) # ------------------------------------------------------------------ # determine how the function shall be called. 
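# If the estimator accepts both 'i' and 'j' (e.g. Kcross), it is called
  # once for each ordered pair of types; if it accepts only 'i', once per
  # type; if it accepts neither, it is applied to each component of
  # split(X) separately.  Usage sketch:
  #   aK <- alltypes(amacrine, "K")    # array of cross-type K functions
  #   plot(aK)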
# indices.expected <- sum(c("i", "j") %in% names(formals(estimator))) apply.to.split <- (indices.expected == 0 && nmarks > 1) if(apply.to.split) ppsplit <- split(X) # -------------------------------------------------------------------- # determine array dimensions and margin labels witch <- if(nmarks == 0) matrix(1L, nrow=1L, ncol=1L, dimnames=list("","")) else if (nmarks == 1) matrix(1L, nrow=1L, ncol=1L, dimnames=list(marklabels, marklabels)) else if(indices.expected != 2) matrix(1L:nmarks, nrow=nmarks, ncol=1L, dimnames=list(marklabels, "")) else matrix(1L:(nmarks^2),ncol=nmarks,nrow=nmarks, byrow=TRUE, dimnames=list(marklabels, marklabels)) # ------------ start computing ------------------------------- # if computing envelopes, first generate simulated patterns # using undocumented feature of envelope() if(envelope && reuse) { L <- do.call(spatstat.core::envelope, resolve.defaults( list(quote(X), fun=estimator), list(internal=list(eject="patterns")), list(...), switch(1L+indices.expected, NULL, list(i=ma[1L]), list(i=ma[1L], j=ma[2L]), NULL), list(verbose=verb))) intern <- attr(L, "internal") } else intern <- L <- NULL # compute function array and build up 'fasp' object fns <- list() k <- 0 maxerr.action <- if(verb) "warn" else "null" for(i in 1L:nrow(witch)) { Y <- if(apply.to.split) ppsplit[[i]] else X for(j in 1L:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- if(!envelope) switch(1L+indices.expected, estimator(Y, ...), estimator(Y, i=ma[i], ...), estimator(Y, i=ma[i], j=ma[j], ...)) else do.call(spatstat.core::envelope, resolve.defaults( list(quote(Y), estimator), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname, silent=TRUE, maxerr.action=maxerr.action), switch(1L+indices.expected, NULL, list(i=ma[i]), list(i=ma[i], j=ma[j]), NULL))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } einfo <- lapply(fns, attr, which="einfo") gaveup <- sapply(lapply(einfo, getElement, name="gaveup"), isTRUE) if(any(gaveup)) { ng <- sum(gaveup) warning(paste(ng, "out of", length(fns), "envelopes", ngettext(ng, "was", "were"), "not computed, due to errors in evaluating", "the summary functions for simulated patterns")) } # wrap up into 'fasp' object title <- paste(if(nmarks > 1) "array of " else NULL, if(envelope) "envelopes of " else NULL, fname, if(nmarks <= 1) " function " else " functions ", "for ", dataname, ".", sep="") rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } # Lookup table for standard abbreviations of functions getSumFun <- local({ ftable <- rbind( data.frame(class="ppp", marked=FALSE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full=c("Fest", "Gest", "Jest", "Kest", "Lest", "pcf"), stringsAsFactors=FALSE), data.frame(class="ppp", marked=TRUE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full= c("Fest", "Gcross", "Jcross", "Kcross", "Lcross", "pcfcross"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=FALSE, abbrev=c("K", "pcf"), full=c("linearK", "linearpcf"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=TRUE, abbrev=c("K", "pcf"), full=c("linearKcross", "linearpcfcross"), stringsAsFactors=FALSE) ) getfun <- function(abbreviation, classname, ismarked, fatal=TRUE) { matches <- with(ftable, which(abbrev == abbreviation & class == classname & marked == ismarked)) if(length(matches) == 0) { if(!fatal) return(NULL) stop(paste("No match to function abbreviation", sQuote(abbreviation), "for class", sQuote(classname))) } if(length(matches) > 1) stop("Ambiguous function 
name") fullname <- ftable$full[matches] get(fullname, mode="function") } getfun }) spatstat.core/R/rmhmodel.ppm.R0000644000176200001440000003321514144333463016003 0ustar liggesusers# # rmhmodel.ppm.R # # convert ppm object into format palatable to rmh.default # # $Revision: 2.64 $ $Date: 2017/06/05 10:31:58 $ # # .Spatstat.rmhinfo # rmhmodel.ppm() # .Spatstat.Rmhinfo <- list( "Multitype Hardcore process" = function(coeffs, inte) { # hard core radii r[i,j] hradii <- inte$par[["hradii"]] return(list(cif='multihard', par=list(hradii=hradii), ntypes=ncol(hradii))) }, "Lennard-Jones process" = function(coeffs, inte) { pa <- inte$interpret(coeffs,inte)$param sigma <- pa[["sigma"]] epsilon <- pa[["epsilon"]] return(list(cif='lennard', par=list(sigma=sigma, epsilon=epsilon), ntypes=1)) }, "Fiksel process" = function(coeffs, inte) { hc <- inte$par[["hc"]] r <- inte$par[["r"]] kappa <- inte$par[["kappa"]] a <- inte$interpret(coeffs,inte)$param$a return(list(cif='fiksel', par=list(r=r,hc=hc,kappa=kappa,a=a), ntypes=1)) }, "Diggle-Gates-Stibbard process" = function(coeffs, inte) { rho <- inte$par[["rho"]] return(list(cif='dgs', par=list(rho=rho), ntypes=1)) }, "Diggle-Gratton process" = function(coeffs, inte) { kappa <- inte$interpret(coeffs,inte)$param$kappa delta <- inte$par[["delta"]] rho <- inte$par[["rho"]] return(list(cif='diggra', par=list(kappa=kappa,delta=delta,rho=rho), ntypes=1)) }, "Hard core process" = function(coeffs, inte) { hc <- inte$par[["hc"]] return(list(cif='hardcore', par=list(hc=hc), ntypes=1)) }, "Geyer saturation process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] sat <- inte$par[["sat"]] return(list(cif='geyer', par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Soft core process" = function(coeffs, inte) { kappa <- inte$par[["kappa"]] sigma <- inte$interpret(coeffs,inte)$param$sigma return(list(cif="sftcr", par=list(sigma=sigma,kappa=kappa), ntypes=1)) }, "Strauss process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "strauss", par = list(gamma = gamma, r = r), ntypes=1)) }, "Strauss - hard core process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] hc <- inte$par[["hc"]] return(list(cif='straush', par=list(gamma=gamma,r=r,hc=hc), ntypes=1)) }, "Triplets process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "triplets", par = list(gamma = gamma, r = r), ntypes=1)) }, "Penttinen process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif='penttinen', par=list(gamma=gamma, r=r), ntypes=1)) }, "Multitype Strauss process" = function(coeffs, inte) { # interaction radii r[i,j] radii <- inte$par[["radii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straussm', par=list(gamma=gamma,radii=radii), ntypes=ncol(radii))) }, "Multitype Strauss Hardcore process" = function(coeffs, inte) { # interaction radii r[i,j] iradii <- inte$par[["iradii"]] # hard core radii r[i,j] hradii <- inte$par[["hradii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straushm', par=list(gamma=gamma,iradii=iradii,hradii=hradii), ntypes=ncol(iradii))) }, "Piecewise constant pairwise interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] gamma <- (inte$interpret)(coeffs, 
inte)$param$gammas h <- stepfun(r, c(gamma, 1)) return(list(cif='lookup', par=list(h=h), ntypes=1)) }, "Area-interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] eta <- (inte$interpret)(coeffs, inte)$param$eta return(list(cif='areaint', par=list(eta=eta,r=r), ntypes=1)) }, "hybrid Geyer process" = function(coeffs, inte) { r <- inte$par[["r"]] sat <- inte$par[["sat"]] gamma <- (inte$interpret)(coeffs,inte)$param$gammas return(list(cif='badgey',par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Hybrid interaction"= function(coeffs, inte){ # for hybrids, $par is a list of the component interactions interlist <- inte$par # check for Poisson components ispois <- unlist(lapply(interlist, is.poisson)) if(all(ispois)) { # reduces to Poisson Z <- list(cif='poisson', par=list()) return(Z) } else if(any(ispois)) { # remove Poisson components interlist <- interlist[!ispois] } # N <- length(interlist) cifs <- character(N) pars <- vector(mode="list", length=N) ntyp <- integer(N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("Simulation of a hybrid-of-hybrid interaction is not implemented") # get RMH mapping for I-th component siminfoI <- .Spatstat.Rmhinfo[[interI$name]] if(is.null(siminfoI)) stop(paste("Simulation of a fitted", sQuote(interI$name), "has not yet been implemented"), call.=FALSE) # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) # extract coefficients # (there may be none, if this interaction is an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to 'siminfoI' if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # compute RMH info ZI <- siminfoI(coeffsI, interI) cifs[i] <- ZI$cif pars[[i]] <- ZI$par ntyp[i] <- ZI$ntypes } nt <- unique(ntyp[ntyp != 1]) if(length(nt) > 1) stop(paste("Hybrid components have different numbers of types:", commasep(nt))) if(N == 1) { # single cif: revert to original format: par is a list of parameters Z <- list(cif=cifs[1], par=pars[[1]], ntypes=ntyp) } else { # hybrid cif: par is a list of lists of parameters Z <- list(cif=cifs, par=pars, ntypes=ntyp) } return(Z) } ) # OTHER MODELS not yet implemented: # # # interaction object rmh.default # ------------------ ----------- # # OrdThresh # rmhmodel.ppm <- function(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) { ## converts ppm object `model' into format palatable to rmh.default verifyclass(model, "ppm") argh <- list(...) if(!is.null(new.coef)) model <- tweak.coefs(model, new.coef) ## Ensure the fitted model is valid ## (i.e. 
exists mathematically as a point process) if(!valid.ppm(model)) { if(project) { if(verbose) cat("Model is invalid - projecting it\n") model <- project.ppm(model, fatal=TRUE) } else stop("The fitted model is not a valid point process") } if(verbose) cat("Extracting model information...") ## Extract essential information Y <- summary(model, quick="no variances") if(Y$marked && !Y$multitype) stop("Not implemented for marked point processes other than multitype") if(Y$uses.covars && is.data.frame(model$covariates)) stop(paste("This model cannot be simulated, because the", "covariate values were given as a data frame.")) ## enforce defaults for `control' control <- rmhcontrol(control) ## adjust to peculiarities of model control <- rmhResolveControl(control, model) ######## Interpoint interaction if(Y$poisson) { Z <- list(cif="poisson", par=list()) # par is filled in later } else { ## First check version number of ppm object if(Y$antiquated) stop(paste("This model was fitted by a very old version", "of the package: spatstat", Y$version, "; simulation is not possible.", "Re-fit the model using your original code")) else if(Y$old) warning(paste("This model was fitted by an old version", "of the package: spatstat", Y$version, ". Re-fit the model using update.ppm", "or your original code")) ## Extract the interpoint interaction object inte <- Y$entries$interaction ## Determine whether the model can be simulated using rmh siminfo <- .Spatstat.Rmhinfo[[inte$name]] if(is.null(siminfo)) stop(paste("Simulation of a fitted", sQuote(inte$name), "has not yet been implemented")) ## Get fitted model's canonical coefficients coeffs <- Y$entries$coef if(newstyle.coeff.handling(inte)) { ## extract only the interaction coefficients Vnames <- Y$entries$Vnames IsOffset <- Y$entries$IsOffset coeffs <- coeffs[Vnames[!IsOffset]] } ## Translate the model to the format required by rmh.default Z <- siminfo(coeffs, inte) if(is.null(Z)) stop("The model cannot be simulated") else if(is.null(Z$cif)) stop(paste("Internal error: no cif returned from .Spatstat.Rmhinfo")) } ## Don't forget the types if(Y$multitype && is.null(Z$types)) Z$types <- levels(Y$entries$marks) ######## Window for result if(missing(w) || is.null(w)) { ## check for outdated argument name 'win' if(!is.na(m <- match("win", names(argh)))) { warning("Argument 'win' to rmhmodel.ppm is deprecated; use 'w'") w <- argh[[m]] argh <- argh[-m] } else w <- Y$entries$data$window } Z$w <- w ######## Expanded window for simulation? 
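  ## (The simulation window may be larger than the data window, to reduce
  ##  edge effects in the simulation. The expansion rule is taken from
  ##  'control$expand'; rmhResolveExpansion() below restricts any expansion
  ##  to the domain covered by the covariate images, when there are any.)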
covims <- if(Y$uses.covars) model$covariates[Y$covars.used] else NULL wsim <- rmhResolveExpansion(w, control, covims, "covariate")$wsim ###### Trend or Intensity ############ if(verbose) cat("Evaluating trend...") if(Y$stationary) { ## first order terms (beta or beta[i]) are carried in Z$par beta <- as.numeric(Y$trend$value) Z$trend <- NULL } else { ## trend terms present ## all first order effects are subsumed in Z$trend beta <- if(!Y$marked) 1 else rep.int(1, length(Z$types)) ## predict on window possibly larger than original data window Z$trend <- if(wsim$type == "mask") predict(model, window=wsim, type="trend", locations=wsim) else predict(model, window=wsim, type="trend") } Ncif <- length(Z$cif) if(Ncif == 1) { ## single interaction Z$par[["beta"]] <- beta } else { ## hybrid interaction if(all(Z$ntypes == 1)) { ## unmarked model: scalar 'beta' is absorbed in first cif absorb <- 1 } else { ## multitype model: vector 'beta' is absorbed in a multitype cif absorb <- min(which(Z$ntypes > 1)) } Z$par[[absorb]]$beta <- beta ## other cifs have par$beta = 1 for(i in (1:Ncif)[-absorb]) Z$par[[i]]$beta <- rep.int(1, Z$ntypes[i]) } if(verbose) cat("done.\n") Z <- do.call(rmhmodel, append(list(Z), argh)) return(Z) } rmhResolveExpansion <- function(win, control, imagelist, itype="covariate") { # Determine expansion window for simulation ex <- control$expand # The following is redundant because it is implied by !will.expand(ex) # if(ex$force.noexp) { # # Expansion prohibited # return(list(wsim=win, expanded=FALSE)) # } # Is expansion contemplated? if(!will.expand(ex)) return(list(wsim=win, expanded=FALSE)) # Proposed expansion window wexp <- expand.owin(win, ex) # Check feasibility isim <- unlist(lapply(imagelist, is.im)) imagelist <- imagelist[isim] if(length(imagelist) == 0) { # Unlimited expansion is feasible return(list(wsim=wexp, expanded=TRUE)) } # Expansion is limited to domain of image data # Determine maximum possible expansion window wins <- lapply(imagelist, as.owin) cwin <- do.call(intersect.owin, unname(wins)) if(!is.subset.owin(wexp, cwin)) { # Cannot expand to proposed window if(ex$force.exp) stop(paste("Cannot expand the simulation window,", "because the", itype, "images do not cover", "the expanded window"), call.=FALSE) # Take largest possible window wexp <- intersect.owin(wexp, cwin) } return(list(wsim=wexp, expanded=TRUE)) } spatstat.core/R/lurkslrm.R0000644000176200001440000001544014141452520015245 0ustar liggesusers#' #' lurkslrm.R #' #' Lurking variable plot for spatial logistic regression model #' #' $Revision: 1.3 $ $Date: 2021/10/30 06:21:03 $ #' lurking.slrm <- function(object, covariate, type="raw", cumulative=TRUE, ..., plot.it=TRUE, plot.sd=TRUE, clipwindow=NULL, rv=NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) { cl <- match.call() clenv <- parent.frame() verifyclass(object, "slrm") ## default name for covariate if(missing(covname) || is.null(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else NULL } Xsim <- NULL if(!isFALSE(envelope)) { ## compute simulation envelope Xsim <- NULL if(!isTRUE(envelope)) { ## some kind of object Y <- envelope if(is.list(Y) && all(sapply(Y, is.ppp))) { Xsim <- Y envelope <- TRUE } else if(inherits(Y, "envelope")) { Xsim <- attr(Y, "simpatterns") if(is.null(Xsim)) stop("envelope does not contain simulated point patterns") envelope <- TRUE } else stop("Unrecognised 
format of argument: envelope") nXsim <- length(Xsim) if(missing(nsim) && (nXsim < nsim)) { warning(paste("Only", nXsim, "simulated patterns available")) nsim <- nXsim } } } ## may need to refit the model if(is.expression(covariate)) { ## expression could involve variables that are not stored in object neednames <- all.vars(covariate) if(!all(neednames %in% colnames(object$Data$df))) object <- update(object, save.all.vars=TRUE) } ## match type argument type <- pickoption("type", type, c(raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") ################################################################# ## extract data from fitted model Data <- object$Data ## original data pattern X <- Data$response ## spatial locations and weights used in fit df <- Data$df quadpoints <- ppp(df$x, df$y, window=Window(X)) Z <- as.logical(df[,1]) datapoints <- quadpoints[Z] wts <- exp(df[,"logpixelarea"]) ################################################################# ## compute the covariate if(is.im(covariate)) { covvalues <- covariate[quadpoints, drop=FALSE] covrange <- internal$covrange %orifnull% range(covariate, finite=TRUE) } else if(is.vector(covariate) && is.numeric(covariate)) { covvalues <- covariate covrange <- internal$covrange %orifnull% range(covariate, finite=TRUE) if(length(covvalues) != npoints(quadpoints)) stop("Length of covariate vector,", length(covvalues), "!=", npoints(quadpoints), ", number of quadrature points") } else if(is.expression(covariate)) { ## Expression involving covariates in the fitted object if(!is.null(object$Data$covariates)) { ## Expression may involve an external variable neednames <- all.vars(covariate) missingnames <- setdiff(neednames, colnames(df)) if(length(missingnames)) { ## missing variables should be 'external' foundvars <- mget(missingnames, parent.frame(), ifnotfound=rep(list(NULL), length(missingnames))) bad <- sapply(foundvars, is.null) if(any(bad)) { nbad <- sum(bad) stop(paste(ngettext(nbad, "Variable", "Variables"), commasep(sQuote(missingnames[bad])), "not found"), call.=FALSE) } founddata <- mpl.get.covariates(foundvars, quadpoints) df <- cbind(df, founddata) } } ## Evaluate expression sp <- parent.frame() covvalues <- eval(covariate, envir=df, enclos=sp) covrange <- internal$covrange %orifnull% range(covvalues, finite=TRUE) if(!is.numeric(covvalues)) stop("The evaluated covariate is not numeric") } else stop(paste("The", sQuote("covariate"), "should be either", "a pixel image, an expression or a numeric vector")) ################################################################# ## Secret exit if(identical(internal$getrange, TRUE)) return(covrange) ################################################################ ## Residuals/marks attached to appropriate locations. ## Stoyan-Grabarnik weights are attached to the data points only. ## Others (residuals) are attached to all quadrature points. 
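  ## Illustrative usage sketch (not run; the data and model here are
  ## hypothetical, chosen only to show the calling convention):
  ##   X <- rpoispp(function(x,y) { 100 * exp(-2*x) })
  ##   fit <- slrm(X ~ x)
  ##   lurking(fit, expression(x), type="raw", cumulative=TRUE)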
resvalues <- if(!is.null(rv)) rv else if(type=="eem") eem(object, check=check) else residuals(object, type=type, check=check) if(inherits(resvalues, "imlist")) { if(length(resvalues) > 1) stop("Not implemented for vector-valued residuals") resvalues <- resvalues[[1]] } if(is.im(resvalues)) resvalues <- resvalues[quadpoints] ## NAMES OF THINGS ## name of the covariate if(is.null(covname)) covname <- if(is.expression(covariate)) covariate else "covariate" ## type of residual/mark if(missing(typename)) typename <- if(!is.null(rv)) "rv" else "" clip <- !is.null(clipwindow) ## CALCULATE stuff <- LurkEngine(object=object, type=type, cumulative=cumulative, plot.sd=plot.sd, quadpoints=quadpoints, wts=wts, Z=Z, subQset=TRUE, covvalues=covvalues, resvalues=resvalues, clip=clip, clipwindow=clipwindow, cov.is.im=is.im(covariate), covrange=covrange, typename=typename, covname=covname, cl=cl, clenv=clenv, oldstyle=oldstyle, check=check, verbose=verbose, nx=nx, splineargs=splineargs, envelope=envelope, nsim=nsim, nrank=nrank, Xsim=Xsim, internal=internal) ## --------------- PLOT ---------------------------------- if(plot.it && inherits(stuff, "lurk")) { plot(stuff, ...) return(invisible(stuff)) } else { return(stuff) } } spatstat.core/R/is.cadlag.R0000644000176200001440000000053614144333462015225 0ustar liggesusers#' #' is.cadlag.R #' #' Test whether a stepfun is cadlag/rcll #' (continue a droite; limites a gauche) #' #' $Revision: 1.4 $ $Date: 2020/11/30 04:10:33 $ is.cadlag <- function (s) { stopifnot(is.stepfun(s)) r <- knots(s) h <- s(r) n <- length(r) r1 <- c(r[-1L],r[n]+1) rm <- (r+r1)/2 hm <- s(rm) isTRUE(all.equal(h,hm)) } spatstat.core/R/compareFit.R0000644000176200001440000000503114144333462015464 0ustar liggesusers# # compareFit.R # # $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $ compareFit <- function(object, Fun, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=NULL, modelnames=NULL, same=NULL, different=NULL) { dotargs <- list(...) 
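  # Each row of the hyperframe 'h' below pairs a point pattern ('object')
  # with a trend formula and an interaction. The diagnostic function 'Fun'
  # (e.g. Kcom or Gres) is evaluated for each model specification on a
  # common grid of r values, and the results are merged by collapse.fv.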
h <- hyperframe(obj=object, tren=trend, inte=interaction) N <- nrow(h) if(N == 0) stop("No objects specified") # determine rbord for summary statistics if(is.null(rbord) && !is.null(interaction)) rbord <- max(with(h, reach(inte))) h$rbord <- rbord # try to get nice model names if(is.null(modelnames)) { if(inherits(trend, "formula") && is.interact(interaction) && inherits(object, c("anylist", "listof")) && all(nzchar(names(object))) && length(names(object)) == nrow(h)) modelnames <- names(object) else if(inherits(trend, c("anylist", "listof")) && all(nzchar(names(trend))) && length(names(trend)) == nrow(h)) modelnames <- names(trend) else if(inherits(interaction, c("anylist", "listof")) && all(nzchar(names(interaction))) && length(names(interaction)) == nrow(h)) modelnames <- names(interaction) else modelnames <- row.names(h) } row.names(h) <- make.names(modelnames) # fix a common vector of r values if(is.null(r)) { # compute first function fun1 <- with(h[1L,,drop=TRUE,strip=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=NULL, breaks=breaks), dotargs))) # extract r values r <- with(fun1, .x) } # compute the subsequent functions if(N == 1L) funs2toN <- NULL else funs2toN <- with(h[-1L, , drop=TRUE, strip=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=r), dotargs))) if(N == 2) funs2toN <- list(funs2toN) # collect all functions in a list funs <- as.anylist(append(list(fun1), funs2toN)) names(funs) <- row.names(h) # collapse together out <- collapse.fv(funs, same=same, different=different) return(out) } spatstat.core/R/rmhcontrol.R0000644000176200001440000001766714144333463015605 0ustar liggesusers# # # rmhcontrol.R # # $Revision: 1.35 $ $Date: 2019/12/31 04:56:58 $ # # rmhcontrol <- function(...) { UseMethod("rmhcontrol") } rmhcontrol.rmhcontrol <- function(...) { argz <- list(...) if(length(argz) == 1) return(argz[[1]]) stop("Arguments not understood") } rmhcontrol.list <- function(...) { argz <- list(...) nama <- names(argz) if(length(argz) == 1 && !any(nzchar(nama))) do.call(rmhcontrol.default, argz[[1]]) else do.call.matched(rmhcontrol.default, argz) } rmhcontrol.default <- function(..., p=0.9, q=0.5, nrep=5e5, expand=NULL, periodic=NULL, ptypes=NULL, x.cond=NULL, fixall=FALSE, nverb=0, nsave=NULL, nburn=nsave, track=FALSE, pstage=c("block", "start")) { argh <- list(...) 
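  # (All control parameters must be given by name; the only permitted
  #  unnamed argument is a single NULL. Defaults for p, q and nrep are
  #  taken from spatstat.options, as enforced below.)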
nargh <- length(argh) if(nargh > 0) { # allow rmhcontrol(NULL), otherwise flag an error if(!(nargh == 1 && is.null(argh[[1]]))) stop(paste("Unrecognised arguments to rmhcontrol;", "valid arguments are listed in help(rmhcontrol.default)")) } # impose default values if(missing(p)) p <- spatstat.options("rmh.p") if(missing(q)) q <- spatstat.options("rmh.q") if(missing(nrep)) nrep <- spatstat.options("rmh.nrep") # validate arguments if(!is.numeric(p) || length(p) != 1 || p < 0 || p > 1) stop("p should be a number in [0,1]") if(!is.numeric(q) || length(q) != 1 || q < 0 || q > 1) stop("q should be a number in [0,1]") if(!is.numeric(nrep) || length(nrep) != 1 || nrep < 1) stop("nrep should be an integer >= 1") nrep <- as.integer(nrep) if(!is.numeric(nverb) || length(nverb) != 1 || nverb < 0 || nverb > nrep) stop("nverb should be an integer <= nrep") nverb <- as.integer(nverb) if(!is.logical(fixall) || length(fixall) != 1) stop("fixall should be a logical value") if(!is.null(periodic) && (!is.logical(periodic) || length(periodic) != 1)) stop(paste(sQuote("periodic"), "should be a logical value or NULL")) if(saving <- !is.null(nsave)) { nsave <- as.integer(as.vector(nsave)) if(length(nsave) == 1L) { if(nsave <= 0) stop("nsave should be a positive integer") stopifnot(nsave < nrep) } else { stopifnot(all(nsave > 0)) stopifnot(sum(nsave) <= nrep) } if(missing(nburn) || is.null(nburn)) { nburn <- min(nsave[1], nrep-sum(nsave)) } else { check.1.integer(nburn) stopifnot(nburn + sum(nsave) <= nrep) } } stopifnot(is.logical(track)) pstage <- match.arg(pstage) ################################################################# # Conditioning on point configuration # # condtype = "none": no conditioning # condtype = "Palm": conditioning on the presence of specified points # condtype = "window": conditioning on the configuration in a subwindow # if(is.null(x.cond)) { condtype <- "none" n.cond <- NULL } else if(is.ppp(x.cond)) { condtype <- "window" n.cond <- x.cond$n } else if(is.data.frame(x.cond)) { if(ncol(x.cond) %in% c(2,3)) { condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of columns in data frame x.cond") } else if(is.list(x.cond)) { if(length(x.cond) %in% c(2,3)) { x.cond <- as.data.frame(x.cond) condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of components in list x.cond") } else stop("Unrecognised format for x.cond") if(condtype == "Palm" && n.cond == 0) { warning(paste("Ignored empty configuration x.cond;", "conditional (Palm) simulation given an empty point pattern", "is equivalent to unconditional simulation"), call.=FALSE) condtype <- "none" x.cond <- NULL n.cond <- NULL } ################################################################# # Fixing the number of points? # # fixcode = 1 <--> no conditioning # fixcode = 2 <--> conditioning on n = number of points # fixcode = 3 <--> conditioning on the number of points of each type. fixcode <- 2 - (p<1) + fixall - fixall*(p<1) fixing <- switch(fixcode, "none", "n.total", "n.each.type") # Warn about silly combination if(fixall && p < 1) warning("fixall = TRUE conflicts with p < 1. 
Ignored.", call.=FALSE) ############################################################### # `expand' determines expansion of the simulation window expand <- rmhexpand(expand) # No expansion is permitted if we are conditioning on the # number of points if(fixing != "none") { if(expand$force.exp) stop(paste("When conditioning on the number of points,", "no expansion may be done."), call.=FALSE) # no expansion expand <- .no.expansion } ################################################################### # return augmented list out <- list(p=p, q=q, nrep=nrep, nverb=nverb, expand=expand, periodic=periodic, ptypes=ptypes, fixall=fixall, fixcode=fixcode, fixing=fixing, condtype=condtype, x.cond=x.cond, saving=saving, nsave=nsave, nburn=nburn, track=track, pstage=pstage) class(out) <- c("rmhcontrol", class(out)) return(out) } print.rmhcontrol <- function(x, ...) { verifyclass(x, "rmhcontrol") splat("Metropolis-Hastings algorithm control parameters") splat("Probability of shift proposal: p =", x$p) if(x$fixing == "none") { splat("Conditional probability of death proposal: q =", x$q) if(!is.null(x$ptypes)) { splat("Birth proposal probabilities for each type of point:") print(x$ptypes) } } switch(x$fixing, none={}, n.total=splat("The total number of points is fixed"), n.each.type=splat("The number of points of each type is fixed")) switch(x$condtype, none={}, window={ splat("Conditional simulation given the", "configuration in a subwindow") print(x$x.cond$window) }, Palm={ splat("Conditional simulation of Palm type") }) splat("Number of M-H iterations: nrep =", x$nrep) if(x$saving) { nsave <- x$nsave len <- length(nsave) howmany <- if(len == 1L) nsave else if(len < 5L) commasep(nsave) else paste(paste(nsave[1:5], collapse=", "), "[...]") splat("After a burn-in of", x$nburn, "iterations,", "save point pattern after every", howmany, "iterations.") } pstage <- x$pstage %orifnull% "start" hdr <- "Generate random proposal points:" switch(pstage, start = splat(hdr, "at start of simulations."), block = splat(hdr, "before each block of", if(length(x$nsave) == 1L) x$nsave else "", "iterations.")) cat(paste("Track proposal type and acceptance/rejection?", if(x$track) "yes" else "no", "\n")) if(x$nverb > 0) cat(paste("Progress report every nverb=", x$nverb, "iterations\n")) else cat("No progress reports (nverb = 0).\n") # invoke print.rmhexpand print(x$expand) cat("Periodic edge correction? ") if(is.null(x$periodic)) cat("Not yet determined.\n") else if(x$periodic) cat("Yes.\n") else cat("No.\n") # return(invisible(NULL)) } default.rmhcontrol <- function(model, w=NULL) { # set default for 'expand' return(rmhcontrol(expand=default.expand(model, w=w))) } update.rmhcontrol <- function(object, ...) { do.call.matched(rmhcontrol.default, resolve.defaults(list(...), as.list(object), .StripNull=TRUE)) } rmhResolveControl <- function(control, model) { # adjust control information once the model is known stopifnot(inherits(control, "rmhcontrol")) # change *default* expansion rule to something appropriate for model # (applies only if expansion rule is undecided) control$expand <- change.default.expand(control$expand, default.expand(model)) return(control) } spatstat.core/R/dg.R0000644000176200001440000001561214144333462013773 0ustar liggesusers# # dg.S # # $Revision: 1.23 $ $Date: 2021/01/07 03:08:41 $ # # Diggle-Gratton pair potential # # DiggleGratton <- local({ # .... auxiliary functions ...... 
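  # diggraterms: evaluate, for each point of X, the Diggle-Gratton pair
  # potential terms against the points of Y, using the C routines
  # SC_Ediggra / SC_ESdiggra. When splitInf=TRUE the hard-core (zero cif)
  # indicator is returned separately in attr(, "hardcore").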
diggraterms <- function(X, Y, idX, idY, delta, rho, splitInf=FALSE) { stopifnot(is.numeric(delta)) stopifnot(is.numeric(rho)) stopifnot(delta < rho) ## sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) ## call C routine if(!splitInf) { ## usual case: allow cif to be zero because of hard core out <- .C(SC_Ediggra, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE="spatstat.core") answer <- integer(nX) answer[oX] <- out$values } else { ## split off the hard core terms and return them separately out <- .C(SC_ESdiggra, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), positive = as.double(double(nX)), hardcore = as.integer(integer(nX)), PACKAGE="spatstat.core") answer <- integer(nX) hardcore <- logical(nX) answer[oX] <- out$positive hardcore[oX] <- as.logical(out$hardcore) attr(answer, "hardcore") <- hardcore } return(answer) } # .......... template object .......... BlankDG <- list( name = "Diggle-Gratton process", creator = "DiggleGratton", family = "pairwise.family", #evaluated later pot = function(d, par) { delta <- par$delta rho <- par$rho above <- (d > rho) inrange <- (!above) & (d > delta) h <- above + inrange * (d - delta)/(rho - delta) return(log(h)) }, par = list(delta=NULL, rho=NULL), # to be filled in later parnames = list("lower limit delta", "upper limit rho"), hasInf = TRUE, selfstart = function(X, self) { # self starter for DiggleGratton nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(!is.na(delta <- self$par$delta)) { # value fixed by user or previous invocation # check it if(md < delta) warning(paste("Hard core distance delta is too large;", "some data points will have zero probability")) return(self) } if(md == 0) warning(paste("Pattern contains duplicated points:", "hard core distance delta must be zero")) # take hard core = minimum interpoint distance * n/(n+1) deltaX <- md * nX/(nX+1) DiggleGratton(delta=deltaX, rho=self$par$rho) }, init = function(self) { delta <- self$par$delta rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L) stop("upper limit rho must be a single number") stopifnot(is.finite(rho)) if(!is.na(delta)) { if(!is.numeric(delta) || length(delta) != 1L) stop("lower limit delta must be a single number") stopifnot(delta >= 0) stopifnot(rho > delta) } else stopifnot(rho >= 0) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) return(list(param=list(kappa=kappa), inames="exponent kappa", printable=dround(kappa))) }, valid = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) return(is.finite(kappa) && (kappa >= 0)) }, project = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) if(is.finite(kappa) && (kappa >= 0)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ rho <- self$par$rho if(all(is.na(coeffs))) return(rho) kappa <- coeffs[1L] delta <- self$par$delta if(abs(kappa) <= epsilon) return(delta) else return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { ## fast evaluator for DiggleGratton interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGratton") delta <- potpars$delta rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] values <- diggraterms(U, X, idU, idX, delta, rho, splitInf) result <- log(pmax.int(0, values)) result <- matrix(result, ncol=1L) if(!splitInf) attr(result, "-Inf") <- attr(values, "hardcore") return(result) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho delta <- self$par$delta width <- rho - delta kappa <- coeffs[1L] ans <- pi * (rho^2 - 2 * rho* width/(kappa + 1) + 2 * width^2/((kappa + 1) * (kappa + 2))) return(ans) } ) class(BlankDG) <- "interact" DiggleGratton <- function(delta=NA, rho) { instantiate.interact(BlankDG, list(delta=delta, rho=rho)) } DiggleGratton <- intermaker(DiggleGratton, BlankDG) DiggleGratton }) spatstat.core/R/smoothfun.R0000644000176200001440000000427014144333464015423 0ustar liggesusers## ## smoothfun.R ## ## Exact 'funxy' counterpart of Smooth.ppp ## ## $Revision: 1.9 $ $Date: 2018/09/07 05:29:50 $ Smoothfun <- function(X, ...) { UseMethod("Smoothfun") } Smoothfun.ppp <- function(X, sigma=NULL, ..., weights=NULL, edge=TRUE, diggle=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) stop("X should be a marked point pattern") ## handle weights now weightsgiven <- !missing(weights) && !is.null(weights) if(weightsgiven) { # convert to numeric if(is.im(weights)) { weights <- safelookup(weights, X) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) if(length(weights) == 0) weightsgiven <- FALSE } if(weightsgiven) { check.nvector(weights, npoints(X)) } else weights <- NULL ## X <- coerce.marks.numeric(X) ## stuff <- list(Xdata=X, values=marks(X), weights=weights, edge=edge, diggle=diggle, ...) ## ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.smoothppp, allow.zero=TRUE) stuff[c("sigma", "varcov")] <- ker[c("sigma", "varcov")] ## g <- function(x, y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] Xquery <- as.ppp(Y, Window(stuff$Xdata)) do.call(smoothcrossEngine, append(list(Xquery=Xquery), stuff)) } g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("Smoothfun", class(g)) return(g) } print.Smoothfun <- function(x, ...) { cat("function(x,y)", "which returns", "values", "interpolated from", fill=TRUE) X <- get("X", envir=environment(x)) print(X, ...) return(invisible(NULL)) } ## Method for as.im ## (enables plot.funxy, persp.funxy, contour.funxy to work for this class) as.im.Smoothfun <- function(X, W=Window(X), ..., approx=TRUE) { stuff <- get("stuff", envir=environment(X)) if(!approx) { #' evaluate exactly at grid points result <- as.im.function(X, W=W, ...) 
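    #' (exact evaluation visits every grid point, so it can be slow
    #'  on fine grids; the else-branch below is the faster default)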
} else { #' faster, approximate evaluation using FFT if(!is.null(W)) stuff$X <- stuff$X[W] result <- do.call(Smooth, resolve.defaults(list(...), stuff)) } return(result) } spatstat.core/R/smooth.ppp.R0000644000176200001440000011257614144333464015521 0ustar liggesusers# # smooth.ppp.R # # Smooth the marks of a point pattern # # $Revision: 1.77 $ $Date: 2021/01/07 03:08:41 $ # # smooth.ppp <- function(X, ..., weights=rep(1, npoints(X)), at="pixels") { # .Deprecated("Smooth.ppp", package="spatstat", # msg="smooth.ppp is deprecated: use the generic Smooth with a capital S") # Smooth(X, ..., weights=weights, at=at) # } Smooth <- function(X, ...) { UseMethod("Smooth") } Smooth.solist <- function(X, ...) { solapply(X, Smooth, ...) } Smooth.ppp <- function(X, sigma=NULL, ..., weights=rep(1, npoints(X)), at="pixels", adjust=1, varcov=NULL, edge=TRUE, diggle=FALSE, kernel="gaussian", scalekernel=is.character(kernel), geometric=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE, na.action="fatal")) stop("X should be a marked point pattern", call.=FALSE) X <- coerce.marks.numeric(X) if(!all(is.finite(as.matrix(marks(X))))) stop("Some mark values are Inf, NaN or NA", call.=FALSE) at <- pickoption("output location type", at, c(pixels="pixels", points="points")) ## trivial case if(npoints(X) == 0) { cn <- colnames(marks(X)) nc <- length(cn) switch(at, points = { result <- if(nc == 0) numeric(0) else matrix(, 0, nc, dimnames=list(NULL, cn)) }, pixels = { result <- as.im(NA_real_, Window(X)) if(nc) { result <- as.solist(rep(list(result), nc)) names(result) <- cn } }) return(result) } ## ensure weights are numeric if(weightsgiven <- !missing(weights) && !is.null(weights)) { pa <- parent.frame() weights <- pointweights(X, weights=weights, parent=pa) weightsgiven <- !is.null(weights) } else weights <- NULL ## geometric mean smoothing if(geometric) return(ExpSmoothLog(X, sigma=sigma, ..., at=at, adjust=adjust, varcov=varcov, kernel=kernel, scalekernel=scalekernel, weights=weights, edge=edge, diggle=diggle)) ## determine smoothing parameters if(scalekernel) { ker <- resolve.2D.kernel(sigma=sigma, ..., adjust=adjust, varcov=varcov, kernel=kernel, x=X, bwfun=bw.smoothppp, allow.zero=TRUE) sigma <- ker$sigma varcov <- ker$varcov adjust <- 1 } ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate nX <- npoints(X) if(is.null(weights)) weights <- rep(1, nX) marx <- marks(X) single <- is.null(dim(marx)) wtmark <- weights * marx totwt <- sum(weights) totwtmark <- if(single) sum(wtmark) else colSums(wtmark) W <- Window(X) switch(at, pixels = { result <- solapply(totwtmark/totwt, as.im, W=W, ...) names(result) <- colnames(marx) if(single) result <- result[[1L]] }, points = { denominator <- rep(totwt, nX) numerator <- rep(totwtmark, each=nX) if(!single) numerator <- matrix(numerator, nrow=nX) leaveoneout <- resolve.1.default(list(leaveoneout=TRUE), list(...)) if(leaveoneout) { numerator <- numerator - wtmark denominator <- denominator - weights } result <- numerator/denominator if(!single) colnames(result) <- colnames(marx) }) return(result) } ## Diggle's edge correction? 
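  ## (Diggle's correction divides the contribution of each data point x_i
  ##  by the edge-correction factor e(x_i), i.e. the weights w_i are
  ##  replaced by w_i / e(x_i); the factors e(x_i) are looked up below
  ##  from the image computed by second.moment.calc.)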
if(diggle && !edge) warning("Option diggle=TRUE overridden by edge=FALSE") diggle <- diggle && edge ## ## cutoff distance (beyond which the kernel value is treated as zero) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, adjust=adjust, ..., fatal=TRUE) ## if(cutoff < minnndist(X)) { # very small bandwidth leaveoneout <- resolve.1.default("leaveoneout", list(...), list(leaveoneout=TRUE)) if(!leaveoneout && at=="points") { warning(paste("Bandwidth is close to zero:", "original values returned")) Y <- marks(X) } else { warning(paste("Bandwidth is close to zero:", "nearest-neighbour interpolation performed")) Y <- nnmark(X, ..., k=1, at=at) } return(Y) } if(diggle) { ## absorb Diggle edge correction into weights vector edg <- second.moment.calc(X, sigma, what="edge", ..., varcov=varcov, adjust=adjust, kernel=kernel, scalekernel=scalekernel) ei <- safelookup(edg, X, warn=FALSE) weights <- if(weightsgiven) weights/ei else 1/ei weights[!is.finite(weights)] <- 0 weightsgiven <- TRUE } ## rescale weights to avoid numerical gremlins if(weightsgiven && ((mw <- median(abs(weights))) > 0)) weights <- weights/mw ## calculate... marx <- marks(X) uhoh <- NULL if(!is.data.frame(marx)) { # ........ vector of marks ................... values <- marx if(is.factor(values)) warning("Factor valued marks were converted to integers", call.=FALSE) values <- as.numeric(values) ## detect constant values ra <- range(values, na.rm=TRUE) if(diff(ra) == 0) { switch(at, points = { result <- values }, pixels = { M <- do.call.matched(as.mask, list(w=as.owin(X), ...)) result <- as.im(ra[1], M) }) } else { switch(at, points={ result <- do.call(smoothpointsEngine, resolve.defaults(list(x=quote(X), values=quote(values), weights=quote(weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) }, pixels={ values.weights <- if(weightsgiven) values * weights else values dont.complain.about(values.weights) numerator <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at="pixels", weights = quote(values.weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at="pixels", weights = quote(weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) result <- eval.im(numerator/denominator) ## trap small values of denominator ## trap NaN and +/- Inf values of result, but not NA eps <- .Machine$double.eps nbg <- eval.im(is.infinite(result) | is.nan(result) | (denominator < eps)) if(any(as.matrix(nbg), na.rm=TRUE)) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) ## l'Hopital's rule distX <- distmap(X, xy=numerator) whichnn <- attr(distX, "index") nnvalues <- eval.im(values[whichnn]) result[nbg] <- nnvalues[nbg] } uhoh <- attr(numerator, "warnings") }) } } else { ## ......... data frame of marks .................. 
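  ## (Each column v of marks is smoothed by the same kernel ratio
  ##  estimator, of Nadaraya-Watson type,
  ##     m(u) = sum_i w_i k(u - x_i) v_i / sum_i w_i k(u - x_i),
  ##  computed as the ratio of two weighted density estimates.)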
## convert to numerical values if(any(sapply(as.list(marx), is.factor))) warning("Factor columns of marks were converted to integers", call.=FALSE) marx <- asNumericMatrix(marx) ## detect constant columns ra <- apply(marx, 2, range, na.rm=TRUE) isconst <- (apply(ra, 2, diff) == 0) if(anyisconst <- any(isconst)) { oldmarx <- marx # oldX <- X marx <- marx[, !isconst] X <- X %mark% marx } if(any(!isconst)) { ## compute denominator denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at=at, weights = quote(weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) ## compute numerator for each column of marks marx.weights <- if(weightsgiven) marx * weights else marx dont.complain.about(marx.weights) numerators <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at=at, weights = quote(marx.weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) uhoh <- attr(numerators, "warnings") ## calculate ratios switch(at, points={ if(is.null(uhoh)) { ## numerators is a matrix (or may have dropped to vector) if(is.data.frame(numerators)) { numerators <- as.matrix(numerators) } else if(!is.matrix(numerators)) { numerators <- matrix(unlist(numerators), nrow=npoints(X)) } ratio <- numerators/denominator if(any(badpoints <- matrowany(!is.finite(ratio)))) { whichnnX <- nnwhich(X) ratio[badpoints,] <- as.matrix(marx[whichnnX[badpoints], , drop=FALSE]) } } else { warning("returning original values") ratio <- marx } result <- as.data.frame(ratio) colnames(result) <- colnames(marx) }, pixels={ ## numerators is a list of images (or may have dropped to 'im') if(is.im(numerators)) numerators <- list(numerators) result <- solapply(numerators, "/", e2=denominator) eps <- .Machine$double.eps denOK <- eval.im(denominator >= eps) if(!is.null(uhoh) || !all(denOK)) { ## compute nearest neighbour map on same raster distX <- distmap(X, xy=denominator) whichnnX <- attr(distX, "index") ## fix images allgood <- TRUE for(j in 1:length(result)) { ratj <- result[[j]] valj <- marx[,j] goodj <- eval.im(is.finite(ratj) & denOK) result[[j]] <- eval.im(goodj, ratj, valj[whichnnX]) allgood <- allgood && all(goodj) } if(!allgood) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) } } names(result) <- colnames(marx) }) } else result <- NULL if(anyisconst) { partresult <- result switch(at, points = { nX <- npoints(X) result <- matrix(, nX, ncol(oldmarx)) if(length(partresult) > 0) result[,!isconst] <- as.matrix(partresult) result[,isconst] <- rep(ra[1,isconst], each=nX) colnames(result) <- colnames(oldmarx) }, pixels = { result <- vector(mode="list", length=ncol(oldmarx)) if(length(partresult) > 0) { result[!isconst] <- partresult M <- as.owin(partresult[[1]]) } else { M <- do.call.matched(as.mask, list(w=as.owin(X), ...)) } result[isconst] <- lapply(ra[1, isconst], as.im, W=M) result <- as.solist(result) names(result) <- colnames(oldmarx) }) } } ## wrap up attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- uhoh return(result) } smoothpointsEngine <- function(x, values, sigma, ..., kernel="gaussian", scalekernel=is.character(kernel), weights=NULL, varcov=NULL, leaveoneout=TRUE, sorted=FALSE, cutoff=NULL, debug=FALSE) { stopifnot(is.logical(leaveoneout)) if(!is.null(dim(values))) stop("Internal error: smoothpointsEngine does not support multidimensional values") #' detect constant values if(diff(range(values, 
na.rm=TRUE)) == 0) { result <- values attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") ## Handle weights that are meant to be null if(length(weights) == 0) weights <- NULL ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate nX <- npoints(x) if(is.null(weights)) weights <- rep(1, nX) wtval <- weights * values totwt <- sum(weights) totwtval <- sum(wtval) denominator <- rep(totwt, nX) numerator <- rep(totwtval, nX) if(leaveoneout) { numerator <- numerator - wtval denominator <- denominator - weights } result <- numerator/denominator return(result) } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance if(debug) cat(paste("cutoff=", cutoff, "\n")) # detect very small bandwidth nnd <- nndist(x) nnrange <- range(nnd) if(cutoff < nnrange[1]) { if(leaveoneout && (npoints(x) > 1)) { warning("Very small bandwidth; values of nearest neighbours returned") result <- values[nnwhich(x)] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } if(leaveoneout) { # ensure cutoff includes at least one point cutoff <- max(1.1 * nnrange[2], cutoff) } sd <- if(is.null(varcov)) sigma else sqrt(max(eigen(varcov)$values)) if(isgauss && spatstat.options("densityTransform") && spatstat.options("densityC")) { ## .................. experimental C code ..................... if(debug) cat('Using experimental code!\n') npts <- npoints(x) result <- numeric(npts) ## transform to standard coordinates xx <- x$x yy <- x$y if(is.null(varcov)) { xx <- xx/(sqrt(2) * sigma) yy <- yy/(sqrt(2) * sigma) } else { Sinv <- solve(varcov) xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2) xx <- xy[,1] yy <- xy[,2] sorted <- FALSE } ## cutoff in standard coordinates cutoff <- cutoff/(sqrt(2) * sd) ## sort into increasing order of x coordinate (required by C code) if(!sorted) { oo <- fave.order(xx) xx <- xx[oo] yy <- yy[oo] vv <- values[oo] } else { vv <- values } if(is.null(weights)) { zz <- .C(SC_Gsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C(SC_Gwtsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnwhich(x)[nbg]] } } else if(isgauss && spatstat.options("densityC")) { # .................. C code ........................... 
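    # (Gaussian kernel on the original scale: the isotropic case calls
    #  SC_smoopt / SC_wtsmoopt, while the anisotropic case passes the
    #  flattened inverse covariance matrix to SC_asmoopt / SC_awtsmoopt.
    #  All of these C routines require the points to be sorted in
    #  increasing order of x coordinate.)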
if(debug) cat('Using standard code.\n') npts <- npoints(x) result <- numeric(npts) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y vv <- values } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] vv <- values[oo] } if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C(SC_smoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C(SC_wtsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } } else { # anisotropic kernel Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SC_asmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C(SC_awtsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnwhich(x)[nbg]] } } else { #' Either a non-Gaussian kernel or using older, partly interpreted code #' compute weighted densities if(is.null(weights)) { # weights are implicitly equal to 1 numerator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", weights = quote(values), sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel), list(...), list(edge=FALSE))) denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel), list(...), list(edge=FALSE))) } else { values.weights <- values * weights dont.complain.about(values.weights) numerator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", weights = quote(values.weights), sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel), list(...), list(edge=FALSE))) denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", weights = quote(weights), sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel), list(...), list(edge=FALSE))) } if(is.null(uhoh <- attr(numerator, "warnings"))) { result <- numerator/denominator result <- ifelseXB(is.finite(result), result, NA_real_) } else { warning("returning original values") result <- values attr(result, "warnings") <- uhoh } } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } 
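## Illustrative example (not run): smooth the diameter marks of the
## 'longleaf' pattern, with the bandwidth chosen by least-squares
## cross-validation (see bw.smoothppp below):
##   b <- bw.smoothppp(longleaf)
##   Z <- Smooth(longleaf, sigma=b)
##   plot(Z)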
markmean <- function(X, ...) { stopifnot(is.marked(X)) Y <- Smooth(X, ...) return(Y) } markvar <- function(X, sigma=NULL, ..., weights=NULL, varcov=NULL) { stopifnot(is.marked(X)) if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) E1 <- Smooth(X, sigma=sigma, varcov=varcov, weights=weights, ...) X2 <- X %mark% marks(X)^2 ## ensure smoothing bandwidth is the same! sigma <- attr(E1, "sigma") varcov <- attr(E1, "varcov") E2 <- Smooth(X2, sigma=sigma, varcov=varcov, weights=weights, ...) V <- eval.im(E2 - E1^2) return(V) } bw.smoothppp <- function(X, nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE, kernel="gaussian") { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) X <- coerce.marks.numeric(X) # rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] # marx <- marks(X) dimmarx <- dim(marx) if(!is.null(dimmarx)) marx <- as.matrix(as.data.frame(marx)) # determine a range of bandwidth values # n <- npoints(X) if(is.null(hmin) || is.null(hmax)) { W <- Window(X) # a <- area(W) d <- diameter(as.rectangle(W)) # Stoyan's rule of thumb stoyan <- bw.stoyan(X) # rule of thumb based on nearest-neighbour distances nnd <- nndist(X) nnd <- nnd[nnd > 0] if(is.null(hmin)) { hmin <- max(1.1 * min(nnd), stoyan/5) hmin <- min(d/8, hmin) } if(is.null(hmax)) { hmax <- max(stoyan * 20, 3 * mean(nnd), hmin * 2) hmax <- min(d/2, hmax) } } else stopifnot(hmin < hmax) # h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) # # compute cross-validation criterion for(i in seq_len(nh)) { yhat <- Smooth(X, sigma=h[i], at="points", leaveoneout=TRUE, kernel=kernel, sorted=TRUE) if(!is.null(dimmarx)) yhat <- as.matrix(as.data.frame(yhat)) cv[i] <- mean((marx - yhat)^2) } # optimize result <- bw.optim(cv, h, hname="sigma", creator="bw.smoothppp", criterion="Least Squares Cross-Validation", warnextreme=warn, hargnames=c("hmin", "hmax"), unitname=unitname(X)) return(result) } smoothcrossEngine <- function(Xdata, Xquery, values, sigma, ..., weights=NULL, varcov=NULL, kernel="gaussian", scalekernel=is.character(kernel), sorted=FALSE, cutoff=NULL) { validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") && scalekernel if(!is.null(dim(weights))) stop("weights must be a vector") ndata <- npoints(Xdata) nquery <- npoints(Xquery) if(nquery == 0 || ndata == 0) { if(is.null(dim(values))) return(rep(NA_real_, nquery)) nuttin <- matrix(NA_real_, nrow=nquery, ncol=ncol(values)) colnames(nuttin) <- colnames(values) return(nuttin) } # validate weights if(is.matrix(values) || is.data.frame(values)) { k <- ncol(values) stopifnot(nrow(values) == npoints(Xdata)) values <- as.data.frame(values) } else { k <- 1L stopifnot(length(values) == npoints(Xdata) || length(values) == 1) if(length(values) == 1L) values <- rep(values, ndata) } ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate if(is.null(weights)) weights <- rep(1, ndata) single <- is.null(dim(values)) wtval <- weights * values totwt <- sum(weights) totwtval <- if(single) sum(wtval) else colSums(wtval) denominator <- rep(totwt, nquery) numerator <- rep(totwtval, each=nquery) if(!single) numerator <- matrix(numerator, nrow=nquery) result <- numerator/denominator if(!single) colnames(result) <- colnames(values) return(result) } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a 
number of standard deviations (if scalekernel=TRUE) cutoff.orig <- cutoff cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance ## detect very small bandwidth nnc <- nncross(Xquery, Xdata) if(cutoff < min(nnc$dist)) { if(ndata > 1) { warning("Very small bandwidth; values of nearest neighbours returned") nw <- nnc$which result <- if(k == 1) values[nw] else values[nw,,drop=FALSE] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } ## Handle weights that are meant to be null if(length(weights) == 0) weights <- NULL if(!isgauss) { ## .................. non-Gaussian kernel ........................ close <- crosspairs(Xdata, Xquery, cutoff) kerij <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) ## sum the (weighted) contributions i <- close$i # data point j <- close$j # query point jfac <- factor(j, levels=seq_len(nquery)) wkerij <- if(is.null(weights)) kerij else kerij * weights[i] denominator <- tapplysum(wkerij, list(jfac)) if(k == 1L) { contribij <- wkerij * values[i] numerator <- tapplysum(contribij, list(jfac)) result <- numerator/denominator } else { result <- matrix(, nrow=nquery, ncol=k) for(kk in 1:k) { contribij <- wkerij * values[i, kk] numeratorkk <- tapplysum(contribij, list(jfac)) result[,kk] <- numeratorkk/denominator } } ## trap bad values if(any(nbg <- (is.infinite(result) | is.nan(result)))) { ## NaN or +/-Inf can occur if bandwidth is small ## Use value at nearest neighbour (by l'Hopital's rule) nnw <- nnc$which if(k == 1L) { result[nbg] <- values[nnw[nbg]] } else { bad <- which(nbg, arr.ind=TRUE) badrow <- bad[,"row"] badcol <- bad[,"col"] result[nbg] <- values[cbind(nnw[badrow], badcol)] } } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ## .................. Gaussian kernel henceforth ........................ ## handle multiple columns of values if(is.matrix(values) || is.data.frame(values)) { k <- ncol(values) stopifnot(nrow(values) == npoints(Xdata)) values <- as.data.frame(values) result <- matrix(, nquery, k) colnames(result) <- colnames(values) if(!sorted) { ood <- fave.order(Xdata$x) Xdata <- Xdata[ood] values <- values[ood, ] ooq <- fave.order(Xquery$x) Xquery <- Xquery[ooq] } for(j in 1:k) result[,j] <- smoothcrossEngine(Xdata, Xquery, values[,j], sigma=sigma, varcov=varcov, weights=weights, kernel=kernel, scalekernel=scalekernel, cutoff=cutoff.orig, sorted=TRUE, ...) 
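    ## restore the original ordering of the query points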
if(!sorted) { sortresult <- result result[ooq,] <- sortresult } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ## values must be a vector stopifnot(length(values) == npoints(Xdata) || length(values) == 1) if(length(values) == 1) values <- rep(values, ndata) result <- numeric(nquery) ## coordinates and values xq <- Xquery$x yq <- Xquery$y xd <- Xdata$x yd <- Xdata$y vd <- values if(!sorted) { ## sort into increasing order of x coordinate (required by C code) ooq <- fave.order(Xquery$x) xq <- xq[ooq] yq <- yq[ooq] ood <- fave.order(Xdata$x) xd <- xd[ood] yd <- yd[ood] vd <- vd[ood] } sd <- if(is.null(varcov)) sigma else sqrt(min(eigen(varcov)$values)) if(is.null(varcov)) { ## isotropic kernel if(is.null(weights)) { zz <- .C(SC_crsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { wtsort <- if(sorted) weights else weights[ood] zz <- .C(SC_wtcrsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } } else { # anisotropic kernel Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SC_acrsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { wtsort <- if(sorted) weights else weights[ood] zz <- .C(SC_awtcrsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnc$which[nbg]] } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ExpSmoothLog <- function(X, ..., at=c("pixels", "points"), weights=NULL) { verifyclass(X, "ppp") at <- match.arg(at) if(!is.null(weights)) check.nvector(weights, npoints(X)) X <- coerce.marks.numeric(X) marx <- marks(X) d <- dim(marx) if(!is.null(d) && d[2] > 1) { switch(at, points = { Z <- lapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights) Z <- do.call(data.frame, Z) }, pixels = { Z <- solapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights) }) return(Z) } # vector or single column of numeric marks v <- as.numeric(marx) vmin <- min(v) if(vmin < 0) stop("Negative values in geometric mean smoothing", call.=FALSE) Y <- X %mark% log(v) if(vmin > 0) { Z <- Smooth(Y, ..., at=at, weights=weights) } else { yok <- is.finite(marks(Y)) YOK <- Y[yok] weightsOK <- if(is.null(weights)) NULL else 
weights[yok] switch(at, points = { Z <- rep(-Inf, npoints(X)) Z[yok] <- Smooth(YOK, ..., at=at, weights=weightsOK) }, pixels = { isfinite <- nnmark(Y %mark% yok, ...) support <- solutionset(isfinite) Window(YOK) <- support Z <- as.im(-Inf, W=Window(Y), ...) Z[support] <- Smooth(YOK, ..., at=at, weights=weightsOK)[] }) } return(exp(Z)) } spatstat.core/R/plot.ppm.R0000644000176200001440000000542414144333463015153 0ustar liggesusers# # plot.ppm.S # # $Revision: 2.12 $ $Date: 2016/06/11 08:02:17 $ # # plot.ppm() # Plot a point process model fitted by ppm(). # # # plot.ppm <- function(x, ngrid = c(40,40), superimpose = TRUE, trend=TRUE, cif=TRUE, se=TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it=TRUE, locations=NULL, covariates=NULL, ...) { model <- x # Plot a point process model fitted by ppm(). # verifyclass(model, "ppm") # # find out what kind of model it is # mod <- summary(model, quick="entries") stationary <- mod$stationary poisson <- mod$poisson marked <- mod$marked multitype <- mod$multitype data <- mod$entries$data if(marked) { if(!multitype) stop("Not implemented for general marked point processes") else mrkvals <- levels(marks(data)) } else mrkvals <- 1 # ntypes <- length(mrkvals) # # Interpret options # ----------------- # # Whether to plot trend, cif, se if(!trend && !cif && !se) { cat(paste("Nothing plotted;", sQuote("trend"), ",", sQuote("cif"), "and", sQuote("se"), "are all FALSE\n")) return(invisible(NULL)) } # Suppress uninteresting plots # unless explicitly instructed otherwise if(missing(trend)) trend <- !stationary if(missing(cif)) cif <- !poisson if(missing(se)) se <- poisson && !stationary else if(se && !poisson) { warning(paste("standard error calculation", "is only implemented for Poisson models")) se <- FALSE } if(!trend && !cif && !se) { cat("Nothing plotted -- all plots selected are flat surfaces.\n") return(invisible(NULL)) } # # style of plot: suppress pseudo-default # if(missing(how)) how <- "image" # # # Do the prediction # ------------------ out <- list() surftypes <- c("trend","cif","se")[c(trend,cif,se)] ng <- if(missing(ngrid) && !missing(locations)) NULL else ngrid for (ttt in surftypes) { p <- predict(model, ngrid=ng, locations=locations, covariates=covariates, type = ttt, getoutofjail=TRUE) # permit outdated usage type="se" if(is.im(p)) p <- list(p) out[[ttt]] <- p } # Make it a plotppm object # ------------------------ class(out) <- "plotppm" attr(out, "mrkvals") <- mrkvals # Actually plot it if required # ---------------------------- if(plot.it) { if(!superimpose) data <- NULL if(missing(pause)) pause <- NULL plot(out,data=data,trend=trend,cif=cif,se=se,how=how,pause=pause, ...) 
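    # (dispatches to plot.plotppm, which renders each selected surface
    #  as a perspective plot, image or contour plot according to 'how')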
} return(invisible(out)) } spatstat.core/R/lennard.R0000644000176200001440000000707014144333462015023 0ustar liggesusers# # # lennard.R # # $Revision: 1.22 $ $Date: 2018/03/15 07:37:41 $ # # Lennard-Jones potential # # # ------------------------------------------------------------------- # LennardJones <- local({ BlankLJ <- list( name = "Lennard-Jones process", creator = "LennardJones", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { d6 <- d^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) } else { # expand around sig0 and set large numbers to Inf drat <- d/sig0 d6 <- drat^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) small <- (drat < 1/4) small <- array(c(small, small), dim=c(dim(d), 2)) p[small] <- -Inf big <- (drat > 4) big <- array(c(big, big), dim=c(dim(d), 2)) p[big] <- 0 } return(p) }, par = list(sigma0=NULL), # filled in later parnames = "Initial approximation to sigma", hasInf = TRUE, selfstart = function(X, self) { # self starter for Lennard Jones # attempt to set value of 'sigma0' if(!is.na(self$par$sigma0)) { # value fixed by user or previous invocation return(self) } if(npoints(X) < 2) { # not enough points return(self) } s0 <- minnndist(X) if(s0 == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Lennard-Jones model")) s0 <- mean(nndist(X)) if(s0 == 0) return(self) } LennardJones(s0) }, init = function(...){}, # do nothing update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta1 <- as.numeric(coeffs[1L]) theta2 <- as.numeric(coeffs[2L]) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 if(sign(theta1) * sign(theta2) == 1) { sigma <- sig0 * (theta1/theta2)^(1/6) epsilon <- (theta2^2)/(4 * theta1) } else { sigma <- NA epsilon <- NA } return(list(param=list(sigma=sigma, epsilon=epsilon), inames="interaction parameters", printable=signif(c(sigma=sigma,epsilon=epsilon)))) }, valid = function(coeffs, self) { p <- unlist(self$interpret(coeffs, self)$param) return(all(is.finite(p) & (p > 0))) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ if(anyNA(coeffs) || epsilon == 0) return(Inf) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 theta1 <- abs(coeffs[1L]) theta2 <- abs(coeffs[2L]) return(sig0 * max((theta1/epsilon)^(1/12), (theta2/epsilon)^(1/6))) }, version=NULL # filled in later ) class(BlankLJ) <- "interact" LennardJones <- function(sigma0=NA) { if(is.null(sigma0) || !is.finite(sigma0)) sigma0 <- NA instantiate.interact(BlankLJ, list(sigma0=sigma0)) } LennardJones <- intermaker(LennardJones, BlankLJ) LennardJones }) spatstat.core/R/saturated.R0000644000176200001440000000333114144333464015372 0ustar liggesusers# # # saturated.S # # $Revision: 1.10 $ $Date: 2019/02/20 03:34:50 $ # # Saturated pairwise process with user-supplied potential # # Saturated() creates a saturated pairwise process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Saturated <- function(pot, name) { if(missing(name)) name <- "Saturated process with user-defined potential" fop <- names(formals(pot)) if(!isTRUE(all.equal(fop, c("d", "par"))) && !isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) out <- list( name = name, creator = "Saturated", family = pairsat.family, pot = pot, par = NULL, parnames = NULL, hasInf = NA, init = NULL, update = function(self, ...){ do.call(Saturated, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } Saturated <- intermaker(Saturated, list(creator="Saturated", name="saturated process with user-defined potential", par=formals(Saturated), parnames=list("the potential", "the name of the interaction"))) spatstat.core/R/fiksel.R0000644000176200001440000001407514144333462014660 0ustar liggesusers# # # fiksel.R # # $Revision: 1.19 $ $Date: 2021/01/07 03:08:41 $ # # Fiksel interaction # # See Stoyan, Kendall and Mecke (1987), p. 161 # # ------------------------------------------------------------------- # Fiksel <- local({ # ......... auxiliary functions ........... fikselterms <- function(U, X, r, kappa, EqualPairs=NULL) { answer <- crossfikselterms(U, X, r, kappa) nU <- npoints(U) # subtract contributions from identical pairs (exp(-0) = 1 for each) if(length(EqualPairs) > 0) { idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU))) answer <- answer - idcount } return(answer) } crossfikselterms <- function(X, Y, r, kappa) { stopifnot(is.numeric(r)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C(SC_Efiksel, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), kkappa = as.double(kappa), values = as.double(double(nX)), PACKAGE="spatstat.core") answer <- integer(nX) answer[oX] <- out$values return(answer) } # ........ template object ..............
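# An illustrative sketch (not part of the package code; the numeric
# values below are hypothetical) of the Fiksel pair potential encoded
# in the 'pot' entry of the template object that follows: pairs closer
# than the hard core distance 'hc' are forbidden, pairs between 'hc'
# and the interaction distance 'r' contribute exp(-kappa * d), and
# pairs further apart than 'r' do not interact.
#
#   fikselpot <- function(d, r, hc, kappa) {
#     v <- (d <= r) * exp(-d * kappa)   # exponential decay inside range r
#     v[d <= hc] <- -Inf                # hard core: zero probability
#     v
#   }
#   fikselpot(c(0.3, 1, 3), r=2, hc=0.5, kappa=1)
#   ## -Inf  0.3678794  0.0000000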
BlankFiksel <- list( name = "Fiksel process", creator = "Fiksel", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- (d <= par$r) * exp( - d * par$kappa) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL, kappa=NULL), # filled in later parnames = c("interaction distance", "hard core distance", "rate parameter"), hasInf = TRUE, selfstart = function(X, self) { # self starter for Fiksel nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } if(md == 0) warning(paste("Pattern contains duplicated points:", "hard core must be zero")) # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Fiksel(r=self$par$r, hc = hcX, kappa=self$par$kappa) }, init = function(self) { r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa check.1.real(r) check.1.real(kappa) if(!is.na(hc)) { check.1.real(hc) stopifnot(hc > 0) stopifnot(r > hc) } else stopifnot(r > 0) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { a <- as.numeric(coeffs[1L]) return(list(param=list(a=a), inames="interaction strength a", printable=signif(a))) }, valid = function(coeffs, self) { a <- (self$interpret)(coeffs, self)$param$a return(is.finite(a)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r hc <- self$par$hc if(anyNA(coeffs)) return(r) a <- coeffs[1L] if(abs(a) <= epsilon) return(hc) else return(r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { ## fast evaluator for Fiksel interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Fiksel") r <- potpars$r hc <- potpars$hc kappa <- potpars$kappa hclose <- (strausscounts(U, X, hc, EqualPairs) != 0) fikselbit <- fikselterms(U, X, r, kappa, EqualPairs) if(!splitInf) { answer <- ifelseAX(hclose, -Inf, fikselbit) answer <- matrix(answer, ncol=1L) } else { answer <- fikselbit answer <- matrix(answer, ncol=1L) attr(answer, "-Inf") <- hclose } return(answer) }, Mayer=function(coeffs, self) { # second Mayer cluster integral a <- as.numeric(coeffs[1L]) r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa f <- function(x, kappa, a){ 2 * pi * x * (1 - exp(a * exp(-x * kappa))) } hardbit <- integrate(f=f, lower=hc, upper=r, a=a, kappa=kappa) mess <- hardbit[["message"]] if(!identical(mess, "OK")) { warning(mess) return(NA) } return(pi * hc^2 + hardbit$value) } ) class(BlankFiksel) <- "interact" Fiksel <- function(r, hc=NA, kappa) { instantiate.interact(BlankFiksel, list(r = r, hc = hc, kappa=kappa)) } Fiksel <- intermaker(Fiksel, BlankFiksel) Fiksel }) spatstat.core/R/reach.R0000644000176200001440000000162714144333463014465 0ustar liggesusers# # reach.R # # $Revision: 1.8 $ $Date: 2007/10/24 09:41:15 $ # reach <- function(x, ...) { UseMethod("reach") } reach.interact <- function(x, ...) 
{ verifyclass(x, "interact") irange <- x$irange if(is.null(irange)) return(Inf) if(!is.function(irange)) stop("Internal error - x$irange is not a function") ir <- irange(x) if(is.na(ir)) ir <- Inf return(ir) } reach.ppm <- function(x, ..., epsilon=0) { verifyclass(x, "ppm") # Poisson case if(is.poisson.ppm(x)) return(0) # extract info inte <- x$interaction coeffs <- coef(x) if(newstyle.coeff.handling(inte)) { # extract only interaction coefficients Vnames <- x$internal$Vnames coeffs <- coeffs[Vnames] } # apply 'irange' function irange <- inte$irange if(is.null(irange)) return(Inf) ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } spatstat.core/R/scanstat.R0000644000176200001440000002525314144333464015225 0ustar liggesusers## ## scanstat.R ## ## Spatial scan statistics ## ## $Revision: 1.19 $ $Date: 2021/01/07 03:08:41 $ ## scanmeasure <- function(X, ...){ UseMethod("scanmeasure") } scanmeasure.ppp <- function(X, r, ..., method=c("counts", "fft")) { method <- match.arg(method) check.1.real(r) ## enclosing window R <- as.rectangle(as.owin(X)) ## determine pixel resolution M <- as.mask(R, ...) ## expand domain to include centres of all circles intersecting R W <- grow.mask(M, r) ## switch(method, counts = { ## direct calculation using C code ## get new dimensions dimyx <- W$dim xr <- W$xrange yr <- W$yrange nr <- dimyx[1] nc <- dimyx[2] ## n <- npoints(X) zz <- .C(SC_scantrans, x=as.double(X$x), y=as.double(X$y), n=as.integer(n), xmin=as.double(xr[1]), ymin=as.double(yr[1]), xmax=as.double(xr[2]), ymax=as.double(yr[2]), nr=as.integer(nr), nc=as.integer(nc), R=as.double(r), counts=as.integer(numeric(prod(dimyx))), PACKAGE="spatstat.core") zzz <- matrix(zz$counts, nrow=dimyx[1], ncol=dimyx[2], byrow=TRUE) Z <- im(zzz, xrange=xr, yrange=yr, unitname=unitname(X)) }, fft = { ## Previous version of scanmeasure.ppp had ## Y <- pixellate(X, ..., padzero=TRUE) ## but this is liable to Gibbs phenomena. ## Instead, convolve with small Gaussian (sd = 1 pixel width) sigma <- with(W, unique(c(xstep, ystep))) Y <- density(X, ..., sigma=sigma) ## invoke scanmeasure.im Z <- scanmeasure(Y, r) Z <- eval.im(as.integer(round(Z))) }) return(Z) } scanmeasure.im <- function(X, r, ...) { D <- disc(radius=r) eps <- with(X, c(xstep,ystep)) if(any(eps >= 2 * r)) return(eval.im(X * pi * r^2)) D <- as.im(as.mask(D, eps=eps)) Z <- imcov(X, D) return(Z) } scanPoisLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) ll <- nlogn(nZ, muZ) + nlogn(nZco, muZco) - nlogn(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanBinomLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) logbin <- function(k, n) { nlogn(k, n) + nlogn(n-k, n) } ll <- logbin(nZ, muZ) + logbin(nZco, muZco) - logbin(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanLRTS <- function(X, r, ..., method=c("poisson", "binomial"), baseline=NULL, case=2, alternative=c("greater", "less", "two.sided"), saveopt = FALSE, Xmask=NULL) { stopifnot(is.ppp(X)) stopifnot(check.nvector(r)) method <- match.arg(method) alternative <- match.arg(alternative) if(is.null(Xmask)) Xmask <- as.mask(as.owin(X), ...) 
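## Two sampling models are supported: 'poisson' compares the number of
## points of X falling in each scanning circle with the integral of the
## baseline intensity over that circle, while 'binomial' treats one mark
## level of a multitype pattern as the 'cases' and the full pattern as
## the population at risk.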
switch(method, poisson={ Y <- X if(is.null(baseline)) { mu <- as.im(Xmask, value=1) } else if(is.ppm(baseline)) { if(is.marked(baseline)) stop("baseline is a marked point process: not supported") mu <- predict(baseline, locations=Xmask) } else if(is.im(baseline) || is.function(baseline)) { mu <- as.im(baseline, W=Xmask) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) nG <- npoints(Y) }, binomial={ stopifnot(is.multitype(X)) lev <- levels(marks(X)) if(length(lev) != 2) warning("X should usually be a bivariate (2-type) point pattern") if(!is.null(baseline)) stop("baseline is not supported in the binomial case") if(is.character(case) && !(case %in% lev)) stop(paste("Unrecognised label for cases:", sQuote(case))) if(is.numeric(case) && !(case %in% seq_along(lev))) stop(paste("Undefined level:", case)) Y <- split(X)[[case]] nG <- npoints(Y) mu <- unmark(X) }) ## The following line ensures that the same pixel resolution information ## is passed to the two calls to 'scanmeasure' below Y$window <- Xmask ## nr <- length(r) lrts <- vector(mode="list", length=nr) for(i in 1:nr) { ri <- r[i] nZ <- scanmeasure(Y, ri) muZ <- scanmeasure(mu, ri) if(!compatible.im(nZ, muZ)) { ha <- harmonise.im(nZ, muZ) nZ <- ha[[1]] muZ <- ha[[2]] } switch(method, poisson = { muG <- integral.im(mu) lrts[[i]] <- eval.im(scanPoisLRTS(nZ, nG, muZ, muG, alternative)) }, binomial = { muG <- npoints(mu) lrts[[i]] <- eval.im(scanBinomLRTS(nZ, nG, muZ, muG, alternative)) }) } if(length(lrts) == 1) { result <- lrts[[1]] } else { result <- im.apply(lrts, max) if(saveopt) attr(result, "iopt") <- im.apply(lrts, which.max) } return(result) } scan.test <- function(X, r, ..., method=c("poisson", "binomial"), nsim = 19, baseline=NULL, case = 2, alternative=c("greater", "less", "two.sided"), verbose=TRUE) { dataname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) method <- match.arg(method) alternative <- match.arg(alternative) stopifnot(is.numeric(r)) check.1.real(nsim) if(!(round(nsim) == nsim && nsim > 1)) stop("nsim should be an integer > 1") regionname <- paste("circles of radius", if(length(r) == 1) r else paste("between", min(r), "and", max(r))) ## ## compute observed loglikelihood function ## This also validates the arguments. obsLRTS <- scanLRTS(X=X, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ..., saveopt=TRUE) obs <- max(obsLRTS) sim <- numeric(nsim) ## determine how to simulate switch(method, binomial={ methodname <- c("Spatial scan test", "Null hypothesis: constant relative risk", paste("Candidate cluster regions:", regionname), "Likelihood: binomial", paste("Monte Carlo p-value based on", nsim, "simulations")) lev <- levels(marks(X)) names(lev) <- lev casename <- lev[case] counted <- paste("points with mark", sQuote(casename), "inside cluster region") simexpr <- expression(rlabel(X)) }, poisson={ counted <- paste("points inside cluster region") X <- unmark(X) Xwin <- as.owin(X) Xmask <- as.mask(Xwin, ...) 
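## Choose the null distribution to simulate from: complete spatial
## randomness when no baseline is supplied; the fitted model itself
## (simulated via rmh) when the baseline is a fitted ppm; otherwise a
## Poisson process with intensity proportional to the baseline image
## or function.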
if(is.null(baseline)) { nullname <- "Complete Spatial Randomness (CSR)" lambda <- intensity(X) simexpr <- expression(runifpoispp(lambda, Xwin)) dont.complain.about(lambda) } else if(is.ppm(baseline)) { nullname <- baseline$callstring rmhstuff <- rmh(baseline, preponly=TRUE, verbose=FALSE) simexpr <- expression(rmhEngine(rmhstuff)) dont.complain.about(rmhstuff) } else if(is.im(baseline) || is.function(baseline)) { nullname <- "Poisson process with intensity proportional to baseline" base <- as.im(baseline, W=Xmask) alpha <- npoints(X)/integral.im(base) lambda <- eval.im(alpha * base) simexpr <- expression(rpoispp(lambda)) dont.complain.about(lambda) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) methodname <- c("Spatial scan test", paste("Null hypothesis:", nullname), paste("Candidate cluster regions:", regionname), "Likelihood: Poisson", paste("Monte Carlo p-value based on", nsim, "simulations")) }) if(verbose) { cat("Simulating...") pstate <- list() } for(i in 1:nsim) { if(verbose) pstate <- progressreport(i, nsim, state=pstate) Xsim <- eval(simexpr) simLRTS <- scanLRTS(X=Xsim, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ...) sim[i] <- max(simLRTS) } pval <- mean(c(sim,obs) >= obs, na.rm=TRUE) names(obs) <- "maxLRTS" nm.alternative <- switch(alternative, greater="Excess of", less="Deficit of", two.sided="Two-sided: excess or deficit of", stop("Unknown alternative")) nm.alternative <- paste(nm.alternative, counted) result <- list(statistic = obs, p.value = pval, alternative = nm.alternative, method = methodname, data.name = dataname) class(result) <- c("scan.test", "htest") attr(result, "obsLRTS") <- obsLRTS attr(result, "X") <- X attr(result, "r") <- r return(result) } plot.scan.test <- function(x, ..., what=c("statistic", "radius"), do.window=TRUE) { xname <- short.deparse(substitute(x)) what <- match.arg(what) Z <- as.im(x, what=what) dont.complain.about(Z) do.call(plot, resolve.defaults(list(x=quote(Z)), list(...), list(main=xname))) if(do.window) { X <- attr(x, "X") plot(as.owin(X), add=TRUE, invert=TRUE) } invisible(NULL) } as.im.scan.test <- function(X, ..., what=c("statistic", "radius")) { Y <- attr(X, "obsLRTS") what <- match.arg(what) if(what == "radius") { iopt <- attr(Y, "iopt") r <- attr(X, "r") Y <- eval.im(r[iopt]) } return(as.im(Y, ...)) } spatstat.core/R/interact.R0000644000176200001440000002612214144333462015210 0ustar liggesusers# # interact.S # # # $Revision: 1.30 $ $Date: 2020/11/30 10:19:35 $ # # Class 'interact' representing the interpoint interaction # of a point process model # (e.g. Strauss process with a given threshold r) # # Class 'isf' representing a generic interaction structure # (e.g. pairwise interactions) # # These do NOT specify the "trend" part of the model, # only the "interaction" component. # # The analogy is: # # glm() ppm() # # model formula trend formula # # family interaction # # That is, the 'systematic' trend part of a point process # model is specified by a 'trend' formula argument to ppm(), # and the interpoint interaction is specified as an 'interact' # object. # # You only need to know about these classes if you want to # implement a new point process model. # # THE DISTINCTION: # An object of class 'isf' describes an interaction structure # e.g. pairwise interaction, triple interaction, # pairwise-with-saturation, Dirichlet interaction. # Think of it as determining the "order" of interaction # but not the specific interaction potential function. 
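# For example, the Strauss and Fiksel models are different objects of
# class 'interact', but both share the same interaction structure,
# namely the pairwise family, represented by one object of class 'isf'.
# In a typical call such as
#
#       ppm(X ~ 1, interaction=Strauss(r=0.1))
#
# (X a point pattern; the threshold r=0.1 is just an illustrative
# value) the object Strauss(r=0.1) fixes the irregular parameter r,
# while the regular parameters beta and gamma are estimated by ppm().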
# # An object of class 'interact' completely defines the interpoint # interactions in a specific point process model, except for the # regular parameters of the interaction, which are to be estimated # by ppm() or otherwise. An 'interact' object specifies the values # of all the 'nuisance' or 'irregular' parameters. An example # is the Strauss process with a given, fixed threshold r # but with the parameters beta and gamma undetermined. # # DETAILS: # # An object of class 'isf' contains the following: # # $name Name of the interaction structure # e.g. "pairwise" # # $print How to 'print()' this object # [A function; invoked by the 'print' method # 'print.isf()'] # # $eval A function which evaluates the canonical # sufficient statistic for an interaction # of this general class (e.g. any pairwise # interaction.) # # If lambda(u,X) denotes the conditional intensity at a point u # for the point pattern X, then we assume # log lambda(u, X) = theta . S(u,X) # where theta is the vector of regular parameters, # and we call S(u,X) the sufficient statistic. # # A typical calling sequence for the $eval function is # # (f$eval)(X, U, E, potentials, potargs, correction) # # where X is the data point pattern, U is the list of points u # at which the sufficient statistic S(u,X) is to be evaluated, # E is a logical matrix equivalent to (X[i] == U[j]), # $potentials defines the specific potential function(s) and # $potargs contains any nuisance/irregular parameters of these # potentials [the $potargs are passed to the $potentials without # needing to be understood by $eval.] # $correction is the name of the edge correction method. # # # An object of class 'interact' contains the following: # # # $name Name of the specific potential # e.g. "Strauss" # # $family Object of class "isf" describing # the interaction structure # # $pot The interaction potential function(s) # -- usually a function or list of functions. # (passed as an argument to $family$eval) # # $par list of any nuisance/irregular parameters # (passed as an argument to $family$eval) # # $parnames vector of long names/descriptions # of the parameters in 'par' # # $init() initialisation action # or NULL indicating none required # # $update() A function to modify $par # [Invoked by 'update.interact()'] # or NULL indicating a default action # # $print How to 'print()' this object # [Invoked by 'print' method 'print.interact()'] # or NULL indicating a default action # # -------------------------------------------------------------------------- print.isf <- function(x, ...) { if(is.null(x)) return(invisible(NULL)) verifyclass(x, "isf") if(!is.null(x$print)) (x$print)(x) invisible(NULL) } print.interact <- function(x, ..., family, brief=FALSE, banner=TRUE) { verifyclass(x, "interact") if(missing(family)) family <- waxlyrical('extras') #' Print name of model if(banner) { if(family && !brief && !is.null(xf <- x$family)) print.isf(xf) splat(if(!brief) "Interaction:" else NULL, x$name, sep="") } # Now print the parameters if(!is.null(x$print)) { (x$print)(x) } else { # default # just print the parameter names and their values pwords <- x$parnames parval <- x$par pwords <- paste(toupper(substring(pwords, 1, 1)), substring(pwords, 2), sep="") isnum <- sapply(parval, is.numeric) parval[isnum] <- lapply(parval[isnum], signif, digits=getOption("digits")) splat(paste(paste0(pwords, ":\t", parval), collapse="\n")) } invisible(NULL) } is.interact <- function(x) { inherits(x, "interact") } update.interact <- function(object, ...) 
{ verifyclass(object, "interact") if(!is.null(object$update)) (object$update)(object, ...) else { # Default # First update the version if(outdated.interact(object)) object <- reincarnate.interact(object) # just match the arguments in "..." # with those in object$par and update them want <- list(...) if(length(want) > 0) { m <- match(names(want),names(object$par)) nbg <- is.na(m) if(any(nbg)) { which <- paste((names(want))[nbg]) warning(paste("Arguments not matched: ", which)) } m <- m[!nbg] object$par[m] <- want } # call object's own initialisation routine if(!is.null(object$init)) (object$init)(object) object } } is.poisson.interact <- function(x) { verifyclass(x, "interact") is.null(x$family) } parameters.interact <- function(model, ...) { model$par } # Test whether interact object was made by an older version of spatstat outdated.interact <- function(object) { ver <- object$version older <- is.null(ver) || (package_version(ver) < versionstring.spatstat()) return(older) } # Test whether the functions in the interaction object # expect the coefficient vector to contain ALL coefficients, # or only the interaction coefficients. # This change was introduced in 1.11-0, at the same time # as interact objects were given version numbers. newstyle.coeff.handling <- function(object) { stopifnot(inherits(object, "interact")) ver <- object$version old <- is.null(ver) || (package_version(ver) < "1.11") return(!old) } # ###### # # Re-create an interact object in the current version of spatstat # # reincarnate.interact <- function(object) { # re-creates an interact object in the current version of spatstat if(!is.null(object$update)) { newobject <- (object$update)(object) return(newobject) } par <- object$par # pot <- object$pot name <- object$name # get creator function creator <- object$creator if(is.null(creator)) { # old version: look up list creator <- .Spatstat.Old.InteractionList[[name]] if(is.null(creator)) stop(paste("Don't know how to update", sQuote(name), "to current version of spatstat")) } if(is.character(creator)) creator <- get(creator) if(!is.function(creator) && !is.expression(creator)) stop("Internal error: creator is not a function or expression") # call creator if(is.expression(creator)) newobject <- eval(creator) else { # creator is a function # It's assumed that the creator function's arguments are # either identical to components of 'par' (the usual case) # or to one of the components of the object itself (Ord, Saturated) # or to printfun=object$print (Pairwise). 
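# For example, a Strauss object has par=list(r=...) and its creator
# Strauss() has a formal argument named 'r', so matching the formal
# argument names of the creator against the available components
# reconstructs the original call.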
argnames <- names(formals(creator)) available <- append(par, object) available <- append(available, list(printfun=object$print)) ok <- argnames %in% names(available) if(!all(ok)) stop(paste("Internal error:", ngettext(sum(!ok), "argument", "arguments"), paste(sQuote(argnames[!ok]), collapse=", "), "in creator function were not understood")) newobject <- do.call(creator, available[argnames]) } if(!inherits(newobject, "interact")) stop("Internal error: creator did not return an object of class interact") return(newobject) } # This list is necessary to deal with older formats of 'interact' objects # which did not include the creator name .Spatstat.Old.InteractionList <- list("Diggle-Gratton process" = "DiggleGratton", "Geyer saturation process" = "Geyer", "Lennard-Jones potential" = "LennardJones", "Multitype Strauss process" = "MultiStrauss", "Multitype Strauss Hardcore process" = "MultiStraussHard", "Ord process with threshold potential"="OrdThresh", "Piecewise constant pairwise interaction process"="PairPiece", "Poisson process" = "Poisson", "Strauss process" = "Strauss", "Strauss - hard core process" = "StraussHard", "Soft core process" = "Softcore", # weird ones: "Ord process with user-defined potential" = expression(Ord(object$pot)), "Saturated process with user-defined potential" =expression(Saturated(object$pot)), "user-defined pairwise interaction process"= expression( Pairwise(object$pot, par=object$par, parnames=object$parnames, printfun=object$print)) ) as.interact <- function(object) { UseMethod("as.interact") } as.interact.interact <- function(object) { verifyclass(object, "interact") return(object) } interactionfamilyname <- function(x) { if(inherits(x, "isf")) return(x$name) x <- as.interact(x) return(x$family$name) } # Extract version string from interact object versionstring.interact <- function(object) { verifyclass(object, "interact") v <- object$version return(v) # NULL before 1.11-0 } #### internal code for streamlining initialisation of interactions # # x should be a partially-completed 'interact' object # instantiate.interact <- function(x, par=NULL) { if(is.character(x$family)) x$family <- get(x$family) # set parameter values x$par <- par # validate parameters if(!is.null(x$init)) x$init(x) x$version <- versionstring.spatstat() return(x) } spatstat.core/R/bermantest.R0000644000176200001440000002271114144333461015542 0ustar liggesusers# # bermantest.R # # Test statistics from Berman (1986) # # $Revision: 1.23 $ $Date: 2020/12/19 05:25:06 $ # # berman.test <- function(...) { UseMethod("berman.test") } berman.test.ppp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) force(covariate) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) fitcsr <- ppm(X) dont.complain.about(fitcsr) do.call(bermantestEngine, resolve.defaults(list(quote(fitcsr), quote(covariate), which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } berman.test.ppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) force(model) force(covariate) if(is.character(covariate)) covname <- covariate verifyclass(model, "ppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(quote(model), quote(covariate), which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Qname))) } bermantestEngine <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ..., modelname, covname, dataname="") { csr <- is.poisson(model) && is.stationary(model) if(missing(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(missing(covname)) { covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate } which <- match.arg(which) alternative <- match.arg(alternative) if(!is.poisson(model)) stop("Only implemented for Poisson point process models") #' compute required data fram <- spatialCDFframe(model, covariate, ..., modelname=modelname, covname=covname, dataname=dataname) #' evaluate berman test statistic result <- bermantestCalc(fram, which=which, alternative=alternative) return(result) } bermantestCalc <- function(fram, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { which <- match.arg(which) alternative <- match.arg(alternative) verifyclass(fram, "spatialCDFframe") fvalues <- fram$values info <- fram$info ## values of covariate at data points ZX <- fvalues$ZX ## transformed to Unif[0,1] under H0 U <- fvalues$U ## values of covariate at pixels Zvalues <- fvalues$Zvalues ## corresponding pixel areas/weights weights <- fvalues$weights ## intensity of model lambda <- fvalues$lambda ## names modelname <- info$modelname dataname <- info$dataname covname <- info$covname switch(which, Z1={ #......... Berman Z1 statistic ..................... method <- paste("Berman Z1 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) # sum of covariate values at data points Sn <- sum(ZX) # predicted mean and variance lamwt <- lambda * weights En <- sum(lamwt) ESn <- sum(lamwt * Zvalues) varSn <- sum(lamwt * Zvalues^2) # working, for plot method working <- list(meanZX=mean(ZX), meanZ=ESn/En) # standardise statistic <- (Sn - ESn)/sqrt(varSn) names(statistic) <- "Z1" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="mean value of covariate at random points is less than predicted under model", greater="mean value of covariate at random points is greater than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname)) }, Z2={ #......... Berman Z2 statistic ..................... 
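# Under H0 the transformed values U are i.i.d. Uniform(0,1), so
# sum(U) has mean npts/2 and variance npts/12; Z2 is the
# corresponding standardised sum, asymptotically standard normal.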
method <- paste("Berman Z2 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) npts <- length(ZX) statistic <- sqrt(12/npts) * (sum(U) - npts/2) working <- list(meanU=mean(U)) names(statistic) <- "Z2" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="covariate values at random points have lower quantiles than predicted under model", greater="covariate values at random points have higher quantiles than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n\t", "and transformed to uniform distribution under", if(info$csr) modelname else sQuote(modelname)) }) out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=method, which=which, working=working, data.name=valuename, fram=fram) class(out) <- c("htest", "bermantest") return(out) } plot.bermantest <- function(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) { fram <- x$fram if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style ks <- x$ks values <- attr(ks, "prep") info <- attr(ks, "info") } work <- x$working op <- options(useFancyQuotes=FALSE) switch(x$which, Z1={ # plot cdf's of Z FZ <- values$FZ xxx <- get("x", environment(FZ)) yyy <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z1 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=xxx, y=yyy, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) FZX <- values$FZX if(is.null(FZX)) FZX <- ecdf(values$ZX) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) abline(v=work$meanZ, lwd=lwd0,col=col0, lty=lty0, xpd=FALSE) abline(v=work$meanZX, lwd=lwd,col=col, lty=lty, xpd=FALSE) }, Z2={ # plot cdf of U U <- values$U cdfU <- ecdf(U) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z2 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) dont.complain.about(cdfU) do.call(plot.ecdf, resolve.defaults( list(quote(cdfU)), list(...), list(do.points=FALSE, asp=1), list(xlim=c(0,1), ylim=c(0,1)), list(lwd=lwd, col=col, lty=lty), list(xlab="U", ylab="relative frequency"), list(main=main))) abline(0,1,lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=0.5, lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=work$meanU, lwd=lwd,col=col,lty=lty, xpd=FALSE) }) options(op) return(invisible(NULL)) } spatstat.core/R/response.R0000644000176200001440000000114714144333463015236 0ustar liggesusers#' #' response.R #' #' Extract the values of the response, from a fitted model #' #' response <- function(object) { UseMethod("response") } response.glm <- response.lm <- function(object) { mo <- object$model if(is.null(mo)) return(NULL) te <- terms(object) rn <- attr(te, "response") if(is.null(rn)) return(NULL) y <- mo[,rn] return(y) } response.ppm <- function(object) { data.ppm(object) } response.dppm <- response.kppm <- function(object) { data.ppm(as.ppm(object)) } response.slrm <- function(object) { object$Data$response } response.mppm <- function(object) { data.mppm(object) } spatstat.core/R/psstG.R0000644000176200001440000001340014144333463014473 0ustar liggesusers# # psstG.R # # 
Pseudoscore residual for unnormalised G (saturation process) # # $Revision: 1.10 $ $Date: 2018/10/19 03:29:29 $ # ################################################################################ # psstG <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) fit <- object else if(is.ppp(object) || is.quad(object)) { # convert to quadscheme if(is.ppp(object)) object <- quadscheme(object, ...) # fit model if(!is.null(model)) fit <- update(model, Q=object, forcefit=TRUE) else fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED)) { # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) # resval <- with(res, "increment") rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[S](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[S]") # First phase: ................................................. # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) # excludes identical pairs dIJ <- nn$dist I <- seq(U$n) J <- nn$which DD <- (I <= X$n) # TRUE for data points wcIJ <- wc okI <- USED[I] # histogram of nndist for data points only (without edge correction) Bsum <- cumsum(whist(dIJ[DD & okI], breaks$val)) # weighted histogram of nncross (without edge correction) Bint <- cumsum(whist(dIJ[okI], breaks$val, wcIJ[okI])) # residual Bres <- Bsum - Bint # tack on ans <- bind.fv(ans, data.frame(dat1=Bsum, com1=Bint, res1=Bres), c("%s[dat1](r)", "%s[com1](r)", "%s[res1](r)"), c("phase 1 pseudosum (contribution to %s)", "phase 1 pseudocompensator (contribution to %s)", "phase 1 pseudoresidual (contribution to %s)")) # Second phase: ................................................ 
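# Gather contributions from all pairs (quadrature point, data point)
# lying closer than rmax; pseudosums and pseudocompensators are then
# tabulated by a reduced-sample device using the nearest and
# second-nearest neighbour distances in X (via Kount and Kwtsum).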
# close pairs (quadrature point to data point) close <- crosspairs(U, X, rmax, what="ijd") dIJ <- close$d I <- close$i J <- close$j # UI <- U[I] # XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) okI <- USED[I] # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] # nearest and second-nearest neighbour distances in X nn1 <- nndist(X) nn2 <- nndist(X, k=2) nn1J <- nn1[J] nn2J <- nn2[J] # weird use of the reduced sample estimator # data sum: RSX <- Kount(dIJ[DD & okI], nn2J[DD & okI], nn2J[ZI & okI], breaks) Csum <- RSX$numerator # integral: if(spatstat.options("psstG.remove.zeroes")) okE <- okI & !EIJ else okE <- okI RSD <- Kwtsum(dIJ[okE], nn1J[okE], wcIJ[okE], nn1, rep.int(1, length(nn1)), breaks, fatal=FALSE) Cint <- RSD$numerator # Cres <- Bres + Csum - Cint # tack on ans <- bind.fv(ans, data.frame(dat2=Csum, com2=Cint, res2=Cres, dat=Bsum+Csum, com=Bint+Cint, res=Bres+Cres), c("%s[dat2](r)", "%s[com2](r)", "%s[res2](r)", "Sigma~Delta~V[S](r)", "bold(C)~Delta~V[S](r)", "bold(R)~Delta~V[S](r)"), c("phase 2 pseudosum (contribution to %s)", "phase 2 pseudocompensator (contribution to %s)", "phase 2 pseudoresidual (contribution to %s)", "pseudosum (contribution to %s)", "pseudocompensator (contribution to %s)", "pseudoresidual function %s"), "res") # restrict choice of curves in default plot fvnames(ans, ".") <- c("dat", "com", "res", "theo") # return(ans) } spatstat.core/R/fv.R0000644000176200001440000014126214144333462014015 0ustar liggesusers## ## ## fv.R ## ## class "fv" of function value objects ## ## $Revision: 1.170 $ $Date: 2021/08/26 06:45:01 $ ## ## ## An "fv" object represents one or more related functions ## of the same argument, such as different estimates of the K function. ## ## It is a data.frame with additional attributes ## ## argu column name of the function argument (typically "r") ## ## valu column name of the recommended function ## ## ylab generic label for y axis e.g. K(r) ## ## fmla default plot formula ## ## alim recommended range of function argument ## ## labl recommended xlab/ylab for each column ## ## desc longer description for each column ## ## unitname name of unit of length for 'r' ## ## shade (optional) column names of upper & lower limits ## of shading - typically a confidence interval ## ## Objects of this class are returned by Kest(), etc ## ################################################################## ## creator fv <- function(x, argu="r", ylab=NULL, valu, fmla=NULL, alim=NULL, labl=names(x), desc=NULL, unitname=NULL, fname=NULL, yexp=ylab) { stopifnot(is.data.frame(x)) ## check arguments stopifnot(is.character(argu)) if(!is.null(ylab)) stopifnot(is.character(ylab) || is.language(ylab)) if(!missing(yexp)) { if(is.null(yexp)) yexp <- ylab else stopifnot(is.language(yexp)) } stopifnot(is.character(valu)) if(!(argu %in% names(x))) stop(paste(sQuote("argu"), "must be the name of a column of x")) if(!(valu %in% names(x))) stop(paste(sQuote("valu"), "must be the name of a column of x")) if(is.null(fmla)) fmla <- paste(valu, "~", argu) else if(inherits(fmla, "formula")) { ## convert formula to string fmla <- flat.deparse(fmla) } else if(!is.character(fmla)) stop(paste(sQuote("fmla"), "should be a formula or a string")) if(missing(alim)) { ## Note: if alim is given as NULL, it is not changed. 
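## Default: the recommended range of the function argument is the
## range of its finite values.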
argue <- x[[argu]] alim <- range(argue[is.finite(argue)]) } else if(!is.null(alim)) { if(!is.numeric(alim) || length(alim) != 2) stop(paste(sQuote("alim"), "should be a vector of length 2")) } if(!is.character(labl)) stop(paste(sQuote("labl"), "should be a vector of strings")) stopifnot(length(labl) == ncol(x)) if(is.null(desc)) desc <- character(ncol(x)) else { stopifnot(is.character(desc)) stopifnot(length(desc) == ncol(x)) nbg <- is.na(desc) if(any(nbg)) desc[nbg] <- "" } if(!is.null(fname)) stopifnot(is.character(fname) && length(fname) %in% 1:2) ## pack attributes attr(x, "argu") <- argu attr(x, "valu") <- valu attr(x, "ylab") <- ylab attr(x, "yexp") <- yexp attr(x, "fmla") <- fmla attr(x, "alim") <- alim attr(x, "labl") <- labl attr(x, "desc") <- desc attr(x, "units") <- as.unitname(unitname) attr(x, "fname") <- fname attr(x, "dotnames") <- NULL attr(x, "shade") <- NULL ## class(x) <- c("fv", class(x)) return(x) } .Spatstat.FvAttrib <- c( "argu", "valu", "ylab", "yexp", "fmla", "alim", "labl", "desc", "units", "fname", "dotnames", "shade") ## putSpatstatVariable("FvAttrib", .Spatstat.FvAttrib) as.data.frame.fv <- function(x, ...) { stopifnot(is.fv(x)) fva <- .Spatstat.FvAttrib attributes(x)[fva] <- NULL class(x) <- "data.frame" x } #' is.fv() is now defined in spatstat.geom/R/is.R ## as.fv <- function(x) { UseMethod("as.fv") } as.fv.fv <- function(x) x as.fv.data.frame <- function(x) { if(ncol(x) < 2) stop("Need at least 2 columns") return(fv(x, names(x)[1L], , names(x)[2L])) } as.fv.matrix <- function(x) { y <- as.data.frame(x) if(any(bad <- is.na(names(y)))) names(y)[bad] <- paste0("V", which(bad)) return(as.fv.data.frame(y)) } ## other methods for as.fv are described in the files for the relevant classes. vanilla.fv <- function(x) { ## remove everything except basic fv characteristics retain <- c("names", "row.names", .Spatstat.FvAttrib) attributes(x) <- attributes(x)[retain] class(x) <- c("fv", "data.frame") return(x) } print.fv <- local({ maxwords <- function(z, m) { max(0, which(cumsum(nchar(z) + 1) <= m+1)) } usewords <- function(z, n) paste(z[1:n], collapse=" ") print.fv <- function(x, ..., tight=FALSE) { verifyclass(x, "fv") terselevel <- spatstat.options("terse") showlabels <- waxlyrical('space', terselevel) showextras <- waxlyrical('extras', terselevel) nama <- names(x) a <- attributes(x) if(!is.null(ylab <- a$ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) } if(!inherits(x, "envelope")) { splat("Function value object", paren(paste("class", sQuote("fv")))) if(!is.null(ylab)) { xlab <- fvlabels(x, expand=TRUE)[[a$argu]] splat("for the function", xlab, "->", ylab) } } ## Descriptions .. desc <- a$desc ## .. may require insertion of ylab if(!is.null(ylab) && any(grepl("%s", desc))) desc <- sprintf(desc, ylab) ## Labels .. 
labl <- fvlabels(x, expand=TRUE) ## Avoid overrunning text margin maxlinewidth <- options('width')[[1L]] key.width <- max(nchar(nama)) labl.width <- if(showlabels) max(nchar(labl), nchar("Math.label")) else 0 desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 if(fullwidth > maxlinewidth && tight) { ## try shortening the descriptions so that it all fits on one line spaceleft <- maxlinewidth - (key.width + labl.width + 2) desc <- truncline(desc, spaceleft) desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 } spaceleft <- maxlinewidth - (key.width + 1) if(desc.width > spaceleft) { ## Descriptions need to be truncated to max line width desc <- truncline(desc, spaceleft) desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 } if(showextras) { fullwidth <- pmin(maxlinewidth, fullwidth) fullline <- paste0(rep(".", fullwidth), collapse="") cat(fullline, fill=TRUE) } df <- data.frame(Math.label=labl, Description=desc, row.names=nama, stringsAsFactors=FALSE) if(!showlabels) df <- df[,-1L,drop=FALSE] print(df, right=FALSE) ## if(showextras) { cat(fullline, fill=TRUE) splat("Default plot formula: ", flat.deparse(as.formula(a$fmla))) splat("where", dQuote("."), "stands for", commasep(sQuote(fvnames(x, ".")), ", ")) if(length(a$shade)) splat("Columns", commasep(sQuote(a$shade)), "will be plotted as shading (by default)") alim <- a$alim splat("Recommended range of argument", paste0(a$argu, ":"), if(!is.null(alim)) prange(signif(alim, 5)) else "not specified") rang <- signif(range(with(x, .x)), 5) splat("Available range", "of argument", paste0(a$argu, ":"), prange(rang)) ledge <- summary(unitname(x))$legend if(!is.null(ledge)) splat(ledge) } return(invisible(NULL)) } print.fv }) ## manipulating the names in fv objects .Spatstat.FvAbbrev <- c( ".x", ".y", ".s", ".", "*", ".a") ## putSpatstatVariable("FvAbbrev", .Spatstat.FvAbbrev) fvnames <- function(X, a=".") { verifyclass(X, "fv") if(!is.character(a)) stop("argument a must be a character string") if(length(a) != 1) return(lapply(a, function(b, Z) fvnames(Z, b), Z=X)) namesX <- names(X) if(a %in% namesX) return(a) vnames <- setdiff(namesX, attr(X, "argu")) answer <- switch(a, ".y" = attr(X, "valu"), ".x" = attr(X, "argu"), ".s" = attr(X, "shade"), ".a" = vnames, "*" = rev(vnames), "." = attr(X, "dotnames") %orifnull% rev(vnames), { stop(paste("Unrecognised abbreviation", sQuote(a)), call.=FALSE) }) return(answer) } "fvnames<-" <- function(X, a=".", value) { verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop(paste("argument", sQuote("a"), "must be a character string")) ## special cases if(a == "." 
&& length(value) == 0) { ## clear the dotnames attr(X, "dotnames") <- NULL return(X) } if(a == ".a" || a == "*") { warning("Nothing changed; use names(X) <- value to change names", call.=FALSE) return(X) } ## validate the names switch(a, ".x"=, ".y"={ if(!is.character(value) || length(value) != 1) stop("value should be a single string") }, ".s"={ if(!is.character(value) || length(value) != 2) stop("value should be a vector of 2 character strings") }, "."={ if(!is.character(value)) stop("value should be a character vector") }, stop(paste("Unrecognised abbreviation", dQuote(a))) ) ## check the names match existing column names tags <- names(X) if(any(nbg <- !(value %in% tags))) stop(paste(ngettext(sum(nbg), "The string", "The strings"), commasep(dQuote(value[nbg])), ngettext(sum(nbg), "does not match the name of any column of X", "do not match the names of any columns of X"))) ## reassign names switch(a, ".x"={ attr(X, "argu") <- value }, ".y"={ attr(X, "valu") <- value }, ".s"={ attr(X, "shade") <- value }, "."={ attr(X, "dotnames") <- value }) return(X) } "names<-.fv" <- function(x, value) { nama <- colnames(x) indx <- which(nama == fvnames(x, ".x")) indy <- which(nama == fvnames(x, ".y")) inds <- which(nama %in% fvnames(x, ".s")) ind. <- which(nama %in% fvnames(x, ".")) ## rename columns of data frame x <- NextMethod("names<-") ## adjust other tags fvnames(x, ".x") <- value[indx] fvnames(x, ".y") <- value[indy] fvnames(x, ".") <- value[ind.] if(length(inds)) fvnames(x, ".s") <- value[inds] namemap <- setNames(lapply(value, as.name), nama) formula(x) <- flat.deparse(eval(substitute(substitute(fom, um), list(fom=as.formula(formula(x)), um=namemap)))) return(x) } fvlabels <- function(x, expand=FALSE) { lab <- attr(x, "labl") if(expand && !is.null(fname <- attr(x, "fname"))) { ## expand plot labels using function name nstrings <- max(substringcount("%s", lab)) ## pad with blanks nextra <- nstrings - length(fname) if(nextra > 0) fname <- c(fname, rep("", nextra)) ## render if(nstrings > 0) lab <- do.call(sprintf, append(list(lab), as.list(fname))) } ## remove empty space lab <- gsub(" ", "", lab) names(lab) <- names(x) return(lab) } "fvlabels<-" <- function(x, value) { stopifnot(is.fv(x)) stopifnot(is.character(value)) stopifnot(length(value) == length(fvlabels(x))) attr(x, "labl") <- value return(x) } flatfname <- function(x) { fn <- if(is.character(x)) x else attr(x, "fname") if(length(fn) > 1) fn <- paste0(fn[1L], "[", paste(fn[-1L], collapse=" "), "]") as.name(fn) } makefvlabel <- function(op=NULL, accent=NULL, fname, sub=NULL, argname="r") { ## de facto standardised label a <- "%s" if(!is.null(accent)) a <- paste0(accent, paren(a)) ## eg hat(%s) if(!is.null(op)) a <- paste0("bold", paren(op), "~", a) ## eg bold(var)~hat(%s) if(is.null(sub)) { if(length(fname) != 1) { a <- paste0(a, "[%s]") a <- paren(a, "{") } } else { if(length(fname) == 1) { a <- paste0(a, paren(sub, "[")) } else { a <- paste0(a, paren("%s", "["), "^", paren(sub, "{")) a <- paren(a, "{") } } a <- paste0(a, paren(argname)) return(a) } fvlabelmap <- local({ magic <- function(x) { subx <- paste("substitute(", x, ", NULL)") out <- try(eval(parse(text=subx)), silent=TRUE) if(inherits(out, "try-error")) out <- as.name(make.names(subx)) out } fvlabelmap <- function(x, dot=TRUE) { labl <- fvlabels(x, expand=TRUE) ## construct mapping from identifiers to labels map <- as.list(labl) map <- lapply(map, magic) names(map) <- colnames(x) if(dot) { ## also map "." 
and ".a" to name of target function if(!is.null(ye <- attr(x, "yexp"))) map <- append(map, list("."=ye, ".a"=ye)) ## map other fvnames to their corresponding labels map <- append(map, list(".x"=map[[fvnames(x, ".x")]], ".y"=map[[fvnames(x, ".y")]])) if(length(fvnames(x, ".s"))) { shex <- unname(map[fvnames(x, ".s")]) shadexpr <- substitute(c(A,B), list(A=shex[[1L]], B=shex[[2L]])) map <- append(map, list(".s" = shadexpr)) } } return(map) } fvlabelmap }) ## map from abbreviations to expressions involving the column names, ## for use in eval(substitute(...)) fvexprmap <- function(x) { dotnames <- fvnames(x, ".") u <- if(length(dotnames) == 1) as.name(dotnames) else as.call(lapply(c("cbind", dotnames), as.name)) ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) umap <- list(.=u, .a=u, .x=ux, .y=uy) if(length(shnm <- fvnames(x, ".s"))) { shadexpr <- substitute(cbind(A,B), list(A=as.name(shnm[1L]), B=as.name(shnm[2L]))) umap <- append(umap, list(.s = shadexpr)) } return(umap) } fvlegend <- local({ fvlegend <- function(object, elang) { ## Compute mathematical legend(s) for column(s) in fv object ## transformed by language expression 'elang'. ## The expression must already be in 'expanded' form. ## The result is an expression, or expression vector. ## The j-th entry of the vector is an expression for the ## j-th column of function values. ee <- distributecbind(as.expression(elang)) map <- fvlabelmap(object, dot = TRUE) eout <- as.expression(lapply(ee, invokemap, map=map)) return(eout) } invokemap <- function(ei, map) { eval(substitute(substitute(e, mp), list(e = ei, mp = map))) } fvlegend }) bind.fv <- function(x, y, labl=NULL, desc=NULL, preferred=NULL, clip=FALSE) { verifyclass(x, "fv") ax <- attributes(x) if(is.fv(y)) { ## y is already an fv object ay <- attributes(y) if(!identical(ax$fname, ay$fname)) { ## x and y represent different functions ## expand the labels separately fvlabels(x) <- fvlabels(x, expand=TRUE) fvlabels(y) <- fvlabels(y, expand=TRUE) ax <- attributes(x) ay <- attributes(y) } ## check compatibility of 'r' values xr <- ax$argu yr <- ay$argu rx <- x[[xr]] ry <- y[[yr]] if(length(rx) != length(ry)) { if(!clip) stop("fv objects x and y have incompatible domains") # restrict both objects to a common domain ra <- intersect.ranges(range(rx), range(ry)) x <- x[inside.range(rx, ra), ] y <- y[inside.range(ry, ra), ] rx <- x[[xr]] ry <- y[[yr]] } if(length(rx) != length(ry) || max(abs(rx-ry)) > .Machine$double.eps) stop("fv objects x and y have incompatible values of r") ## reduce y to data frame and strip off 'r' values ystrip <- as.data.frame(y) yrpos <- which(colnames(ystrip) == yr) ystrip <- ystrip[, -yrpos, drop=FALSE] ## determine descriptors if(is.null(labl)) labl <- attr(y, "labl")[-yrpos] if(is.null(desc)) desc <- attr(y, "desc")[-yrpos] ## y <- ystrip } else { ## y is a matrix or data frame y <- as.data.frame(y) } ## check for duplicated column names allnames <- c(colnames(x), colnames(y)) if(any(dup <- duplicated(allnames))) { nbg <- unique(allnames[dup]) nn <- length(nbg) warning(paste("The column", ngettext(nn, "name", "names"), commasep(sQuote(nbg)), ngettext(nn, "was", "were"), "duplicated. 
Unique names were generated")) allnames <- make.names(allnames, unique=TRUE, allow_ = FALSE) colnames(y) <- allnames[ncol(x) + seq_len(ncol(y))] } if(is.null(labl)) labl <- paste("%s[", colnames(y), "](r)", sep="") else if(length(labl) != ncol(y)) stop(paste("length of", sQuote("labl"), "does not match number of columns of y")) if(is.null(desc)) desc <- character(ncol(y)) else if(length(desc) != ncol(y)) stop(paste("length of", sQuote("desc"), "does not match number of columns of y")) if(is.null(preferred)) preferred <- ax$valu xy <- cbind(as.data.frame(x), y) z <- fv(xy, ax$argu, ax$ylab, preferred, ax$fmla, ax$alim, c(ax$labl, labl), c(ax$desc, desc), unitname=unitname(x), fname=ax$fname, yexp=ax$yexp) return(z) } cbind.fv <- function(...) { a <- list(...) n <- length(a) if(n == 0) return(NULL) if(n == 1) { ## single argument - extract it a <- a[[1L]] ## could be an fv object if(is.fv(a)) return(a) n <- length(a) } z <- a[[1L]] if(!is.fv(z)) stop("First argument should be an object of class fv") if(n > 1) for(i in 2:n) z <- bind.fv(z, a[[i]]) return(z) } collapse.anylist <- collapse.fv <- local({ collapse.fv <- function(object, ..., same=NULL, different=NULL) { if(is.fv(object)) { x <- list(object, ...) } else if(inherits(object, "anylist")) { x <- append(object, list(...)) } else if(is.list(object) && all(sapply(object, is.fv))) { x <- append(object, list(...)) } else stop("Format not understood") if(!all(sapply(x, is.fv))) stop("arguments should be objects of class fv") same <- as.character(same) different <- as.character(different) if(anyDuplicated(c(same, different))) stop(paste("The arguments", sQuote("same"), "and", sQuote("different"), "should not have entries in common")) ## handle function argument xname <- unique(sapply(x, fvnames, a=".x")) if(length(xname) > 1) stop(paste("Objects have different names for the function argument:", commasep(sQuote(xname)))) xalias <- c(xname, ".x") same <- setdiff(same, xalias) different <- setdiff(different, xalias) ## validate either <- c(same, different) if(length(either) == 0) stop(paste("At least one column of function values must be selected", "using the arguments", sQuote("same"), "and/or", sQuote("different"))) mussung <- lapply(x, missingnames, expected=either) nbg <- Reduce(intersect, mussung) if((nbad <- length(nbg)) > 0) stop(paste(ngettext(nbad, "The column", "The columns"), commasep(sQuote(nbg)), ngettext(nbad, "is", "are"), "not present in any of the function objects")) ## .............. same .................................... ## extract the common values nsame <- length(same) if(nsame == 0) { ## Initialise using first object y <- x[[1L]] xname <- fvnames(y, ".x") yname <- fvnames(y, ".y") ## The column of 'preferred values' .y cannot be deleted. ## retain .y for now and delete it later. 
z <- y[, c(xname, yname)] } else { ## Find first object that contains same[1L] same1 <- same[1L] j <- min(which(sapply(x, isRecognised, expected=same1))) y <- x[[j]] xname <- fvnames(y, ".x") yname <- fvnames(y, ".y") ## possibly expand abbreviation same[1L] <- same1 <- fvnames(y, same1) if(yname != same1) yname <- fvnames(y, ".y") <- same1 z <- y[, c(xname, yname)] if(nsame > 1) { ## Find objects that contain same[2], ..., for(k in 2:nsame) { samek <- same[k] j <- min(which(sapply(x, isRecognised, expected=samek))) xj <- x[[j]] same[k] <- samek <- fvnames(xj, samek) ## identify relevant column in object xj wanted <- (names(xj) == samek) if(any(wanted)) { y <- as.data.frame(xj)[, wanted, drop=FALSE] desc <- attr(xj, "desc")[wanted] labl <- attr(xj, "labl")[wanted] ## glue onto fv object z <- bind.fv(z, y, labl=labl, desc=desc) } } } } dotnames <- same ## .............. different ............................. ## create names for different versions versionnames <- good.names(names(x), "f", seq_along(x)) shortnames <- abbreviate(versionnames, minlength=12) ## now merge the different values if(length(different)) { for(i in seq_along(x)) { ## extract values for i-th object xi <- x[[i]] diffi <- availablenames(xi, different) # which columns are available diffi <- unlist(fvnames(xi, diffi)) # expand abbreviations if used ## identify current position of columns wanted <- (names(xi) %in% diffi) if(any(wanted)) { y <- as.data.frame(xi)[, wanted, drop=FALSE] desc <- attr(xi, "desc")[wanted] labl <- attr(xi, "labl")[wanted] ## relabel prefix <- shortnames[i] preamble <- versionnames[i] names(y) <- if(ncol(y) == 1) prefix else paste(prefix,names(y),sep="") dotnames <- c(dotnames, names(y)) ## glue onto fv object z <- bind.fv(z, y, labl=paste(prefix, labl, sep="~"), desc=paste(preamble, desc)) } } } if(length(same) == 0) { ## remove the second column which was retained earlier fvnames(z, ".y") <- names(z)[3L] z <- z[, -2L] } fvnames(z, ".") <- dotnames return(z) } isRecognised <- function(z, expected) { known <- c(names(z), .Spatstat.FvAbbrev) !is.na(match(expected, known)) } missingnames <- function(z, expected) { expected[!isRecognised(z, expected)] } availablenames <- function(z, expected){ expected[isRecognised(z, expected)] } collapse.fv }) ## rename one of the columns of an fv object tweak.fv.entry <- function(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) { hit <- (names(x) == current.tag) if(!any(hit)) return(x) ## update descriptions of column i <- min(which(hit)) if(!is.null(new.labl)) attr(x, "labl")[i] <- new.labl if(!is.null(new.desc)) attr(x, "desc")[i] <- new.desc ## adjust column tag if(!is.null(new.tag)) { names(x)[i] <- new.tag ## update dotnames dn <- fvnames(x, ".") if(current.tag %in% dn ) { dn[dn == current.tag] <- new.tag fvnames(x, ".") <- dn } ## if the tweaked column is the preferred value, adjust accordingly if(attr(x, "valu") == current.tag) attr(x, "valu") <- new.tag ## if the tweaked column is the function argument, adjust accordingly if(attr(x, "argu") == current.tag) attr(x, "valu") <- new.tag } return(x) } ## change some or all of the auxiliary text in an fv object rebadge.fv <- function(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp=new.ylab, new.dotnames, new.preferred, new.formula, new.tags) { if(!missing(new.ylab)) attr(x, "ylab") <- new.ylab if(!missing(new.yexp) || !missing(new.ylab)) attr(x, "yexp") <- new.yexp if(!missing(new.fname)) attr(x, "fname") <- new.fname if(!missing(new.desc) || !missing(new.labl) || !missing(new.tags)) { ## 
replace (some or all entries of) the following desc <- attr(x, "desc") labl <- attr(x, "labl") nama <- names(x) ## specified subset to be replaced if(missing(tags) || is.null(tags)) tags <- nama ## match up m <- match(tags, nama) ok <- !is.na(m) mok <- m[ok] ## replace if(!missing(new.desc)) { desc[mok] <- new.desc[ok] attr(x, "desc") <- desc } if(!missing(new.labl)) { labl[mok] <- new.labl[ok] attr(x, "labl") <- labl } if(!missing(new.tags)) { ## rename columns (using "fvnames<-" to adjust special entries) names(x)[mok] <- new.tags[ok] } } if(!missing(new.dotnames)) fvnames(x, ".") <- new.dotnames if(!missing(new.preferred)) { stopifnot(new.preferred %in% names(x)) attr(x, "valu") <- new.preferred } if(!missing(new.formula)) formula(x) <- new.formula return(x) } ## common invocations to label a function like Kdot or Kcross rebadge.as.crossfun <- function(x, main, sub=NULL, i, j) { i <- make.parseable(i) j <- make.parseable(j) if(is.null(sub)) { ## single function name like "K" ylab <- substitute(main[i, j](r), list(main=main, i=i, j=j)) fname <- c(main, paste0("list", paren(paste(i, j, sep=",")))) yexp <- substitute(main[list(i, j)](r), list(main=main, i=i, j=j)) } else { ## subscripted function name like "K[inhom]" ylab <- substitute(main[sub, i, j](r), list(main=main, sub=sub, i=i, j=j)) fname <- c(main, paste0("list", paren(paste(sub, i, j, sep=",")))) yexp <- substitute(main[list(sub, i, j)](r), list(main=main, sub=sub, i=i, j=j)) } labl <- rebadgeLabels(x, fname) y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp, new.labl=labl) return(y) } rebadge.as.dotfun <- function(x, main, sub=NULL, i) { i <- make.parseable(i) if(is.null(sub)) { ## single function name like "K" ylab <- substitute(main[i ~ dot](r), list(main=main, i=i)) fname <- c(main, paste0(i, "~symbol(\"\\267\")")) yexp <- substitute(main[i ~ symbol("\267")](r), list(main=main, i=i)) } else { ## subscripted function name like "K[inhom]" ylab <- substitute(main[sub, i ~ dot](r), list(main=main, sub=sub, i=i)) fname <- c(main, paste0("list", paren(paste0(sub, ",", i, "~symbol(\"\\267\")")))) yexp <- substitute(main[list(sub, i ~ symbol("\267"))](r), list(main=main, sub=sub, i=i)) } labl <- rebadgeLabels(x, fname) y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp, new.labl=labl) return(y) } rebadgeLabels <- function(x, new.fname) { fname <- attr(x, "fname") labl <- attr(x, "labl") if(length(fname) == 1L && length(new.fname) == 2L) { ## Existing function name is unsubscripted like "K" ## New function name is subscripted like "K[inhom]" ## Modify label format strings to accommodate subscripted name new.labl <- gsub("%s[", "{%s[%s]^{", labl, fixed = TRUE) new.labl <- gsub("hat(%s)[", "{hat(%s)[%s]^{", new.labl, fixed = TRUE) argu <- attr(x, "argu") new.labl <- gsub(paste0("](",argu,")"), paste0("}}(", argu, ")"), new.labl, fixed = TRUE) new.labl } else labl } ## even simpler wrapper for rebadge.fv rename.fv <- function(x, fname, ylab, yexp=ylab) { stopifnot(is.fv(x)) stopifnot(is.character(fname) && (length(fname) %in% 1:2)) argu <- fvnames(x, ".x") if(missing(ylab) || is.null(ylab)) ylab <- switch(length(fname), substitute(fn(argu), list(fn=as.name(fname), argu=as.name(argu))), substitute(fn[fsub](argu), list(fn=as.name(fname[1]), fsub=as.name(fname[2]), argu=as.name(argu)))) if(missing(yexp) || is.null(yexp)) yexp <- ylab y <- rebadge.fv(x, new.fname=fname, new.ylab=ylab, new.yexp=yexp) return(y) } ## subset extraction operator "[.fv" <- function(x, i, j, ..., drop=FALSE) { igiven <- !missing(i) 
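## Subset operator: 'i' selects rows (values of the function argument), ## 'j' selects columns; the function-argument column and the column of ## preferred values cannot be removed. ## Illustration (editor's sketch, assuming spatstat.core is attached): ##    K <- Kest(cells) ##    K[K$r <= 0.1, ]    # restrict the estimates to distances r <= 0.1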
jgiven <- !missing(j) y <- as.data.frame(x) if(igiven && jgiven) z <- y[i, j, drop=drop] else if(igiven) z <- y[i, , drop=drop] else if(jgiven) z <- y[ , j, drop=drop] else z <- y ## return only the selected values as a data frame or vector. if(drop) return(z) if(!jgiven) selected <- seq_len(ncol(x)) else { nameindices <- seq_along(names(x)) names(nameindices) <- names(x) selected <- as.vector(nameindices[j]) } # validate choice of selected/dropped columns nama <- names(z) argu <- attr(x, "argu") if(!(argu %in% nama)) stop(paste("The function argument", sQuote(argu), "must not be removed")) valu <- attr(x, "valu") if(!(valu %in% nama)) stop(paste("The default column of function values", sQuote(valu), "must not be removed")) # if the plot formula involves explicit mention of dropped columns, # replace it by a generic formula fmla <- as.formula(attr(x, "fmla")) if(!all(variablesinformula(fmla) %in% nama)) fmla <- as.formula(. ~ .x, env=environment(fmla)) ## If range of argument was implicitly changed, adjust "alim" alim <- attr(x, "alim") rang <- range(z[[argu]]) alim <- intersect.ranges(alim, rang, fatal=FALSE) result <- fv(z, argu=attr(x, "argu"), ylab=attr(x, "ylab"), valu=attr(x, "valu"), fmla=fmla, alim=alim, labl=attr(x, "labl")[selected], desc=attr(x, "desc")[selected], unitname=attr(x, "units"), fname=attr(x,"fname"), yexp=attr(x, "yexp")) ## carry over preferred names, if possible dotn <- fvnames(x, ".") fvnames(result, ".") <- dotn[dotn %in% colnames(result)] shad <- fvnames(x, ".s") if(length(shad) && all(shad %in% colnames(result))) fvnames(result, ".s") <- shad return(result) } ## Subset and column replacement methods ## to guard against deletion of columns "[<-.fv" <- function(x, i, j, value) { if(!missing(j)) { ## check for alterations to structure of object if((is.character(j) && !all(j %in% colnames(x))) || (is.numeric(j) && any(j > ncol(x)))) stop("Use bind.fv to add new columns to an object of class fv") if(is.null(value) && missing(i)) { ## column(s) will be removed co <- seq_len(ncol(x)) names(co) <- colnames(x) keepcol <- setdiff(co, co[j]) return(x[ , keepcol, drop=FALSE]) } } NextMethod("[<-") } "$<-.fv" <- function(x, name, value) { j <- which(colnames(x) == name) if(is.null(value)) { ## column will be removed if(length(j) != 0) return(x[, -j, drop=FALSE]) return(x) } if(length(j) == 0) { ## new column df <- data.frame(1:nrow(x), value)[,-1L,drop=FALSE] colnames(df) <- name y <- bind.fv(x, df, desc=paste("Additional variable", sQuote(name))) return(y) } NextMethod("$<-") } ## method for 'formula' formula.fv <- function(x, ...) { attr(x, "fmla") } # new generic "formula<-" <- function(x, ..., value) { UseMethod("formula<-") } "formula<-.fv" <- function(x, ..., value) { if(is.null(value)) value <- paste(fvnames(x, ".y"), "~", fvnames(x, ".x")) else if(inherits(value, "formula")) { ## convert formula to string value <- flat.deparse(value) } else if(!is.character(value)) stop("Assignment value should be a formula or a string") attr(x, "fmla") <- value return(x) } ## method for with() with.fv <- function(data, expr, ..., fun=NULL, enclos=NULL) { if(any(names(list(...)) == "drop")) stop("Outdated argument 'drop' used in with.fv") cl <- short.deparse(sys.call()) verifyclass(data, "fv") if(is.null(enclos)) enclos <- parent.frame() ## convert syntactic expression to 'expression' object # e <- as.expression(substitute(expr)) ## convert syntactic expression to call elang <- substitute(expr) ## map "." 
etc to names of columns of data datanames <- names(data) xname <- fvnames(data, ".x") yname <- fvnames(data, ".y") ux <- as.name(xname) uy <- as.name(yname) dnames <- intersect(datanames, fvnames(data, ".")) ud <- as.call(lapply(c("cbind", dnames), as.name)) anames <- intersect(datanames, fvnames(data, ".a")) ua <- as.call(lapply(c("cbind", anames), as.name)) if(length(snames <- fvnames(data, ".s"))) { snames <- intersect(datanames, snames) us <- as.call(lapply(c("cbind", snames), as.name)) } else us <- NULL expandelang <- eval(substitute(substitute(ee, list(.=ud, .x=ux, .y=uy, .s=us, .a=ua)), list(ee=elang))) dont.complain.about(ua, ud, us, ux, uy) evars <- all.vars(expandelang) used.dotnames <- evars[evars %in% dnames] ## evaluate expression datadf <- as.data.frame(data) results <- eval(expandelang, as.list(datadf), enclos=enclos) ## -------------------- ## commanded to return numerical values only? if(!is.null(fun) && !fun) return(results) if(!is.matrix(results) && !is.data.frame(results)) { ## result is a vector if(is.null(fun)) fun <- FALSE if(!fun || length(results) != nrow(datadf)) return(results) results <- matrix(results, ncol=1) } else { ## result is a matrix or data frame if(is.null(fun)) fun <- TRUE if(!fun || nrow(results) != nrow(datadf)) return(results) } ## result is a matrix or data frame of the right dimensions ## make a new fv object ## ensure columns of results have names if(is.null(colnames(results))) colnames(results) <- paste("col", seq_len(ncol(results)), sep="") resultnames <- colnames(results) ## get values of function argument xvalues <- datadf[[xname]] ## tack onto result matrix results <- cbind(xvalues, results) colnames(results) <- c(xname, resultnames) results <- data.frame(results) ## check for alteration of column names oldnames <- resultnames resultnames <- colnames(results)[-1L] if(any(resultnames != oldnames)) warning("some column names were illegal and have been changed") ## determine mapping (if any) from columns of output to columns of input namemap <- match(colnames(results), names(datadf)) okmap <- !is.na(namemap) ## Build up fv object ## decide which of the columns should be the preferred value newyname <- if(yname %in% resultnames) yname else resultnames[1L] ## construct default plot formula fmla <- flat.deparse(as.formula(paste(". ~", xname))) dotnames <- resultnames ## construct description strings desc <- character(ncol(results)) desc[okmap] <- attr(data, "desc")[namemap[okmap]] desc[!okmap] <- paste("Computed value", resultnames[!okmap]) ## function name (fname) and mathematical expression for function (yexp) oldyexp <- attr(data, "yexp") oldfname <- attr(data, "fname") if(is.null(oldyexp)) { fname <- cl yexp <- substitute(f(xname), list(f=as.name(fname), xname=as.name(xname))) } else { ## map 'cbind(....)' to "." 
for name of function only cb <- paste("cbind(", paste(used.dotnames, collapse=","), ")", sep="") compresselang <- gsub(cb, ".", flat.deparse(expandelang), fixed=TRUE) compresselang <- as.formula(paste(compresselang, "~1"))[[2L]] ## construct mapping using original function name labmap <- fvlabelmap(data, dot=TRUE) labmap[["."]] <- oldyexp yexp <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap))) labmap2 <- labmap labmap2[["."]] <- as.name(oldfname) fname <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap2))) fname <- paren(flat.deparse(fname)) } ## construct mathematical labels mathlabl <- as.character(fvlegend(data, expandelang)) mathlabl <- gsub("[[:space:]]+", " ", mathlabl) labl <- colnames(results) mathmap <- match(labl, used.dotnames) okmath <- !is.na(mathmap) labl[okmath] <- mathlabl[mathmap[okmath]] ## form fv object and return out <- fv(results, argu=xname, valu=newyname, labl=labl, desc=desc, alim=attr(data, "alim"), fmla=fmla, unitname=unitname(data), fname=fname, yexp=yexp, ylab=yexp) fvnames(out, ".") <- dotnames return(out) } ## method for 'range' range.fv <- local({ getValues <- function(x) { xdat <- as.matrix(as.data.frame(x)) yall <- fvnames(x, ".") vals <- xdat[, yall] return(as.vector(vals)) } range.fv <- function(..., na.rm=TRUE, finite=na.rm) { aarg <- list(...) isfun <- sapply(aarg, is.fv) if(any(isfun)) aarg[isfun] <- lapply(aarg[isfun], getValues) z <- do.call(range, append(aarg, list(na.rm=na.rm, finite=finite))) return(z) } range.fv }) min.fv <- function(..., na.rm=TRUE, finite=na.rm) { range(..., na.rm=na.rm, finite=finite)[1L] } max.fv <- function(..., na.rm=TRUE, finite=na.rm) { range(..., na.rm=na.rm, finite=finite)[2L] } ## stieltjes integration for fv objects stieltjes <- function(f, M, ...) { ## stieltjes integral of f(x) dM(x) stopifnot(is.function(f)) if(is.stepfun(M)) { envM <- environment(M) #' jump locations x <- get("x", envir=envM) #' values of integrand fx <- f(x, ...) #' jump amounts xx <- c(-Inf, (x[-1L] + x[-length(x)])/2, Inf) dM <- diff(M(xx)) #' integrate f(x) dM(x) f.dM <- fx * dM result <- sum(f.dM[is.finite(f.dM)]) return(list(result)) } else if(is.fv(M)) { ## integration variable argu <- attr(M, "argu") x <- M[[argu]] ## values of integrand fx <- f(x, ...) ## estimates of measure valuenames <- names(M)[names(M) != argu] Mother <- as.data.frame(M)[, valuenames] Mother <- as.matrix(Mother, nrow=nrow(M)) ## increments of measure dM <- apply(Mother, 2, diff) dM <- rbind(dM, 0) ## integrate f(x) dM(x) f.dM <- fx * dM f.dM[!is.finite(f.dM)] <- 0 results <- colSums(f.dM) results <- as.list(results) names(results) <- valuenames return(results) } else stop("M must be an object of class fv or stepfun") } prefixfv <- function(x, tagprefix="", descprefix="", lablprefix=tagprefix, whichtags=fvnames(x, "*")) { ## attach a prefix to fv information stopifnot(is.fv(x)) att <- attributes(x) relevant <- names(x) %in% whichtags oldtags <- names(x)[relevant] newtags <- paste(tagprefix, oldtags, sep="") newlabl <- paste(lablprefix, att$labl[relevant], sep="") newdesc <- paste(descprefix, att$desc[relevant]) y <- rebadge.fv(x, tags=oldtags, new.desc=newdesc, new.labl=newlabl, new.tags=newtags) return(y) } reconcile.fv <- local({ reconcile.fv <- function(...) { ## reconcile several fv objects by finding the columns they share in common z <- list(...)
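## The arguments may be several fv objects, or a single list of ## fv objects; both forms are handled below.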
if(!all(unlist(lapply(z, is.fv)))) { if(length(z) == 1 && is.list(z[[1L]]) && all(unlist(lapply(z[[1L]], is.fv)))) z <- z[[1L]] else stop("all arguments should be fv objects") } n <- length(z) if(n <= 1) return(z) ## find columns that are common to all estimates keepcolumns <- names(z[[1L]]) keepvalues <- fvnames(z[[1L]], "*") for(i in 2:n) { keepcolumns <- intersect(keepcolumns, names(z[[i]])) keepvalues <- intersect(keepvalues, fvnames(z[[i]], "*")) } if(length(keepvalues) == 0) stop("cannot reconcile fv objects: they have no columns in common") ## determine name of the 'preferred' column prefs <- unlist(lapply(z, fvnames, a=".y")) prefskeep <- prefs[prefs %in% keepvalues] if(length(prefskeep) > 0) { ## pick the most popular chosen <- unique(prefskeep)[which.max(table(prefskeep))] } else { ## drat - pick a value arbitrarily chosen <- keepvalues[1L] } z <- lapply(z, rebadge.fv, new.preferred=chosen) z <- lapply(z, "[.fv", j=keepcolumns) ## also clip to the same r values rmax <- min(sapply(z, maxrval)) z <- lapply(z, cliprmax, rmax=rmax) return(z) } maxrval <- function(x) { max(with(x, .x)) } cliprmax <- function(x, rmax) { x[ with(x, .x) <= rmax, ] } reconcile.fv }) as.function.fv <- function(x, ..., value=".y", extrapolate=FALSE) { trap.extra.arguments(...) value.orig <- value ## extract function argument xx <- with(x, .x) ## extract all function values yy <- as.data.frame(x)[, fvnames(x, "*"), drop=FALSE] ## determine which value(s) to supply if(!is.character(value)) stop("value should be a string or vector specifying columns of x") if(!all(value %in% colnames(yy))) { expandvalue <- try(fvnames(x, value)) if(!inherits(expandvalue, "try-error")) { value <- expandvalue } else stop("Unable to determine columns of x") } yy <- yy[,value, drop=FALSE] argname <- fvnames(x, ".x") ## determine extrapolation rule (1=NA, 2=most extreme value) stopifnot(is.logical(extrapolate)) stopifnot(length(extrapolate) %in% 1:2) endrule <- 1 + extrapolate ## make function(s) if(length(value) == 1 && !identical(value.orig, "*")) { ## make a single 'approxfun' and return it f <- approxfun(xx, yy[,,drop=TRUE], rule=endrule) ## magic names(formals(f))[1L] <- argname body(f)[[4L]] <- as.name(argname) } else { ## make a list of 'approxfuns' with different function values funs <- lapply(yy, approxfun, x = xx, rule = endrule) ## return a function which selects the appropriate 'approxfun' and executes f <- function(xxxx, what=value) { what <- match.arg(what) funs[[what]](xxxx) } ## recast function definition ## ('any sufficiently advanced technology is ## indistinguishable from magic' -- Arthur C. Clarke) formals(f)[[2L]] <- value names(formals(f))[1L] <- argname ## body(f)[[3L]][[2L]] <- as.name(argname) body(f) <- eval(substitute(substitute(z, list(xxxx=as.name(argname))), list(z=body(f)))) } class(f) <- c("fvfun", class(f)) attr(f, "fname") <- attr(x, "fname") attr(f, "yexp") <- attr(x, "yexp") return(f) } print.fvfun <- function(x, ...) { y <- args(x) yexp <- as.expression(attr(x, "yexp")) body(y) <- as.name(paste("Returns interpolated value of", yexp)) print(y, ...) 
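## (args(x) yields a function with the same formal arguments as x but a ## NULL body; the body was replaced above by a descriptive symbol, ## purely for display.)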
return(invisible(NULL)) } findcbind <- function(root, depth=0, maxdepth=1000) { ## recursive search through a parse tree to find calls to 'cbind' if(depth > maxdepth) stop("Reached maximum depth") if(length(root) == 1) return(NULL) if(identical(as.name(root[[1L]]), as.name("cbind"))) return(list(numeric(0))) out <- NULL for(i in 2:length(root)) { di <- findcbind(root[[i]], depth+1, maxdepth) if(!is.null(di)) out <- append(out, lapply(di, append, values=i, after=FALSE)) } return(out) } .MathOpNames <- c("+", "-", "*", "/", "^", "%%", "%/%", "&", "|", "!", "==", "!=", "<", "<=", ">=", ">") distributecbind <- local({ distributecbind <- function(x) { ## x is an expression involving a call to 'cbind' ## return a vector of expressions, each obtained by replacing 'cbind(...)' ## by one of its arguments in turn. stopifnot(typeof(x) == "expression") xlang <- x[[1L]] locations <- findcbind(xlang) if(length(locations) == 0) return(x) ## cbind might occur more than once ## check that the number of arguments is the same each time narg <- unique(sapply(locations, nargs.in.expr, e=xlang)) if(length(narg) > 1) return(NULL) out <- NULL if(narg > 0) { for(i in 1:narg) { ## make a version of the expression ## in which cbind() is replaced by its i'th argument fakexlang <- xlang for(loc in locations) { if(length(loc) > 0) { ## usual case: 'loc' is integer vector representing nested index cbindcall <- xlang[[loc]] ## extract i-th argument argi <- cbindcall[[i+1]] ## if argument is an expression, enclose it in parentheses if(length(argi) > 1 && paste(argi[[1L]]) %in% .MathOpNames) argi <- substitute((x), list(x=argi)) ## replace cbind call by its i-th argument fakexlang[[loc]] <- argi } else { ## special case: 'loc' = integer(0) representing xlang itself cbindcall <- xlang ## extract i-th argument argi <- cbindcall[[i+1L]] ## replace cbind call by its i-th argument fakexlang <- cbindcall[[i+1L]] } } ## add to final expression out <- c(out, as.expression(fakexlang)) } } return(out) } nargs.in.expr <- function(loc, e) { n <- if(length(loc) > 0) length(e[[loc]]) else length(e) return(n - 1L) } distributecbind }) ## Form a new 'fv' object as a ratio ratfv <- function(df, numer, denom, ..., ratio=TRUE) { ## Determine y if(!missing(df) && !is.null(df)) { y <- fv(df, ...) num <- NULL } else { ## Compute numer/denom ## Numerator must be a data frame num <- fv(numer, ...) ## Denominator may be a data frame or a constant force(denom) y <- eval.fv(num/denom) ## relabel y <- fv(as.data.frame(y), ...) } if(!ratio) return(y) if(is.null(num)) { ## Compute num = y * denom ## Denominator may be a data frame or a constant force(denom) num <- eval.fv(y * denom) ## ditch labels num <- fv(as.data.frame(num), ...) } ## make denominator an fv object if(is.data.frame(denom)) { den <- fv(denom, ...) } else { ## scalar check.1.real(denom, "Unless it is a data frame,") ## replicate it in all the data columns dendf <- as.data.frame(num) valuecols <- (names(num) != fvnames(num, ".x")) dendf[, valuecols] <- denom den <- fv(dendf, ...) 
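## 'den' now has the same shape as 'num', with the scalar denominator ## replicated in each of the value columns.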
} ## tweak the descriptions ok <- (names(y) != fvnames(y, ".x")) attr(num, "desc")[ok] <- paste("numerator of", attr(num, "desc")[ok]) attr(den, "desc")[ok] <- paste("denominator of", attr(den, "desc")[ok]) ## form ratio object y <- rat(y, num, den, check=FALSE) return(y) } ## Tack new column(s) onto a ratio fv object bind.ratfv <- function(x, numerator=NULL, denominator=NULL, labl = NULL, desc = NULL, preferred = NULL, ratio=TRUE, quotient=NULL) { if(ratio && !inherits(x, "rat")) stop("ratio=TRUE is set, but x has no ratio information", call.=FALSE) if(is.null(numerator) && !is.null(denominator) && !is.null(quotient)) numerator <- quotient * denominator if(is.null(denominator) && inherits(numerator, "rat")) { ## extract numerator & denominator from ratio object both <- numerator denominator <- attr(both, "denominator") usenames <- fvnames(both, ".a") numerator <- as.data.frame(both)[,usenames,drop=FALSE] denominator <- as.data.frame(denominator)[,usenames,drop=FALSE] ## labels default to those of ratio object ma <- match(usenames, colnames(both)) if(is.null(labl)) labl <- attr(both, "labl")[ma] if(is.null(desc)) desc <- attr(both, "desc")[ma] } # calculate ratio # The argument 'quotient' is rarely needed # except to avoid 0/0 or to improve accuracy if(is.null(quotient)) quotient <- numerator/denominator # bind new column to x y <- bind.fv(x, quotient, labl=labl, desc=desc, preferred=preferred) if(!ratio) return(y) ## convert scalar denominator to data frame if(!is.data.frame(denominator)) { if(!is.numeric(denominator) || !is.vector(denominator)) stop("Denominator should be a data frame or a numeric vector") nd <- length(denominator) if(nd != 1 && nd != nrow(x)) stop("Denominator has wrong length") dvalue <- denominator denominator <- numerator denominator[] <- dvalue } ## Now fuse with x num <- attr(x, "numerator") den <- attr(x, "denominator") num <- bind.fv(num, numerator, labl=labl, desc=paste("numerator of", desc), preferred=preferred) den <- bind.fv(den, denominator, labl=labl, desc=paste("denominator of", desc), preferred=preferred) y <- rat(y, num, den, check=FALSE) return(y) } conform.ratfv <- function(x) { ## harmonise display properties in components of a ratio stopifnot(inherits(x, "rat"), is.fv(x)) num <- attr(x, "numerator") den <- attr(x, "denominator") formula(num) <- formula(den) <- formula(x) fvnames(num, ".") <- fvnames(den, ".") <- fvnames(x, ".") unitname(num) <- unitname(den) <- unitname(x) attr(x, "numerator") <- num attr(x, "denominator") <- den return(x) } spatstat.core/R/First.R0000644000176200001440000000060614141452520014457 0ustar liggesusers## spatstat.core/R/First.R .onLoad <- function(...) reset.spatstat.options() .onAttach <- function(libname, pkgname) { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat.core"), fields="Version") vs <- as.character(vs) putSpatstatVariable("SpatstatCoreVersion", vs) packageStartupMessage(paste("spatstat.core", vs)) return(invisible(NULL)) } spatstat.core/R/simulatekppm.R0000644000176200001440000002244114144333464016114 0ustar liggesusers#' #' simulatekppm.R #' #' simulate.kppm #' #' $Revision: 1.6 $ $Date: 2021/04/16 11:06:37 $ simulate.kppm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, n.cond=NULL, w.cond=NULL, verbose=TRUE, retry=10, drop=FALSE) { starttime <- proc.time() verbose <- verbose && (nsim > 1) check.1.real(retry) # .... copied from simulate.lm .... 
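## Record the RNG state (or set it from 'seed', restoring the previous ## state on exit) so that the simulation is reproducible; the state is ## attached to the result below as the attribute 'seed'.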
if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } ## .................................. ## determine window for simulation results if(!is.null(window)) { stopifnot(is.owin(window)) win <- window } else { win <- as.owin(object) } ## .................................. ## conditional simulation if(!is.null(n.cond)) { ## fixed number of points out <- condSimCox(object, nsim=nsim, seed=NULL, ..., window=win, covariates=covariates, n.cond=n.cond, w.cond=w.cond, verbose=verbose, retry=retry, drop=drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } ## .................................. # determine parameters mp <- as.list(object$modelpar) # parameter 'mu' # = parent intensity of cluster process # = mean log intensity of log-Gaussian Cox process if(is.null(covariates) && (object$stationary || is.null(window))) { # use existing 'mu' (scalar or image) mu <- object$mu } else { # recompute 'mu' using new data switch(object$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process kappa <- mp$kappa lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(lambda/kappa) }, LGCP={ # log-Gaussian Cox process sigma2 <- mp$sigma2 lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(log(lambda) - sigma2/2) }, stop(paste("Simulation of", sQuote(object$clusters), "processes is not yet implemented")) ) } # prepare data for execution out <- list() switch(object$clusters, Thomas={ kappa <- mp$kappa sigma <- mp$sigma cmd <- expression(rThomas(kappa,sigma,mu,win, ...)) dont.complain.about(kappa, sigma, mu) }, MatClust={ kappa <- mp$kappa r <- mp$R cmd <- expression(rMatClust(kappa,r,mu,win, ...)) dont.complain.about(kappa, r) }, Cauchy = { kappa <- mp$kappa omega <- mp$omega cmd <- expression(rCauchy(kappa, omega, mu, win, ...)) dont.complain.about(kappa, omega, mu) }, VarGamma = { kappa <- mp$kappa omega <- mp$omega nu.ker <- object$covmodel$margs$nu.ker cmd <- expression(rVarGamma(kappa, nu.ker, omega, mu, win, ...)) dont.complain.about(kappa, nu.ker, omega, mu) }, LGCP={ sigma2 <- mp$sigma2 alpha <- mp$alpha cm <- object$covmodel model <- cm$model margs <- cm$margs param <- append(list(var=sigma2, scale=alpha), margs) #' if(!is.im(mu)) { # model will be simulated in 'win' cmd <- expression(rLGCP(model=model, mu=mu, param=param, ..., win=win)) #' check that RandomFields package recognises parameter format rfmod <- try(rLGCP(model, mu=mu, param=param, win=win, ..., modelonly=TRUE)) } else { # model will be simulated in as.owin(mu), then change window cmd <- expression(rLGCP(model=model, mu=mu, param=param, ...)[win]) #' check that RandomFields package recognises parameter format rfmod <- try(rLGCP(model, mu=mu, param=param, ..., modelonly=TRUE)) } #' suppress warnings from code checker dont.complain.about(model, mu, param) #' check that model is recognised if(inherits(rfmod, "try-error")) stop(paste("Internal error in simulate.kppm:", "unable to build Random Fields model", "for log-Gaussian Cox process")) }) # run if(verbose) { cat(paste("Generating", nsim, "simulations... 
")) state <- list() } for(i in 1:nsim) { out[[i]] <- try(eval(cmd)) if(verbose) state <- progressreport(i, nsim, state=state) } # detect failures if(any(bad <- unlist(lapply(out, inherits, what="try-error")))) { nbad <- sum(bad) gripe <- paste(nbad, ngettext(nbad, "simulation was", "simulations were"), "unsuccessful") if(verbose) splat(gripe) if(retry <= 0) { fate <- "returned as NULL" out[bad] <- list(NULL) } else { if(verbose) cat("Retrying...") ntried <- 0 while(ntried < retry) { ntried <- ntried + 1 for(j in which(bad)) out[[j]] <- try(eval(cmd)) bad <- unlist(lapply(out, inherits, what="try-error")) nbad <- sum(bad) if(nbad == 0) break } if(verbose) cat("Done.\n") fate <- if(nbad == 0) "all recomputed" else paste(nbad, "simulations still unsuccessful") fate <- paste(fate, "after", ntried, ngettext(ntried, "further try", "further tries")) } warning(paste(gripe, fate, sep=": ")) } if(verbose) cat("Done.\n") #' pack up out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } condSimCox <- function(object, nsim=1, ..., window=NULL, n.cond=NULL, w.cond=NULL, giveup=1000, maxchunk=100, verbose=TRUE, drop=FALSE) { stopifnot(is.kppm(object)) shortcut <- isFALSE(object$isPCP) w.sim <- as.owin(window) fullwindow <- is.null(w.cond) if(fullwindow) { w.cond <- w.sim w.free <- NULL } else { stopifnot(is.owin(w.cond)) w.free <- setminus.owin(w.sim, w.cond) } nremaining <- nsim ntried <- 0 accept <- FALSE nchunk <- 1 phistory <- mhistory <- numeric(0) results <- list() while(nremaining > 0) { ## increase chunk length nchunk <- min(maxchunk, giveup - ntried, 2 * nchunk) ## bite off next chunk of simulations if(shortcut) { lamlist <- simulate(object, nsim=nchunk, Lambdaonly=TRUE, ..., drop=FALSE, verbose=FALSE) } else { Xlist <- simulate(object, nsim=nchunk, saveLambda=TRUE, ..., drop=FALSE, verbose=FALSE) lamlist <- lapply(unname(Xlist), attr, which="Lambda", exact=TRUE) } ## compute acceptance probabilities lamlist <- lapply(lamlist, "[", i=w.sim, drop=FALSE, tight=TRUE) if(fullwindow) { mu <- sapply(lamlist, integral) } else { mu <- sapply(lamlist, integral, domain=w.cond) } p <- exp(n.cond * log(mu/n.cond) + n.cond - mu) phistory <- c(phistory, p) mhistory <- c(mhistory, mu) ## accept/reject accept <- (runif(length(p)) < p) if(any(accept)) { jaccept <- which(accept) if(length(jaccept) > nremaining) jaccept <- jaccept[seq_len(nremaining)] naccepted <- length(jaccept) if(verbose) splat("Accepted the", commasep(ordinal(ntried + jaccept)), ngettext(naccepted, "proposal", "proposals")) nremaining <- nremaining - naccepted for(j in jaccept) { lamj <- lamlist[[j]] if(min(lamj) < 0) lamj <- eval.im(pmax(lamj, 0)) if(fullwindow) { Y <- rpoint(n.cond, lamj, win=w.sim, forcewin=TRUE) } else { lamj.cond <- lamj[w.cond, drop=FALSE, tight=TRUE] lamj.free <- lamj[w.free, drop=FALSE, tight=TRUE] Ycond <- rpoint(n.cond, lamj.cond, win=w.cond) Yfree <- rpoispp(lamj.free) Y <- superimpose(Ycond, Yfree, W=w.sim) } results <- append(results, list(Y)) } } ntried <- ntried + nchunk if(ntried >= giveup && nremaining > 0) { message(paste("Gave up after", ntried, "proposals with", nsim - nremaining, "accepted")) message(paste("Mean acceptance probability =", signif(mean(phistory), 3))) break } } if((nresults <- length(results))) { results <- simulationresult(results, nresults, drop) } else { results <- solist() } attr(results, "history") <- data.frame(mu=mhistory, p=phistory) if(verbose && nresults == nsim) splat("Mean acceptance probability", 
signif(mean(phistory), 3)) return(results) } spatstat.core/R/markmark.R0000644000176200001440000000415714144333462015210 0ustar liggesusers#' #' markmark.R #' #' Mark-mark scatterplot #' #' $Revision: 1.7 $ $Date: 2018/12/03 10:26:38 $ markmarkscatter <- function(X, rmax, ..., col=NULL, symap=NULL, transform=I, jit=FALSE) { if(!is.ppp(X) && !is.pp3(X) && !is.ppx(X)) stop("X should be a point pattern", call.=FALSE) if(npoints(X) == 0) { warning("Empty point pattern; no plot generated.", call.=FALSE) return(invisible(NULL)) } stopifnot(is.marked(X)) marx <- numeric.columns(marks(X)) nc <- ncol(marx) if(nc == 0) stop("No marks are numeric", call.=FALSE) if(nc > 1) warning("Multiple columns of numeric marks: using the first column", call.=FALSE) marx <- marx[,1,drop=TRUE] transformed <- !missing(transform) marx <- transform(marx) if(jit) marx <- jitter(marx, factor=2.5) if(is.ppp(X) || is.pp3(X)) { cl <- closepairs(X, rmax, what="ijd") } else { D <- pairdist(X) ij <- which(D <= rmax, arr.ind=TRUE) cl <- list(i=ij[,1], j=ij[,2], d=as.numeric(D[ij])) } mi <- marx[cl$i] mj <- marx[cl$j] d <- cl$d ra <- range(marx) Y <- ppp(mi, mj, ra, ra, marks=d, check=FALSE) nY <- npoints(Y) Y <- Y[order(d, decreasing=TRUE)] if(is.null(symap)) { if(is.null(col)) col <- grey(seq(0.9, 0, length.out=128)) if(nY > 0) { rd <- c(0, max(d)) symap <- symbolmap(cols=col, range=rd, size=1, pch=16) } } plot(Y, ..., symap=symap, main="", leg.side="right") axis(1) axis(2) mname <- if(jit && transformed) "Jittered, transformed mark" else if(jit) "Jittered mark" else if(transformed) "Transformed mark" else "Mark" title(xlab=paste(mname, "of first point"), ylab=paste(mname, "of second point")) if(nY >= 2) { mbar2 <- mean(marx)^2 msd2 <- sqrt(2 * var(marx)) hyperbola <- function(x) { mbar2/x } bandline1 <- function(x) { x + msd2 } bandline2 <- function(x) { x - msd2 } curve(hyperbola, from=mbar2/ra[2], to=ra[2], add=TRUE) curve(bandline1, from=ra[1], to=ra[2]-msd2, add=TRUE) curve(bandline2, from=ra[1]+msd2, to=ra[2], add=TRUE) } return(invisible(NULL)) } spatstat.core/R/progress.R0000644000176200001440000002616414144333463015252 0ustar liggesusers# # progress.R # # $Revision: 1.21 $ $Date: 2016/04/25 02:34:40 $ # # progress plots (envelope representations) # dclf.progress <- function(X, ...) mctest.progress(X, ..., exponent=2) mad.progress <- function(X, ...) 
mctest.progress(X, ..., exponent=Inf) mctest.progress <- local({ smoothquantile <- function(z, alpha) { min(quantile(density(z), 1-alpha), max(z)) } silentmax <- function(z) { if(all(is.nan(z))) return(NaN) z <- z[is.finite(z)] if(length(z) == 0) return(NA) else return(max(z)) } mctest.progress <- function(X, fun=Lest, ..., exponent=1, nrank=1, interpolate=FALSE, alpha, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, "envelope")) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim nsim <- ncol(devsim) # determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } alphastring <- paste(100 * alpha, "%%", sep="") # compute critical values critval <- if(interpolate) apply(devsim, 1, smoothquantile, alpha=alpha) else if(nrank == 1) apply(devsim, 1, silentmax) else apply(devsim, 1, orderstats, k=nrank, decreasing=TRUE) # create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=critval, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . ~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, alphastring, "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") p <- hasenvelope(p, Z$envelope) # envelope may be NULL return(p) } mctest.progress }) # Do not call this function. 
# Performs underlying computations envelopeProgressData <- local({ envelopeProgressData <- function(X, fun=Lest, ..., exponent=1, alternative=c("two.sided", "less", "greater"), leaveout=1, scale=NULL, clamp=FALSE, normalize=FALSE, deflate=FALSE, rmin=0, save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE) { alternative <- match.arg(alternative) if(!(leaveout %in% 0:2)) stop("Argument leaveout should equal 0, 1 or 2") ## compute or extract simulated functions X <- envelope(X, fun=fun, ..., alternative=alternative, savefuns=TRUE, savepatterns=savepatterns) Y <- attr(X, "simfuns") ## extract values R <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1] nsim <- ncol(sim) ## choose function as reference has.theo <- ("theo" %in% names(X)) use.theo <- identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { # theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { # theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- with(X, mmean) } else { ## use sample mean of simulations *and* observed reference <- (nsim * with(X, mmean) + obs)/(nsim + 1) } } ## restrict range if(rmin > 0) { if(sum(R >= rmin) < 2) stop("rmin is too large for the available range of r values") nskip <- sum(R < rmin) } else nskip <- 0 ## determine rescaling if any if(is.null(scale)) { scaling <- NULL scr <- 1 } else if(is.function(scale)) { scaling <- scale(R) sname <- "scale(r)" ans <- check.nvector(scaling, length(R), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[R > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } scr <- scaling } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) ## compute test statistics if(is.infinite(exponent)) { ## MAD devdata <- cummaxskip(ddat, nskip) devsim <- apply(dsim, 2, cummaxskip, nskip=nskip) if(deflate) { devdata <- scr * devdata devsim <- scr * devsim } testname <- "Maximum absolute deviation test" } else { dR <- c(0, diff(R)) if(clamp || (alternative == "two.sided")) { ## deviations are nonnegative devdata <- cumsumskip(dR * ddat^exponent, nskip) devsim <- apply(dR * dsim^exponent, 2, cumsumskip, nskip=nskip) } else { ## sign of deviations should be retained devdata <- cumsumskip(dR * sign(ddat) * abs(ddat)^exponent, nskip=nskip) devsim <- apply(dR * sign(dsim) * abs(dsim)^exponent, 2, cumsumskip, nskip=nskip) } if(normalize) { devdata <- devdata/R devsim <- sweep(devsim, 1, R, "/") } if(deflate) { devdata <- scr * sign(devdata) * abs(devdata)^(1/exponent) devsim <- scr * sign(devsim) * abs(devsim)^(1/exponent) } testname <- if(exponent == 2) "Diggle-Cressie-Loosmore-Ford test" else if(exponent == 1) "Integral absolute deviation test" else paste("Integrated", ordinal(exponent), "Power Deviation test") } result <- list(R=R, devdata=devdata, devsim=devsim, testname=testname, scaleR=scr, clamp=clamp) if(save.envelope) 
result$envelope <- X return(result) } cumsumskip <- function(x, nskip=0) { if(nskip == 0) cumsum(x) else c(rep(NA, nskip), cumsum(x[-seq_len(nskip)])) } cummaxskip <- function(x, nskip=0) { if(nskip == 0) cummax(x) else c(rep(NA, nskip), cummax(x[-seq_len(nskip)])) } envelopeProgressData }) dg.progress <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, nrank=1, alpha, leaveout=1, interpolate=FALSE, rmin=0, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { env.here <- sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } if(verbose) cat("Computing first-level test data...") ## generate or extract simulated patterns and functions E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, envir.simul=env.here) ## get progress data PD <- envelopeProgressData(E, fun=fun, ..., rmin=rmin, nsim=nsim, exponent=exponent, leaveout=leaveout, verbose=FALSE) ## get first level MC test significance trace T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, leaveout=leaveout, interpolate=interpolate, rmin=rmin, confint=FALSE, verbose=FALSE, ...) R <- T1$R phat <- T1$pest if(verbose) { cat("Done.\nComputing second-level data... ") state <- list() } ## second level traces simpat <- attr(E, "simpatterns") phat2 <- matrix(, length(R), nsim) for(j in seq_len(nsim)) { simj <- simpat[[j]] sigj <- mctest.sigtrace(simj, fun=fun, nsim=nsimsub, exponent=exponent, interpolate=interpolate, leaveout=leaveout, rmin=rmin, confint=FALSE, verbose=FALSE, ...) phat2[,j] <- sigj$pest if(verbose) state <- progressreport(j, nsim, state=state) } if(verbose) cat("Done.\n") ## Dao-Genton procedure dgcritrank <- 1 + rowSums(phat > phat2) dgcritrank <- pmin(dgcritrank, nsim) devsim.sort <- t(apply(PD$devsim, 1, sort, decreasing=TRUE, na.last=TRUE)) ii <- cbind(seq_along(dgcritrank), dgcritrank) devcrit <- devsim.sort[ii] devdata <- PD$devdata ## create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=devcrit, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . ~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, paste0(100 * alpha, "%%"), "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") if(savefuns || savepatterns) p <- hasenvelope(p, E) return(p) } spatstat.core/R/quadrattest.R0000644000176200001440000004642014144333463015744 0ustar liggesusers# # quadrattest.R # # $Revision: 1.65 $ $Date: 2021/06/29 02:21:01 $ # quadrat.test <- function(X, ...) 
{ UseMethod("quadrat.test") } quadrat.test.ppp <- function(X, nx=5, ny=nx, alternative = c("two.sided", "regular", "clustered"), method = c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) { Xname <- short.deparse(substitute(X)) method <- match.arg(method) alternative <- match.arg(alternative) do.call(quadrat.testEngine, resolve.defaults(list(quote(X), nx=nx, ny=ny, alternative=alternative, method=method, conditional=conditional, CR=CR, fit=lambda, df.est=df.est, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess, nsim=nsim), list(...), list(Xname=Xname, fitname="CSR"))) } quadrat.test.splitppp <- function(X, ..., df=NULL, df.est=NULL, Xname=NULL) { if(is.null(Xname)) Xname <- short.deparse(substitute(X)) pool.quadrattest(lapply(X, quadrat.test.ppp, ...), df=df, df.est=df.est, Xname=Xname) } quadrat.test.slrm <- quadrat.test.ppm <- function(X, nx=5, ny=nx, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) { fitname <- short.deparse(substitute(X)) dataname <- paste("data from", fitname) method <- match.arg(method) alternative <- match.arg(alternative) if(!is.poisson(X)) stop("Test is only defined for Poisson point process models") if(is.marked(X)) stop("Sorry, not yet implemented for marked point process models") Xdata <- response(X) dont.complain.about(Xdata) do.call(quadrat.testEngine, resolve.defaults(list(quote(Xdata), nx=nx, ny=ny, alternative=alternative, method=method, conditional=conditional, CR=CR, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess, nsim=nsim, fit=X, df.est=df.est), list(...), list(Xname=dataname, fitname=fitname))) } quadrat.test.quadratcount <- function(X, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., nsim=1999) { trap.extra.arguments(...) method <- match.arg(method) alternative <- match.arg(alternative) quadrat.testEngine(Xcount=X, alternative=alternative, fit=lambda, df.est=df.est, method=method, conditional=conditional, CR=CR, nsim=nsim) } quadrat.testEngine <- function(X, nx, ny, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, ..., nsim=1999, Xcount=NULL, xbreaks=NULL, ybreaks=NULL, tess=NULL, fit=NULL, df.est=NULL, Xname=NULL, fitname=NULL) { trap.extra.arguments(...) 
method <- match.arg(method) alternative <- match.arg(alternative) if(method == "MonteCarlo") { check.1.real(nsim) explain.ifnot(nsim > 0) } if(!is.null(df.est)) check.1.integer(df.est) if(is.null(Xcount)) Xcount <- quadratcount(X, nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess) tess <- attr(Xcount, "tess") ## determine expected values under model normalised <- FALSE if(is.null(fit)) { nullname <- "CSR" if(tess$type == "rect") areas <- outer(diff(tess$xgrid), diff(tess$ygrid), "*") else areas <- unlist(lapply(tiles(tess), area)) fitmeans <- sum(Xcount) * areas/sum(areas) normalised <- TRUE df <- switch(method, Chisq = length(fitmeans) - 1, MonteCarlo = NULL) } else if(is.im(fit) || inherits(fit, "funxy")) { nullname <- "Poisson process with given intensity" fit <- as.im(fit, W=Window(tess)) areas <- integral(fit, tess) fitmeans <- sum(Xcount) * areas/sum(areas) normalised <- TRUE df <- switch(method, Chisq = length(fitmeans) - df.est %orifnull% 1, MonteCarlo = NULL) } else if(is.ppm(fit)) { if(!is.poisson(fit)) stop("Quadrat test only supported for Poisson point process models") if(is.marked(fit)) stop("Sorry, not yet implemented for marked point process models") nullname <- paste("fitted Poisson model", sQuote(fitname)) Q <- quad.ppm(fit, drop=TRUE) ww <- w.quad(Q) lambda <- fitted(fit, drop=TRUE) masses <- lambda * ww # sum weights of quadrature points in each tile if(tess$type == "rect") { xx <- x.quad(Q) yy <- y.quad(Q) xbreaks <- tess$xgrid ybreaks <- tess$ygrid fitmeans <- rectquadrat.countEngine(xx, yy, xbreaks, ybreaks, weights=masses) fitmeans <- as.vector(t(fitmeans)) } else { V <- tileindex(as.ppp(Q), Z=tess) fitmeans <- tapplysum(masses, list(tile=V)) } switch(method, Chisq = { df <- length(fitmeans) - df.est %orifnull% length(coef(fit)) if(df < 1) stop(paste("Not enough quadrats: degrees of freedom df =", df)) }, MonteCarlo = { df <- NA }) } else if(is.slrm(fit)) { nullname <- paste("fitted spatial logistic regression", sQuote(fitname)) probs <- predict(fit, type="probabilities") ## usual case xy <- raster.xy(probs, drop=TRUE) masses <- as.numeric(probs[]) V <- tileindex(xy, Z=tess) fitmeans <- tapplysum(masses, list(tile=V)) switch(method, Chisq = { df <- length(fitmeans) - df.est %orifnull% length(coef(fit)) if(df < 1) stop(paste("Not enough quadrats: degrees of freedom df =", df)) }, MonteCarlo = { df <- NA }) } else stop("fit should be a point process model (ppm or slrm) or pixel image") ## assemble data for test OBS <- as.vector(t(as.table(Xcount))) EXP <- as.vector(fitmeans) if(!normalised) EXP <- EXP * sum(OBS)/sum(EXP) ## label it switch(method, Chisq = { if(CR == 1) { testname <- "Chi-squared test" reference <- statname <- NULL } else { testname <- CressieReadTestName(CR) statname <- paste("Test statistic:", CressieReadName(CR)) reference <- "(p-value obtained from chi-squared distribution)" } }, MonteCarlo = { testname <- paste(if(conditional) "Conditional" else "Unconditional", "Monte Carlo test") statname <- paste("Test statistic:", CressieReadName(CR)) reference <- NULL }) testblurb <- paste(testname, "of", nullname, "using quadrat counts") testblurb <- c(testblurb, statname, reference) #' perform test result <- X2testEngine(OBS, EXP, method=method, df=df, nsim=nsim, conditional=conditional, CR=CR, alternative=alternative, testname=testblurb, dataname=Xname) class(result) <- c("quadrattest", class(result)) attr(result, "quadratcount") <- Xcount return(result) } CressieReadStatistic <- function(OBS, EXP, lambda=1, normalise=FALSE, named=TRUE) { 
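## Cressie-Read power divergence statistic ##     (2/(lambda*(lambda+1))) * sum(OBS * ((OBS/EXP)^lambda - 1)) ## with the limiting cases lambda = 0 (likelihood ratio G2) and ## lambda = -1 (modified likelihood ratio GM2) evaluated analytically; ## lambda = 1 gives the Pearson X2 statistic. ## Quick check (editor's illustration): ##    CressieReadStatistic(OBS=c(5,10), EXP=c(7.5,7.5)) ## returns X2 = 12.5/7.5, approximately 1.667.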
if(normalise) EXP <- sum(OBS) * EXP/sum(EXP) y <- if(lambda == 1) sum((OBS - EXP)^2/EXP) else if(lambda == 0) 2 * sum(ifelse(OBS > 0, OBS * log(OBS/EXP), 0)) else if(lambda == -1) 2 * sum(EXP * log(EXP/OBS)) else (2/(lambda * (lambda + 1))) * sum(ifelse(OBS > 0, OBS * ((OBS/EXP)^lambda - 1), 0)) names(y) <- if(named) CressieReadSymbol(lambda) else NULL return(y) } CressieReadSymbol <- function(lambda) { if(lambda == 1) "X2" else if(lambda == 0) "G2" else if(lambda == -1/2) "T2" else if(lambda == -1) "GM2" else if(lambda == -2) "NM2" else "CR" } CressieReadName <- function(lambda) { if(lambda == 1) "Pearson X2 statistic" else if(lambda == 0) "likelihood ratio test statistic G2" else if(lambda == -1/2) "Freeman-Tukey statistic T2" else if(lambda == -1) "modified likelihood ratio test statistic GM2" else if(lambda == -2) "Neyman modified X2 statistic NM2" else paste("Cressie-Read statistic", paren(paste("lambda =", if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda) ) ) } CressieReadTestName <- function(lambda) { if(lambda == 1) "Chi-squared test" else if(lambda == 0) "Likelihood ratio test" else if(lambda == -1/2) "Freeman-Tukey test" else if(lambda == -1) "Modified likelihood ratio test" else if(lambda == -2) "Neyman modified chi-squared test" else paste("Cressie-Read power divergence test", paren(paste("lambda =", if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda) ) ) } X2testEngine <- function(OBS, EXP, ..., method=c("Chisq", "MonteCarlo"), CR=1, df=NULL, nsim=NULL, conditional, alternative, testname, dataname) { method <- match.arg(method) if(method == "Chisq" && any(EXP < 5)) warning(paste("Some expected counts are small;", "chi^2 approximation may be inaccurate"), call.=FALSE) X2 <- CressieReadStatistic(OBS, EXP, CR) # conduct test switch(method, Chisq = { if(!is.null(df)) names(df) <- "df" pup <- pchisq(X2, df, lower.tail=FALSE) plo <- pchisq(X2, df, lower.tail=TRUE) PVAL <- switch(alternative, regular = plo, clustered = pup, two.sided = 2 * min(pup, plo)) }, MonteCarlo = { nsim <- as.integer(nsim) if(conditional) { npts <- sum(OBS) p <- EXP/sum(EXP) SIM <- rmultinom(n=nsim,size=npts,prob=p) } else { ne <- length(EXP) SIM <- matrix(rpois(nsim*ne,EXP),nrow=ne) } simstats <- apply(SIM, 2, CressieReadStatistic, EXP=EXP, lambda=CR, normalise=!conditional) if(anyDuplicated(simstats)) simstats <- jitter(simstats) phi <- (1 + sum(simstats >= X2))/(1+nsim) plo <- (1 + sum(simstats <= X2))/(1+nsim) PVAL <- switch(alternative, clustered = phi, regular = plo, two.sided = min(1, 2 * min(phi,plo))) }) result <- structure(list(statistic = X2, parameter = df, p.value = PVAL, method = testname, data.name = dataname, alternative = alternative, observed = OBS, expected = EXP, residuals = (OBS - EXP)/sqrt(EXP), CR = CR, method.key = method), class = "htest") return(result) } print.quadrattest <- function(x, ...) 
{ NextMethod("print") single <- is.atomicQtest(x) if(!single) splat("Pooled test") if(waxlyrical('gory')) { if(single) { cat("Quadrats: ") } else { splat("Quadrats of component tests:") } x <- as.tess(x) do.call(print, resolve.defaults(list(x=quote(x)), list(...), list(brief=TRUE))) } return(invisible(NULL)) } plot.quadrattest <- local({ plot.quadrattest <- function(x, ..., textargs=list()) { xname <- short.deparse(substitute(x)) if(!is.atomicQtest(x)) { # pooled test - plot the original tests tests <- extractAtomicQtests(x) dont.complain.about(tests) do.call(plot, resolve.defaults(list(x=quote(tests)), list(...), list(main=xname))) return(invisible(NULL)) } Xcount <- attr(x, "quadratcount") # plot tessellation tess <- as.tess(Xcount) do.call(plot.tess, resolve.defaults(list(quote(tess)), list(...), list(main=xname))) # compute locations for text til <- tiles(tess) ok <- sapply(til, haspositivearea) incircles <- lapply(til[ok], incircle) x0 <- sapply(incircles, getElement, name="x") y0 <- sapply(incircles, getElement, name="y") ra <- sapply(incircles, getElement, name="r") # plot observed counts cos30 <- sqrt(2)/2 sin30 <- 1/2 f <- 0.4 dotext(-f * cos30, f * sin30, as.vector(t(as.table(Xcount)))[ok], x0, y0, ra, textargs, adj=c(1,0), ...) # plot expected counts dotext(f * cos30, f * sin30, round(x$expected,1)[ok], x0, y0, ra, textargs, adj=c(0,0), ...) # plot Pearson residuals dotext(0, -f, signif(x$residuals,2)[ok], x0, y0, ra, textargs, ...) return(invisible(NULL)) } dotext <- function(dx, dy, values, x0, y0, ra, textargs, ...) { xx <- x0 + dx * ra yy <- y0 + dy * ra do.call.matched(text.default, resolve.defaults(list(x=quote(xx), y = quote(yy)), list(labels=paste(as.vector(values))), textargs, list(...)), funargs=graphicsPars("text")) } haspositivearea <- function(x) { !is.null(x) && area(x) > 0 } plot.quadrattest }) ######## pooling multiple quadrat tests into a quadrat test pool.quadrattest <- function(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) { argh <- list(...) 
if(!is.null(df) && !is.null(df.est)) stop("Arguments df and df.est are incompatible") if(all(unlist(lapply(argh, inherits, what="quadrattest")))) { # Each argument is a quadrattest object tests <- argh } else if(length(argh) == 1 && is.list(arg1 <- argh[[1]]) && all(unlist(lapply(arg1, inherits, "quadrattest")))) { # There is just one argument, which is a list of quadrattests tests <- arg1 } else stop("Each entry in the list must be a quadrat test") # data from all cells in all tests OBS <- unlist(lapply(tests, getElement, name="observed")) EXP <- unlist(lapply(tests, getElement, name="expected")) # RES <- unlist(lapply(tests, getElement, name="residuals")) # STA <- unlist(lapply(tests, getElement, name="statistic")) # information about each test Mkey <- unlist(lapply(tests, getElement, name="method.key")) Testname <- lapply(tests, getElement, name="method") Alternative <- unlist(lapply(tests, getElement, name="alternative")) Conditional <- unlist(lapply(tests, getElement, name="conditional")) # name of data if(is.null(Xname)) { Nam <- unlist(lapply(tests, getElement, name="data.name")) Xname <- commasep(sQuote(Nam)) } # name of test testname <- unique(Testname) method.key <- unique(Mkey) if(length(testname) > 1) stop(paste("Cannot combine different types of tests:", commasep(sQuote(method.key)))) testname <- testname[[1]] # alternative hypothesis alternative <- unique(Alternative) if(length(alternative) > 1) stop(paste("Cannot combine tests with different alternatives:", commasep(sQuote(alternative)))) # conditional tests conditional <- any(Conditional) if(conditional) stop("Sorry, not implemented for conditional tests") # Cressie-Read exponent if(is.null(CR)) { CR <- unlist(lapply(tests, getElement, name="CR")) CR <- unique(CR) if(length(CR) > 1) { warning("Tests used different values of CR; assuming CR=1") CR <- 1 } } if(method.key == "Chisq") { # determine degrees of freedom if(is.null(df)) { if(!is.null(df.est)) { # total number of observations minus number of fitted parameters df <- length(OBS) - df.est } else { # total degrees of freedom of tests # implicitly assumes independence of tests PAR <- unlist(lapply(tests, getElement, name="parameter")) df <- sum(PAR) } } # validate df if(df < 1) stop(paste("Degrees of freedom = ", df)) names(df) <- "df" } # perform test result <- X2testEngine(OBS, EXP, method=method.key, df=df, nsim=nsim, conditional=conditional, CR=CR, alternative=alternative, testname=testname, dataname=Xname) # add info class(result) <- c("quadrattest", class(result)) attr(result, "tests") <- as.solist(tests) # there is no quadratcount attribute return(result) } is.atomicQtest <- function(x) { inherits(x, "quadrattest") && is.null(attr(x, "tests")) } extractAtomicQtests <- function(x) { if(is.atomicQtest(x)) return(list(x)) stopifnot(inherits(x, "quadrattest")) tests <- attr(x, "tests") y <- lapply(tests, extractAtomicQtests) z <- do.call(c, y) return(as.solist(z)) } as.tess.quadrattest <- function(X) { if(is.atomicQtest(X)) { Y <- attr(X, "quadratcount") return(as.tess(Y)) } tests <- extractAtomicQtests(X) return(as.solist(lapply(tests, as.tess.quadrattest))) } as.owin.quadrattest <- function(W, ..., fatal=TRUE) { if(is.atomicQtest(W)) return(as.owin(as.tess(W), ..., fatal=fatal)) gezeur <- paste("Cannot convert quadrat test result to a window;", "it contains data for several windows") if(fatal) stop(gezeur) else warning(gezeur) return(NULL) } domain.quadrattest <- Window.quadrattest <- function(X, ...) { as.owin(X) } ## The shift method is undocumented.
## It is only needed in plot.listof etc shift.quadrattest <- function(X, ...) { if(is.atomicQtest(X)) { attr(X, "quadratcount") <- qc <- shift(attr(X, "quadratcount"), ...) attr(X, "lastshift") <- getlastshift(qc) } else { tests <- extractAtomicQtests(X) attr(X, "tests") <- te <- lapply(tests, shift, ...) attr(X, "lastshift") <- getlastshift(te[[1]]) } return(X) } spatstat.core/R/newformula.R0000644000176200001440000000110514144333463015551 0ustar liggesusers#' #' newformula.R #' #' $Revision: 1.3 $ $Date: 2020/11/30 05:01:28 $ #' #' Update formula and expand polynomial newformula <- function(old, change, eold, enew, expandpoly=spatstat.options("expand.polynom")) { old <- if(is.null(old)) ~1 else eval(old, eold) change <- if(is.null(change)) ~1 else eval(change, enew) old <- as.formula(old, env=eold) change <- as.formula(change, env=enew) if(expandpoly) { old <- expand.polynom(old) change <- expand.polynom(change) } answer <- update.formula(old, change) return(answer) } spatstat.core/R/ppmclass.R0000644000176200001440000007724314144333463015234 0ustar liggesusers# # ppmclass.R # # Class 'ppm' representing fitted point process models. # # # $Revision: 2.150 $ $Date: 2021/06/29 02:28:26 $ # # An object of class 'ppm' contains the following: # # $method model-fitting method (currently "mpl") # # $coef vector of fitted regular parameters # as given by coef(glm(....)) # # $trend the trend formula # or NULL # # $interaction the interaction family # (an object of class 'interact') or NULL # # $Q the quadrature scheme used # # $maxlogpl the maximised value of log pseudolikelihood # # $internal list of internal calculation results # # $correction name of edge correction method used # $rbord erosion distance for border correction (or NULL) # # $the.call the originating call to ppm() # # $the.version version of mpl() which yielded the fit # # #------------------------------------------------------------------------ is.ppm <- function(x) { inherits(x, "ppm") } print.ppm <- function(x, ..., what=c("all", "model", "trend", "interaction", "se", "errors")) { verifyclass(x, "ppm") misswhat <- missing(what) opts <- c("model", "trend", "interaction", "se", "errors") what <- match.arg(what, c("all", opts), several.ok=TRUE) if("all" %in% what) what <- opts np <- length(coef(x)) terselevel <- spatstat.options("terse") digits <- getOption('digits') ## Determine whether SE is required want.SE <- force.SE <- force.no.SE <- FALSE if(!misswhat && ("se" %in% what)) { ## SE was explicitly requested force.SE <- TRUE } else { ## Default rule: compute SE only if the model is Poisson switch(spatstat.options("print.ppm.SE"), always = { force.SE <- TRUE }, never = { force.no.SE <- TRUE }, poisson = { want.SE <- is.poisson(x) && waxlyrical("extras", terselevel) }) } do.SE <- (want.SE || force.SE) && !force.no.SE if(do.SE) { ## Check whether able to compute SE unable.SE <- (np == 0) || any(x$fitter %in% "gam") || !is.null(x$internal$VB) || (any(x$method %in% "mppm") && is.null(x$varcov)) ## resolve if(force.SE && unable.SE) warning("Unable to compute variances for this model", call.=FALSE) do.SE <- do.SE && !unable.SE } s <- summary.ppm(x, quick=if(do.SE) FALSE else "no variances") notrend <- s$no.trend # stationary <- s$stationary poisson <- s$poisson markeddata <- s$marked multitype <- s$multitype # markedpoisson <- poisson && markeddata csr <- poisson && notrend && !markeddata special <- csr && all(c("model", "trend") %in% what) if(special) { ## ---------- Trivial/special cases ----------------------- splat("Stationary 
Poisson process") cat("Intensity:", signif(s$trend$value, digits), fill=TRUE) } else { ## ----------- Print model type ------------------- if("model" %in% what) { splat(s$name) parbreak(terselevel) if(markeddata) mrk <- s$entries$marks if(multitype) { splat(paste("Possible marks:", commasep(sQuote(levels(mrk))))) parbreak(terselevel) } } ## ----- trend -------------------------- if("trend" %in% what) { if(!notrend) { splat("Log", if(poisson) "intensity: " else "trend: ", pasteFormula(s$trend$formula)) parbreak(terselevel) } if(waxlyrical('space', terselevel) || !do.SE) { ## print trend coefficients, unless redundant and space is tight tv <- s$trend$value if(length(tv) == 0) splat("[No trend coefficients]") else { thead <- paste0(s$trend$label, ":") if(is.list(tv)) { splat(thead) for(i in seq_along(tv)) print(tv[[i]]) } else if(is.numeric(tv) && length(tv) == 1) { ## single number: append to end of current line tvn <- names(tv) tveq <- if(is.null(tvn)) "\t" else paste(" ", tvn, "= ") splat(paste0(thead, tveq, signif(tv, digits))) } else { ## some other format splat(thead) print(tv) } } parbreak(terselevel) } } if(waxlyrical("space", terselevel) && !is.null(cfa <- s$covfunargs) && length(cfa) > 0) { cfafitter <- s$cfafitter if(is.null(cfafitter)) { cat("Covariate", "function", "arguments", "(covfunargs)", "provided:", fill=TRUE) } else { cat("Irregular", "parameters", "(covfunargs)", "fitted", "by", paste0(sQuote(cfafitter), ":"), fill=TRUE) } for(i in seq_along(cfa)) { cat(paste(names(cfa)[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cfai <- signif(cfai, digits) cat(paste(cfai, "\n")) } else print(cfai) } } } # ---- Interaction ---------------------------- if("interaction" %in% what) { if(!poisson) { print(s$interaction, family=FALSE, banner=FALSE, brief=!waxlyrical("extras")) parbreak(terselevel) } } # ----- parameter estimates with SE and 95% CI -------------------- if(waxlyrical("extras", terselevel) && ("se" %in% what) && (np > 0)) { if(!is.null(cose <- s$coefs.SE.CI)) { print(cose, digits=digits) } else if(do.SE) { # standard error calculation failed splat("Standard errors unavailable; variance-covariance matrix is singular") } else if(!force.no.SE) { # standard error was voluntarily omitted if(waxlyrical('space', terselevel)) splat("For standard errors, type coef(summary(x))\n") } } # ---- Warnings issued in mpl.prepare --------------------- if(waxlyrical("errors", terselevel) && "errors" %in% what) { probs <- s$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(x) { if(is.list(x) && !is.null(p <- x$print)) splat(paste("Problem:\n", p, "\n\n")) }) if(s$old) warning(paste("Model fitted by old spatstat version", s$version)) # ---- Algorithm status ---------------------------- fitter <- s$fitter converged <- s$converged if(!is.null(fitter) && fitter %in% c("glm", "gam") && !converged) splat("*** Fitting algorithm for", sQuote(fitter), "did not converge ***") } if(waxlyrical("extras", terselevel) && s$projected) { parbreak() splat("Fit was emended to obtain a valid point process model") } if(identical(s$valid, FALSE) && waxlyrical("errors", terselevel)) { parbreak() splat("*** Model is not valid ***") if(!all(is.finite(s$entries$coef))) { splat("*** Some coefficients are NA or Inf ***") } else { splat("*** Interaction parameters are outside valid range ***") } } else if(is.na(s$valid) && waxlyrical("extras", terselevel)) { parbreak() splat("[Validity of model could not be checked]") } return(invisible(NULL)) } # Extract 
version string from ppm object versionstring.ppm <- function(object) { verifyclass(object, "ppm") v <- object$version if(is.null(v) || !is.list(v)) v <- list(major=1, minor=3, release=4) vs <- paste(v$major, ".", v$minor, "-", v$release, sep="") return(vs) } # Extract quadrature scheme quad.ppm <- function(object, drop=FALSE, clip=FALSE) { if(!is.ppm(object)) { if(is.kppm(object)) object <- object$po else if(is.lppm(object)) object <- object$fit else stop("object is not of class ppm, kppm or lppm") } Q <- object$Q if(is.null(Q)) return(Q) if(drop || clip) { ok <- getglmsubset(object) if(!is.null(ok)) Q <- Q[ok] } if(clip && object$correction == "border") { Wminus <- erosion(as.owin(object), object$rbord) Q <- Q[Wminus] } return(Q) } data.ppm <- function(object) { verifyclass(object, "ppm") object$Q$data } dummy.ppm <- function(object, drop=FALSE) { return(quad.ppm(object, drop=drop)$dummy) } # method for 'coef' coef.ppm <- function(object, ...) { verifyclass(object, "ppm") object$coef } hasglmfit <- function(object) { return(!is.null(object$internal$glmfit)) } getglmfit <- function(object) { verifyclass(object, "ppm") glmfit <- object$internal$glmfit if(is.null(glmfit)) return(NULL) if(object$method != "mpl") glmfit$coefficients <- object$coef return(glmfit) } getglmdata <- function(object, drop=FALSE) { verifyclass(object, "ppm") gd <- object$internal$glmdata if(!drop) return(gd) return(gd[getglmsubset(object), , drop=FALSE]) } getglmsubset <- function(object) { gd <- object$internal$glmdata if(object$method=="logi") return(gd$.logi.ok) return(gd$.mpl.SUBSET) } getppmdatasubset <- function(object) { # Equivalent to getglmsubset(object)[is.data(quad.ppm(object))] # but also works for models fitted exactly, etc # if(object$method %in% c("mpl", "ho")) { sub <- getglmsubset(object) if(!is.null(sub)) { Z <- is.data(quad.ppm(object)) return(sub[Z]) } } X <- data.ppm(object) sub <- if(object$correction == "border") { (bdist.points(X) >= object$rbord) } else rep(TRUE, npoints(X)) return(sub) } getppmOriginalCovariates <- function(object) { df <- as.data.frame(as.ppp(quad.ppm(object))) cova <- object$covariates if(length(cova) > 0) { df2 <- mpl.get.covariates(object$covariates, union.quad(quad.ppm(object)), "quadrature points", object$covfunargs) df <- cbind(df, df2) } return(df) } # ??? method for 'effects' ??? valid <- function(object, ...) { UseMethod("valid") } valid.ppm <- function(object, warn=TRUE, ...) { verifyclass(object, "ppm") coeffs <- coef(object) # ensure all coefficients are fitted, and finite if(!all(is.finite(coeffs))) return(FALSE) # inspect interaction inte <- object$interaction if(is.poisson(object)) return(TRUE) # Poisson process # extract fitted interaction coefficients Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset Icoeffs <- coeffs[Vnames[!IsOffset]] # check interaction checker <- inte$valid if(is.null(checker) || !newstyle.coeff.handling(inte)) { if(warn) warning("Internal error: unable to check validity of model") return(NA) } #' remove prefix to obtain coefficient names expected by interaction if(npre <- sum(nchar(object$internal$vnameprefix))) names(Icoeffs) <- substring(names(Icoeffs), npre+1L) answer <- checker(Icoeffs, inte) return(answer) } emend <- function(object, ...) { UseMethod("emend") } emend.ppm <- project.ppm <- local({ tracemessage <- function(depth, ...) 
{ if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") splat(paste0(spacer, marker, " ", paste(...))) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } emend.ppm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "ppm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.ppm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # First ensure trend coefficients are all finite coeffs <- coef(object) # Which coefficients are trend coefficients coefnames <- names(coeffs) internames <- object$internal$Vnames trendnames <- coefnames[!(coefnames %in% internames)] # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") istrend <- (coef2term > 0) & (coefnames %in% trendnames) # Identify non-finite trend coefficients bad <- istrend & !is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } # Now handle interaction inte <- object$interaction if(is.null(inte)) { tracemessage(td, "No interaction to check.") leaving(td) return(object) } tracemessage(td, "Inspecting interaction terms.") proj <- inte$project if(is.null(proj)) { whinge <- "Internal error: interaction has no projection operator" if(fatal) stop(whinge) warning(whinge) leaving(td) return(object) } # ensure the same edge correction is used! 
correction <- object$correction rbord <- object$rbord # apply projection coef.orig <- coeffs <- coef(object) Vnames <- object$internal$Vnames Icoeffs <- coeffs[Vnames] change <- proj(Icoeffs, inte) if(is.null(change)) { tracemessage(td, "Interaction does not need updating.") leaving(td) return(object) } tracemessage(td, "Interaction is not valid.") if(is.numeric(change)) { tracemessage(td, "Interaction coefficients updated without re-fitting.") # old style: 'project' returned a vector of updated coefficients Icoeffs <- change # tweak interaction coefficients object$coef[Vnames] <- Icoeffs # recompute fitted interaction object$fitin <- NULL object$fitin <- fitin(object) } else if(is.interact(change)) { # new style: 'project' returns an interaction if(trace) { tracemessage(td, "Interaction changed to:") print(change) } # refit the whole model # (using the same edge correction) # (and the same quadrature scheme) newobject <- update(object, interaction=change, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) object <- newobject } else if(is.list(change) && all(unlist(lapply(change, is.interact)))) { # new style: 'project' returns a list of candidate interactions nchange <- length(change) tracemessage(td, "Considering", nchange, ngettext(nchange, "submodel", "submodels")) bestobject <- NULL for(i in seq_len(nchange)) { change.i <- change[[i]] if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate submodel, with interaction:") print(change.i) } # refit the whole model object.i <- update(object, interaction=change.i, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } # end loop through submodels if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } object <- bestobject } else stop("Internal error: unrecognised format of update") object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } emend.ppm }) # more methods deviance.ppm <- function(object, ...) 
{
  force(object)
  satlogpl <- object$satlogpl
  if(is.null(satlogpl)) {
    object <- update(object, forcefit=TRUE)
    satlogpl <- object$satlogpl
  }
  if(is.null(satlogpl) || !is.finite(satlogpl))
    return(NA)
  ll <- do.call(logLik, resolve.defaults(list(object=quote(object),
                                              absolute=FALSE),
                                         list(...)))
  ll <- as.numeric(ll)
  2 * (satlogpl - ll)
}

logLik.ppm <- function(object, ..., new.coef=NULL, warn=TRUE, absolute=FALSE) {
  if(!is.poisson.ppm(object) && warn)
    warn.once("ppmLogLik",
              "log likelihood is not available for non-Poisson model;",
              "log pseudolikelihood returned")
  ## degrees of freedom
  nip <- if(!inherits(object, "ippm")) 0 else
         length(attr(object$covfunargs, "free"))
  df <- length(coef(object)) + nip
  ## compute adjustment constant
  if(absolute && object$method %in% c("exact", "mpl", "ho")) {
    X <- data.ppm(object)
    W <- Window(X)
    areaW <- if(object$correction == "border" && object$rbord > 0)
             eroded.areas(W, object$rbord) else area(W)
    constant <- areaW * markspace.integral(X)
  } else constant <- 0
  ##
  if(is.null(new.coef)) {
    ## extract from object
    ll <- object$maxlogpl + constant
    attr(ll, "df") <- df
    class(ll) <- "logLik"
    return(ll)
  }
  ## recompute for new parameter values
  method <- object$method
  if(method == "exact") {
    ## refit to provide the quadrature machinery required below
    object <- update(object, forcefit=TRUE)
    method <- object$method
  }
  Q <- quad.ppm(object, drop=TRUE)
  Z <- is.data(Q)
  cif <- fitted(object, type="cif", new.coef=new.coef, drop=TRUE)
  cifdata <- cif[Z]
  switch(method,
         mpl=,
         exact=,
         ho = {
           w <- w.quad(Q)
           ll <- sum(log(cifdata[cifdata > 0])) - sum(w * cif)
         },
         logi=,
         VBlogi={
           B <- getglmdata(object, drop=TRUE)$.logi.B
           p <- cif/(B+cif)
           ll <- sum(log(p/(1-p))[Z]) + sum(log(1-p)) + sum(log(B[Z]))
         },
         stop(paste("Internal error: unrecognised ppm method:",
                    dQuote(method)))
         )
  ll <- ll + constant
  attr(ll, "df") <- df
  class(ll) <- "logLik"
  return(ll)
}

pseudoR2 <- function(object, ...) {
  UseMethod("pseudoR2")
}

pseudoR2.slrm <- pseudoR2.ppm <- function(object, ..., keepoffset=TRUE) {
  dres <- deviance(object, ..., warn=FALSE)
  nullfmla <- . ~ 1
  if(keepoffset && has.offset.term(object)) {
    off <- attr(model.depends(object), "offset")
    offterms <- row.names(off)[apply(off, 1, any)]
    if(length(offterms)) {
      nullrhs <- paste(offterms, collapse=" + ")
      nullfmla <- as.formula(paste(". ~ ", nullrhs))
    }
  }
  nullmod <- update(object, nullfmla, forcefit=TRUE)
  dnul <- deviance(nullmod, warn=FALSE)
  return(1 - dres/dnul)
}

formula.ppm <- function(x, ...) {
  return(x$trend)
}

terms.ppm <- function(x, ...) {
  terms(x$terms, ...)
}
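## Example (an illustrative sketch, not package code; 'cells' is the
## dataset from spatstat.data):
##   fit <- ppm(cells ~ x)
##   logLik(fit)    # log likelihood of the fitted Poisson model
##   deviance(fit)  # 2 * (saturated logPL - fitted logPL)
##   pseudoR2(fit)  # 1 - deviance(fit)/deviance(intercept-only model)

labels.ppm <- function(object, ...)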
{ # extract fitted trend coefficients co <- coef(object) Vnames <- object$internal$Vnames is.trend <- !(names(co) %in% Vnames) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) & is.trend okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } AIC.ppm <- function(object, ..., k=2, takeuchi=TRUE) { ll <- logLik(object, warn=FALSE) pen <- attr(ll, "df") if(takeuchi && !is.poisson(object)) { vv <- vcov(object, what="internals") logi <- (object$method == "logi") J <- with(vv, if(!logi) Sigma else (Sigma1log+Sigma2log)) H <- with(vv, if(!logi) A1 else Slog) ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J JiH <- try(solve(H, J), silent=TRUE) if(!inherits(JiH, "try-error")) pen <- sum(diag(JiH)) } return(- 2 * as.numeric(ll) + k * pen) } extractAIC.ppm <- function (fit, scale = 0, k = 2, ..., takeuchi=TRUE) { edf <- length(coef(fit)) aic <- AIC(fit, k=k, takeuchi=takeuchi) c(edf, aic) } # # method for model.frame model.frame.ppm <- function(formula, ...) { object <- formula gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) } argh <- resolve.defaults(list(formula=quote(gf)), list(...), list(data = getglmdata(object), subset = TRUE)) result <- switch(object$fitter, gam = do.call(modelFrameGam, argh), do.call(model.frame, argh)) return(result) } #' a hacked version of model.frame.glm that works for gam objects (mgcv) modelFrameGam <- function(formula, ...) { dots <- list(...) nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0L)] if (length(nargs) || is.null(formula$model)) { fcall <- formula$call # fcall$method <- "model.frame" fcall[[1L]] <- quote(mgcv::gam) fcall[names(nargs)] <- nargs env <- environment(formula$terms) if (is.null(env)) env <- parent.frame() refut <- eval(fcall, env) refut$model } else formula$model } # # method for model.matrix model.matrix.ppm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE) { if(missing(data)) data <- NULL PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA) } model.matrix.ippm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE, irregular=FALSE) { if(missing(data)) data <- NULL PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA, irregular=irregular) } PPMmodelmatrix <- function(object, data = NULL, ..., subset, Q=NULL, keepNA=TRUE, irregular=FALSE, splitInf=FALSE) { # handles ppm and ippm data.given <- !is.null(data) irregular <- irregular && inherits(object, "ippm") && !is.null(object$iScore) if(splitInf && !data.given && is.null(Q)) { #' force re-computation Q <- quad.ppm(object) } if(!is.null(Q)) { if(data.given) stop("Arguments Q and data are incompatible") if(!inherits(Q, c("ppp", "quad"))) stop("Q should be a point pattern or quadrature scheme") if(is.ppp(Q)) Q <- quad(Q, Q[FALSE]) ## construct Berman-Turner frame needed <- c("trend", "interaction", "covariates", "covfunargs", "correction", "rbord") bt <- do.call(bt.frame, c(list(Q), object[needed], list(splitInf=splitInf))) forbid <- bt$forbid ## compute model matrix mf <- model.frame(bt$fmla, bt$glmdata, ...) mm <- model.matrix(bt$fmla, mf, ...) 
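    ## ('mm' is the design matrix of the Berman-Turner device:
    ##  one row for each quadrature point, data or dummy, and one
    ##  column for each canonical covariate of the model)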
ass <- attr(mm, "assign") if(irregular) { ## add irregular score components U <- union.quad(Q) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) } ## subset if(!missing(subset)) { ok <- eval(substitute(subset), envir=bt$glmdata) mm <- mm[ok, , drop=FALSE] if(!is.null(forbid)) forbid <- forbid[ok] } ## remove NA's ? if(!keepNA) { ok <- complete.cases(mm) mm <- mm[ok, , drop=FALSE] if(!is.null(forbid)) forbid <- forbid[ok] } attr(mm, "assign") <- ass attr(mm, "-Inf") <- forbid return(mm) } #' extract GLM fit gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("internal error: unable to extract a glm fit") } if(data.given) { #' new data. Must contain the Berman-Turner variables as well. bt <- list(.mpl.Y=1, .mpl.W=1, .mpl.SUBSET=TRUE) if(any(forgot <- !(names(bt) %in% names(data)))) data <- do.call(cbind, append(list(data), bt[forgot])) mm <- model.matrix(gf, data=data, ..., subset=NULL) ass <- attr(mm, "assign") if(irregular) { ## add irregular score components mi <- sapply(object$iScore, do.call, args=append(list(x=data$x, y=data$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) attr(mm, "assign") <- ass } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } scrambled <- object$scrambled %orifnull% FALSE ## if TRUE, this object was produced by 'subfits' using jittered covariate if(!keepNA && !irregular && !scrambled) { # extract model matrix of glm fit object # restricting to its 'subset' mm <- model.matrix(gf, ...) if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } ## extract model matrix for all cases gd <- getglmdata(object, drop=FALSE) if(!scrambled) { ## 'gf' was fitted to correct data. Use internals. 
mm <- model.matrix(gf, ..., subset=NULL, na.action=NULL) ass <- attr(mm, "assign") } else { ## 'gf' was originally fitted using jittered data: ## Use correct data given by 'gd' ## Temporarily add scrambled data to avoid singular matrices etc gds <- object$internal$glmdata.scrambled gdplus <- rbind(gd, gds) mm <- model.matrix(gf, ..., data=gdplus, subset=NULL, na.action=NULL) ass <- attr(mm, "assign") ## Now remove rows corresponding to scrambled data mm <- mm[seq_len(nrow(gd)), , drop=FALSE] attr(mm, "assign") <- ass } cn <- colnames(mm) if(nrow(mm) != nrow(gd)) { # can occur if covariates include NA's or interaction is -Inf insubset <- getglmsubset(object) isna <- is.na(insubset) | !insubset if(sum(isna) + nrow(mm) == nrow(gd)) { # insert rows of NA's mmplus <- matrix( , nrow(gd), ncol(mm)) mmplus[isna, ] <- NA mmplus[!isna, ] <- mm mm <- mmplus attr(mm, "assign") <- ass } else stop("internal error: model matrix does not match glm data frame") } if(irregular) { ## add irregular score components U <- union.quad(quad.ppm(object, drop=FALSE)) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) attr(mm, "assign") <- ass cn <- c(cn, colnames(mi)) } ## subset if(!missing(subset)) { ok <- eval(substitute(subset), envir=gd) mm <- mm[ok, , drop=FALSE] attr(mm, "assign") <- ass } ## remove NA's if(!keepNA) { mm <- mm[complete.cases(mm), , drop=FALSE] attr(mm, "assign") <- ass } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign colnames(mm) <- cn return(mm) } model.images <- function(object, ...) { UseMethod("model.images") } model.images.ppm <- function(object, W=as.owin(object), ...) { X <- data.ppm(object) # irregular <- resolve.1.default(list(irregular=FALSE), list(...)) ## make a quadscheme with a dummy point at every pixel Q <- pixelquad(X, W) ## compute model matrix mm <- model.matrix(object, Q=Q, ...) ## retain only the entries for dummy points (pixels) mm <- mm[!is.data(Q), , drop=FALSE] mm <- as.data.frame(mm) ## create template image Z <- as.im(attr(Q, "M")) ok <- !is.na(Z$v) ## make images imagenames <- colnames(mm) if(!is.multitype(object)) { result <- lapply(as.list(mm), replace, list=ok, x=Z) result <- as.solist(result) names(result) <- imagenames } else { marx <- marks(Q$dummy) mmsplit <- split(mm, marx) result <- vector(mode="list", length=length(mmsplit)) for(i in seq_along(mmsplit)) result[[i]] <- as.solist(lapply(as.list(mmsplit[[i]]), replace, list=ok, x=Z)) names(result) <- names(mmsplit) result <- do.call(hyperframe, result) row.names(result) <- imagenames } return(result) } unitname.ppm <- function(x) { return(unitname(x$Q)) } "unitname<-.ppm" <- function(x, value) { unitname(x$Q) <- value return(x) } nobs.ppm <- function(object, ...) 
{ npoints(data.ppm(object)) } as.interact.ppm <- function(object) { verifyclass(object, "ppm") inte <- object$interaction if(is.null(inte)) inte <- Poisson() return(inte) } as.ppm <- function(object) { UseMethod("as.ppm") } as.ppm.ppm <- function(object) { object } ## method for as.owin as.owin.ppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { if(!verifyclass(W, "ppm", fatal=fatal)) return(NULL) from <- match.arg(from) datawin <- as.owin(data.ppm(W)) if(from == "points") return(datawin) covs <- W$covariates isim <- unlist(lapply(covs, is.im)) if(!any(isim)) return(datawin) cwins <- lapply(covs[isim], as.owin) covwin <- do.call(intersect.owin, unname(cwins)) result <- intersect.owin(covwin, datawin) return(result) } domain.ppm <- Window.ppm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, ..., from=from) } ## change the coefficients in a ppm or other model tweak.coefs <- function(model, new.coef) { if(is.null(new.coef)) return(model) co <- coef(model) check.nvector(new.coef, length(co), things="coefficients") model$coef.orig <- co model$coef <- new.coef return(model) } spatstat.core/R/quadratmtest.R0000644000176200001440000000067114144333464016120 0ustar liggesusers# # method for 'quadrat.test' for class mppm # # $Revision: 1.8 $ $Date: 2015/08/12 07:29:17 $ # quadrat.test.mppm <- function(X, ...) { Xname <- short.deparse(substitute(X)) if(!is.poisson.mppm(X)) stop("Model is not a Poisson point process") subs <- subfits(X) tests <- anylapply(subs, quadrat.test.ppm, ..., fitname=Xname) df.est <- length(coef(X)) return(pool.quadrattest(tests, Xname=Xname, df.est=df.est)) } spatstat.core/R/rotmean.R0000644000176200001440000000333514144333463015046 0ustar liggesusers## ## rotmean.R ## ## rotational average of pixel values ## ## $Revision: 1.13 $ $Date: 2020/05/22 02:43:34 $ rotmean <- function(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im"), adjust=1) { if(missing(Xname)) Xname <- sensiblevarname(short.deparse(substitute(X)), "X") trap.extra.arguments(..., .Context="rotmean") stopifnot(is.im(X)) if(!missing(origin)) { X <- shift(X, origin=origin) backshift <- -getlastshift(X) } else { backshift <- NULL } result <- match.arg(result) rmax <- with(vertices(Frame(X)), sqrt(max(x^2+y^2))) Xunpad <- X if(padzero) X <- padimage(na.handle.im(X, 0), 0, W=square(c(-1,1)*rmax)) Xdata <- as.data.frame(X) values <- Xdata$value radii <- with(Xdata, sqrt(x^2+y^2)) ra <- pmin(range(radii), rmax) bw <- adjust * 0.1 * sqrt(X$xstep^2 + X$ystep^2) a <- unnormdensity(radii, from=ra[1], to=ra[2], bw=bw) b <- unnormdensity(radii, weights=values, from=ra[1], to=ra[2], bw=a$bw) df <- data.frame(r=a$x, f=b$y/a$y) FUN <- fv(df, argu="r", ylab=substitute(bar(X)(r), list(X=as.name(Xname))), valu="f", fmla=(. 
~ r), alim=ra, labl=c("r", "%s(r)"), desc=c("distance argument r", "rotational average"), unitname=unitname(X), fname=paste0("bar", paren(Xname))) attr(FUN, "dotnames") <- "f" unitname(FUN) <- unitname(X) if(result == "fv") return(FUN) ## compute image FUN <- as.function(FUN) XX <- as.im(Xunpad, na.replace=1) IM <- as.im(function(x,y,FUN){ FUN(sqrt(x^2+y^2)) }, XX, FUN=FUN) if(!is.null(backshift)) IM <- shift(IM,backshift) unitname(IM) <- unitname(X) return(IM) } spatstat.core/R/rlabel.R0000644000176200001440000000272514144333463014644 0ustar liggesusers# # rlabel.R # # random (re)labelling # # $Revision: 1.13 $ $Date: 2020/10/23 15:18:00 $ # # rlabel <- local({ resample <- function(x, replace=FALSE) { x[sample(length(x), replace=replace)] } rlabel <- function(X, labels=marks(X), permute=TRUE, group=NULL, ..., nsim=1, drop=TRUE) { stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X) || is.psp(X)) if(is.null(labels)) stop("labels not given and marks not present") singlecolumn <- (length(dim(labels)) < 2) nthings <- nobjects(X) things <- if(is.psp(X)) "segments" else "points" nlabels <- if(singlecolumn) length(labels) else nrow(labels) if((nlabels != nthings) && (permute || !is.null(group))) stop(paste(if(singlecolumn) "Length" else "Number of rows", "of labels does not match the number of", things), call.=FALSE) ## if(is.null(group)) { Y <- replicate(nsim, { X %mark% marksubset(labels, sample(nlabels, nthings, replace=!permute)) }, simplify=FALSE) } else { group <- marks(cut(X, group, ...)) seqn <- seq_len(nlabels) pieces <- split(seqn, group) Y <- replicate(nsim, { X %mark% marksubset(labels, unsplit(lapply(pieces, resample, replace=!permute), group)) }, simplify=FALSE) } ## return(simulationresult(Y, nsim, drop)) } rlabel }) spatstat.core/R/intensity.ppm.R0000644000176200001440000001705514144333462016225 0ustar liggesusers#' #' intensity.ppm.R #' #' Intensity and intensity approximations for fitted point process models #' #' $Revision: 1.1 $ $Date: 2020/11/24 01:57:24 $ #' intensity.ppm <- function(X, ...) { if(!identical(valid.ppm(X), TRUE)) { warning("Model is invalid - projecting it") X <- project.ppm(X) } if(is.poisson(X)) { if(is.stationary(X)) { # stationary univariate/multivariate Poisson sX <- summary(X, quick="no variances") lam <- sX$trend$value if(sX$multitype && sX$no.trend) { ## trend is ~1; lam should be replicated for each mark lev <- levels(marks(data.ppm(X))) lam <- rep(lam, length(lev)) names(lam) <- lev } return(lam) } # Nonstationary Poisson return(predict(X, ...)) } # Gibbs process if(is.multitype(X)) stop("Not yet implemented for multitype Gibbs processes") # Compute first order term if(is.stationary(X)) { ## activity parameter sX <- summary(X, quick="no variances") beta <- sX$trend$value } else { ## activity function (or values of it, depending on '...') beta <- predict(X, ...) 
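    ## (in this branch beta is typically a pixel image of the
    ##  fitted first-order trend)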
} ## apply approximation lambda <- PoisSaddle(beta, fitin(X)) return(lambda) } PoisSaddle <- function(beta, fi) { ## apply Poisson-Saddlepoint approximation ## given first order term and fitted interaction stopifnot(inherits(fi, "fii")) inte <- as.interact(fi) if(identical(inte$family$name, "pairwise")) return(PoisSaddlePairwise(beta, fi)) if(identical(inte$name, "Geyer saturation process")) return(PoisSaddleGeyer(beta, fi)) if(identical(inte$name, "Area-interaction process")) return(PoisSaddleArea(beta, fi)) stop(paste("Intensity approximation is not yet available for", inte$name), call.=FALSE) } PoisSaddlePairwise <- function(beta, fi) { inte <- as.interact(fi) Mayer <- inte$Mayer if(is.null(Mayer)) stop(paste("Sorry, not yet implemented for", inte$name)) # interaction coefficients co <- with(fi, coefs[Vnames[!IsOffset]]) # compute second Mayer cluster integral G <- Mayer(co, inte) if(is.null(G) || !is.finite(G)) stop("Internal error in computing Mayer cluster integral") if(G < 0) stop(paste("Unable to apply Poisson-saddlepoint approximation:", "Mayer cluster integral is negative")) ## solve if(is.im(beta)) { lambda <- if(G == 0) beta else eval.im(LambertW(G * beta)/G) } else { lambda <- if(G == 0) beta else (LambertW(G * beta)/G) if(length(lambda) == 1) lambda <- unname(lambda) } return(lambda) } # Lambert's W-function LambertW <- local({ yexpyminusx <- function(y,x){y*exp(y)-x} W <- function(x) { result <- rep.int(NA_real_, length(x)) ok <- is.finite(x) & (x >= 0) if(requireNamespace("gsl", quietly=TRUE)) { result[ok] <- gsl::lambert_W0(x[ok]) } else { for(i in which(ok)) result[i] <- uniroot(yexpyminusx, c(0, x[i]), x=x[i])$root } return(result) } W }) PoisSaddleGeyer <- local({ PoisSaddleGeyer <- function(beta, fi) { gamma <- summary(fi)$sensible$param$gamma if(gamma == 1) return(beta) inte <- as.interact(fi) sat <- inte$par$sat R <- inte$par$r #' get probability distribution of Geyer statistic under reference model z <- Spatstat.Geyer.Nulldist # from sysdata if(is.na(m <- match(sat, z$sat))) stop(paste("Sorry, the Poisson-saddlepoint approximation", "is not implemented for Geyer models with sat =", sat), call.=FALSE) probmatrix <- z$prob[[m]] maxachievable <- max(which(colSums(probmatrix) > 0)) - 1 gammarange <- sort(c(1, gamma^maxachievable)) #' apply approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * gammarange lambdavalues[i] <- uniroot(diffapproxGeyer, ra, beta=beta.i, gamma=gamma, R=R, sat=sat, probmatrix=probmatrix)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxGeyer <- function(lambda, beta, gamma, R, sat, probmatrix) { lambda - approxEpoisGeyerT(lambda, beta, gamma, R, sat, probmatrix) } approxEpoisGeyerT <- function(lambda, beta=1, gamma=1, R=1, sat=1, probmatrix) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for Geyer #' ('probmatrix' contains distribution of geyerT(0, Z_n) for each n, #' where 'sat' is given, and Z_n is runifdisc(n, radius=2*R). 
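    #' In detail, the quantity computed below is beta * E[gamma^T], where
    #' T = geyerT(0, Z_N) and N ~ Poisson(lambda * pi * (2R)^2) is the
    #' number of R-close neighbours:
    #'     E[gamma^T] = sum_n P(N=n) * sum_t P(T=t | N=n) * gamma^t
    #' evaluated as pN %*% probmatrix %*% (gamma^possT).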
possT <- 0:(ncol(probmatrix)-1) possN <- 0:(nrow(probmatrix)-1) pN <- dpois(possN, lambda * pi * (2*R)^2) EgamT <- pN %*% probmatrix %*% (gamma^possT) #' assume that, for n > max(possN), #' distribution of T is concentrated on T=sat EgamT <- EgamT + (gamma^sat) * (1-sum(pN)) return(beta * EgamT) } PoisSaddleGeyer }) PoisSaddleArea <- local({ PoisSaddleArea <- function(beta, fi) { eta <- summary(fi)$sensible$param$eta if(eta == 1) return(beta) etarange <- range(c(eta^2, 1.1, 0.9)) inte <- as.interact(fi) R <- inte$par$r #' reference distribution of canonical sufficient statistic zeroprob <- Spatstat.Area.Zeroprob areaquant <- Spatstat.Area.Quantiles # expectation of eta^A_n for each n = 0, 1, .... EetaAn <- c(1/eta, zeroprob + (1-zeroprob) * colMeans((eta^(-areaquant)))) #' compute approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * etarange lambdavalues[i] <- uniroot(diffapproxArea, ra, beta=beta.i, eta=eta, r=R, EetaAn=EetaAn)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxArea <- function(lambda, beta, eta, r, EetaAn) { lambda - approxEpoisArea(lambda, beta, eta, r, EetaAn) } approxEpoisArea <- function(lambda, beta=1, eta=1, r=1, EetaAn) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for AreaInter mu <- lambda * pi * (2*r)^2 zeta <- pi^2/2 - 1 theta <- -log(eta) zetatheta <- zeta * theta #' contribution from tabulated values Nmax <- length(EetaAn) - 1L possN <- 0:Nmax qN <- dpois(possN, mu) # expectation of eta^A when N ~ poisson (truncated) EetaA <- sum(qN * EetaAn) #' asymptotics for quite large n Nbig <- qpois(0.999, mu) qn <- 0 if(Nbig > Nmax) { n <- (Nmax+1):Nbig #' asymptotic mean uncovered area conditional on this being positive mstarn <- (16/((n+3)^2)) * exp(n * (1/4 - log(4/3))) ztm <- zetatheta * mstarn ok <- (ztm < 1) if(!any(ok)) { Nbig <- Nmax qn <- 0 } else { if(!all(ok)) { Nbig <- max(which(!ok)) - 1 n <- (Nmax+1):Nbig ztm <- ztm[1:((Nbig-Nmax)+1)] } qn <- dpois(n, mu) #' asymptotic probability of complete coverage pstarn <- 1 - pmin(1, 3 * (1 + n^2/(16*pi)) * exp(-n/4)) Estarn <- (1 - ztm)^(-1/zeta) EetaA <- EetaA + sum(qn * (pstarn + (1-pstarn) * Estarn)) } } #' for very large n, assume complete coverage, so A = 0 EetaA <- EetaA + 1 - sum(qN) - sum(qn) return(beta * eta * EetaA) } PoisSaddleArea }) spatstat.core/R/metriccontact.R0000644000176200001440000000627614144333462016246 0ustar liggesusers#' #' metriccontact.R #' #' Metric contact distribution #' (corresponding distance transforms are defined in 'metricPdt.R') #' #' $Revision: 1.1 $ $Date: 2020/11/29 07:41:37 $ rectcontact <- function(X, ..., asp=1.0, npasses=4, eps=NULL, r=NULL, breaks=NULL, correction=c("rs", "km")) { verifyclass(X, "im") rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(list(...)$checkspacing) testme <- isTRUE(list(...)$testme) check.1.real(asp) stopifnot(asp > 0) if(X$type != "logical") stop("X should be a logical-valued image") if(!missing(eps)) X <- as.im(X, eps=eps) W <- as.mask(X) # the region that is defined Y <- solutionset(X) # the region that is TRUE fullframe <- all(W$m) emptyframe <- !any(W$m) ## histogram breakpoints rmaxdefault <- rmax.rule("F", W) breaks <- handle.r.b.args(r, breaks, W, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(testme || (rorbgiven && checkspacing)) check.finespacing(rvals, 
if(is.null(eps)) NULL else eps/4, W, rmaxdefault=if(rorbgiven) NULL else rmaxdefault, action="fatal", rname="r", context="in rectcontact(X, r)") correction <- pickoption("correction", correction, c(border="rs", rs="rs", KM="km", km="km", Kaplan="km", best="km"), multi=TRUE) ## compute distances and censoring distances if(!emptyframe) { dist <- rectdistmap(Y, asp, npasses=npasses) if(fullframe) { bdry <- attr(dist, "bdist") } else { bdry <- rectdistmap(complement.owin(W), asp, npasses=npasses) } #' extract corresponding values dist <- dist[W, drop=TRUE, rescue=FALSE] bdry <- bdry[W, drop=TRUE, rescue=FALSE] ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) } ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km) tags <- c("rs", "km")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s")[selection] if(emptyframe) { df <- as.data.frame(matrix(0, length(rvals), length(tags))) names(df) <- tags } else { df <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) df <- as.data.frame(df[tags]) } ## create fv object df <- cbind(data.frame(r=rvals), df) Z <- fv(df, "r", quote(H(r)), if(want.km) "km" else "rs", . ~ r, c(0,rmax), c("r", labels), c("distance argument r", descr), fname="H") fvnames(Z, ".") <- rev(fvnames(Z, ".")) attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.95])) attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } spatstat.core/R/inforder.family.R0000644000176200001440000000653014144333462016470 0ustar liggesusers# # # inforder.family.R # # $Revision: 1.2 $ $Date: 2010/07/10 10:22:09 $ # # Family of `infinite-order' point process models # # inforder.family: object of class 'isf' # # # ------------------------------------------------------------------- # inforder.family <- list( name = "inforder", print = function(self) { cat("Family of infinite-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `inforder' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `inforder' family of interactions with infinite order, # there are no structures common to all interactions. # So this function simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. 
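    #
    #      For instance (a sketch only, not part of this family definition),
    #      a potential counting the number of R-close neighbours, with the
    #      distance R supplied via 'pars', could be written using the
    #      argument sequence specified under POTENTIAL below:
    #
    #          pot <- function(X, U, EqualPairs, pars, correction, ...) {
    #            cnt <- crosspaircounts(U, X, pars$R)
    #            if(!is.null(EqualPairs)) {
    #              jj <- EqualPairs[,2]    # rows of U coinciding with points of X
    #              cnt[jj] <- cnt[jj] - 1  # a point is not its own neighbour
    #            }
    #            matrix(cnt, ncol=1)       # one column: 1-dimensional potential
    #          }
    #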
# ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(inforder.family) <- "isf" spatstat.core/R/quadratresample.R0000644000176200001440000000223214144333463016566 0ustar liggesusers# # quadratresample.R # # resample a point pattern by resampling quadrats # # $Revision: 1.7 $ $Date: 2015/10/21 09:06:57 $ # quadratresample <- function(X, nx, ny=nx, ..., replace=FALSE, nsamples=1, verbose=(nsamples > 1)) { stopifnot(is.ppp(X)) if(X$window$type != "rectangle") stop("Resampling is only implemented for rectangular windows") # create tessellation A <- quadrats(X, nx=nx, ny=ny) # split data over tessellation B <- split(X, A) nq <- length(B) # determine bottom left corner of each tile V <- lapply(B, framebottomleft) out <- list() if(verbose) { cat("Generating resampled patterns...") pstate <- list() } for(i in 1:nsamples) { # resample tiles ind <- sample(1:nq, nq, replace=replace) Xresampled <- X Bresampled <- B for(j in 1:nq) { k <- ind[j] Bresampled[[j]] <- shift(B[[k]], unlist(V[[j]]) - unlist(V[[k]])) } split(Xresampled, A) <- Bresampled out[[i]] <- Xresampled if(verbose) pstate <- progressreport(i, nsamples, state=pstate) } if(nsamples == 1) return(out[[1]]) return(as.solist(out)) } spatstat.core/R/Kest.R0000644000176200001440000010413714144333461014307 0ustar liggesusers# # Kest.R Estimation of K function # # $Revision: 5.132 $ $Date: 2021/10/09 01:56:06 $ # # # -------- functions ---------------------------------------- # Kest() compute estimate of K # using various edge corrections # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lest" <- function(X, ..., correction) { if(missing(correction)) correction <- NULL K <- Kest(X, ..., correction=correction) L <- eval.fv(sqrt(K/pi), dotonly=FALSE) # handle variance estimates if(any(varcols <- colnames(K) %in% c("rip", "ls"))) { r <- with(L, .x) L[,varcols] <- 
as.data.frame(K)[,varcols]/(2 * pi * r)^2 # fix 0/0 n <- npoints(X) A <- area(Window(X)) if(any(colnames(K) == "rip")) L[r == 0, "rip"] <- (2 * A/(n-1)^2)/(4 * pi) if(any(colnames(K) == "ls")) L[r == 0, "ls"] <- (2 * A/(n * (n-1)))/(4 * pi) } # relabel the fv object L <- rebadge.fv(L, quote(L(r)), "L", names(K), new.labl=attr(K, "labl")) # return(L) } "Kest"<- function(X, ..., r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) && !is.null(nlarge) rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- X$window areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(!is.null(domain)) { ## estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, W)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) ## use code in Kdot/Kmulti indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) Kd <- Kdot(X %mark% indom, i="TRUE", r=r, breaks=breaks, correction=correction, ratio=ratio, rmax=rmax, domainI=domain) # relabel and exit Kd <- rebadge.fv(Kd, quote(K(r)), "K") return(Kd) } rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda) if(is.infinite(rmaxdefault)) rmaxdefault <- diameter(W) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", rigid="rigid", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) # replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) # retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked <- correction.fast || (nlarge.given && large.n.trigger) if(asked && !can.do.fast) warning("r values not evenly spaced - cannot use efficient code") if(will.do.fast) { # determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { # some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } # restrict r 
values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) if(bord) Kb <- Kborder.engine(X, max(r), length(r), correction, ratio=ratio) if(none) Kn <- Knone.engine(X, max(r), length(r), ratio=ratio) if(bord && none) { Kn <- Kn[ , names(Kn) != "theo"] yn <- fvnames(Kb, ".y") Kbn <- if(!ratio) bind.fv(Kb, Kn, preferred=yn) else bind.ratfv(Kb, Kn, preferred=yn) return(Kbn) } if(bord) return(Kb) if(none) return(Kn) } do.fast.rectangle <- can.do.fast && is.rectangle(W) && spatstat.options("use.Krect") && !any(correction == "rigid") if(do.fast.rectangle) { ########################################### ## Fast code for rectangular window ########################################### K <- Krect.engine(X, rmax, length(r), correction, ratio=ratio) attr(K, "alim") <- alim } else { ########################################### ## Slower code ########################################### ## this will be the output data frame Kdf <- data.frame(r=r, theo = pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- lambda2 * areaW K <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, alim, c("r","%s[pois](r)"), desc, fname="K", ratio=ratio) ## identify all close pairs rmax <- max(r) what <- if(any(correction %in% c("translate", "isotropic"))) "all" else "ijd" close <- closepairs(X, rmax, what=what) DIJ <- close$d ## precompute set covariance of window gW <- NULL if(any(correction %in% c("translate", "rigid", "isotropic"))) gW <- setcov(W) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambda2 * areaW ## uncorrected estimate of K K <- bind.ratfv(K, data.frame(un=numKun), denKun, "hat(%s)[un](r)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { ## border method ## Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] ## apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { ## modified border correction denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- lambda2 * denom.area K <- bind.ratfv(K, data.frame(bord.modif=numKbm), data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambda * RS$denom.count K <- bind.ratfv(K, data.frame(border=numKb), data.frame(border=denKb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE, gW = gW, give.rmax=TRUE) wh <- whist(DIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambda2 * areaW h <- attr(edgewt, "rmax") numKtrans[r >= h] <- NA K <- bind.ratfv(K, data.frame(trans=numKtrans), denKtrans, "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "rigid")) { ## Ohser-Stoyan rigid motion correction CW <- rotmean(gW) edgewt <- areaW/as.function(CW)(DIJ) wh <- whist(DIJ, breaks$val, edgewt) numKrigid <- cumsum(wh) denKrigid <- lambda2 * areaW h <- rmax.Rigid(X, gW) #sic: X not W numKrigid[r >= h] <- NA K <- bind.ratfv(K, data.frame(rigid=numKrigid), denKrigid, "hat(%s)[rigid](r)", "rigid motion-corrected estimate of %s", "rigid", ratio=ratio) } if(any(correction == "isotropic")) { 
## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambda2 * areaW h <- boundingradius(W) numKiso[r >= h] <- NA K <- bind.ratfv(K, data.frame(iso=numKiso), denKiso, "hat(%s)[iso](r)", "Ripley isotropic correction estimate of %s", "iso", ratio=ratio) } } ############################# ## VARIANCE APPROXIMATION ############################# if(var.approx && !any(correction == "isotropic")) { warn.once("varapproxiso", "Ignored argument 'var.approx=TRUE'; the variance approximation", "is available only for the isotropic correction") var.approx <- FALSE } if(var.approx) { ## Compute variance approximations A <- areaW P <- perimeter(W) n <- npts ## Ripley asymptotic approximation rip <- 2 * ((A/(n-1))^2) * (pi * r^2/A + 0.96 * P * r^3/A^2 + 0.13 * (n/A) * P * r^5/A^2) if(!ratio) { K <- bind.fv(K, data.frame(rip=rip), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } else { den <- (n-1)^2 ripnum <- den * rip ripden <- rep.int(den, length(rip)) K <- bind.ratfv(K, data.frame(rip=ripnum), data.frame(rip=ripden), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } if(W$type == "rectangle") { # Lotwick-Silverman a1r <- (0.21 * P * r^3 + 1.3 * r^4)/A^2 a2r <- (0.24 * P * r^5 + 2.62 * r^6)/A^3 # contains correction to typo on p52 of Diggle 2003 # cf Lotwick & Silverman 1982 eq (5) br <- (pi * r^2/A) * (1 - pi * r^2/A) + (1.0716 * P * r^3 + 2.2375 * r^4)/A^2 ls <- (A^2) * (2 * br - a1r + (n-2) * a2r)/(n*(n-1)) # add column if(!ratio) { K <- bind.fv(K, data.frame(ls=ls), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } else { den <- n*(n-1) lsnum <- ls * den lsden <- rep.int(den, length(ls)) K <- bind.ratfv(K, data.frame(ls=lsnum), data.frame(ls=lsden), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } } } ### FINISH OFF ##### ## default plot will display all edge corrections formula(K) <- . 
~ r nama <- rev(colnames(K)) fvnames(K, ".") <- setdiff(nama, c("r", "rip", "ls")) ## unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } ################################################################ ############# SUPPORTING ALGORITHMS ########################### ################################################################ Kount <- function(dIJ, bI, b, breaks) { # # "internal" routine to compute border-correction estimate of K or Kij # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # b: vector of ALL distances to window boundary # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # histogram of noncensored distances nco <- whist(dIJ[uncen], breaks$val) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], breaks$val) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, breaks$val) # count censoring times beyond rightmost breakpoint uppercen <- sum(b > max(breaks$val)) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denom.count <- RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denom.count) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denom.count=denom.count)) } #### interface to C code for border method Kborder.engine <- function(X, rmax, nr=100, correction=c("border", "bord.modif"), weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator denom <- lambda2 * areaW numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # boundary distances b <- bdist.points(Xsort) # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C(SC_KborderI, nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), denom=as.integer(integer(nr)), PACKAGE="spatstat.core") } else { # no - need double precision storage res <- .C(SC_KborderD, nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE="spatstat.core") } if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$numer denKbm <- lambda2 * denom.area bm 
<- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if("border" %in% correction) { numKb <- res$numer denKb <- lambda * res$denom bord <- numKb/denKb Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C(SC_Kwborder, nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE="spatstat.core") if("border" %in% correction) { bord <- res$numer/res$denom Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=res$numer), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=res$denom), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } if("bord.modif" %in% correction) { numKbm <- res$numer denKbm <- eroded.areas(W, r) bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . 
~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } Knone.engine <- function(X, rmax, nr=100, weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) denom <- lambda2 * areaW if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C(SC_KnoneI, nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), PACKAGE="spatstat.core") } else { # no - need double precision storage res <- .C(SC_KnoneD, nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE="spatstat.core") } numKun <- res$numer denKun <- denom # = lambda2 * areaW Kun <- numKun/denKun } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C(SC_Kwnone, nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE="spatstat.core") numKun <- res$numer denKun <- sum(weights) Kun <- numKun/denKun } # tack on to fv object Kfv <- bind.fv(Kfv, data.frame(un=Kun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { numK <- bind.fv(numK, data.frame(un=numKun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . 
~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } rmax.rule <- function(fun="K", W, lambda) { if(gotW <- !missing(W)) verifyclass(W, "owin") if(gotL <- !missing(lambda)) lambda <- as.numeric(lambda) # can be vector gotall <- gotW && gotL switch(fun, K = { ## Ripley's Rule ripley <- if(gotW) shortside(Frame(W))/4 else Inf ## Count at most 1000 neighbours per point rlarge <- if(gotL) sqrt(1000 /(pi * lambda)) else Inf rmax <- min(rlarge, ripley) }, Kscaled = { ## rule of thumb for Kscaled rdiam <- if(gotall) diameter(Frame(W))/2 * sqrt(lambda) else Inf rmax <- min(10, rdiam) }, F = , G = , J = { # rule of thumb rdiam <- if(gotW) diameter(Frame(W))/2 else Inf # Poisson process has F(rlarge) = 1 - 10^(-5) rlarge <- if(gotL) sqrt(log(1e5)/(pi * lambda)) else Inf rmax <- min(rlarge, rdiam) }, stop(paste("Unrecognised function type", sQuote(fun))) ) return(rmax) } implemented.for.K <- function(correction, windowtype, explicit) { pixels <- (windowtype == "mask") if(any(correction == "best")) { # select best available correction correction[correction == "best"] <- if(!pixels) "isotropic" else "translate" } else { # available selection of edge corrections depends on window if(pixels) { iso <- (correction == "isotropic") if(any(iso)) { whinge <- "Isotropic correction not implemented for binary masks" if(explicit) { if(all(iso)) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) } correction <- correction[!iso] } } } return(correction) } good.correction.K <- function(X) { nX <- npoints(X) W <- as.owin(X) avail <- c("none", if(nX < 1e5) "border" else NULL, if(nX < 3000)"translate" else NULL, if(nX < 1000 && !is.mask(W)) "isotropic" else NULL) chosen <- rev(avail)[1] return(chosen) } Krect.engine <- function(X, rmax, nr=100, correction, weights=NULL, ratio=FALSE, fname="K", use.integers=TRUE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) width <- sidelengths(W)[1] height <- sidelengths(W)[2] lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) if(weighted <- !is.null(weights)) { ## coerce weights to a vector if(is.numeric(weights)) { check.nvector(weights, npts) } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } } # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(weighted) areaW else (lambda2 * areaW) Kfv <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, ratio=ratio) ####### prepare data ############ if(!all(correction == "translate")) { ## Ensure rectangle has its bottom left corner at the origin if(W$xrange[1] != 0 || W$yrange[1] != 0) { X <- shift(X, origin="bottomleft") W <- as.owin(X) } } ## sort in ascending order of x coordinate orderX <- fave.order(X$x) x <- X$x[orderX] y <- X$y[orderX] if(weighted) wt <- weights[orderX] ## establish algorithm parameters doIso <- "isotropic" %in% correction doTrans <- "translate" %in% correction doBord <- any(c("border", "bord.modif") %in% correction) doUnco <- "none" %in% correction trimedge <- spatstat.options("maxedgewt") ## allocate space for results ziso <- numeric(if(doIso) nr else 1L) ztrans <- numeric(if(doTrans) nr else 1L) ## call the C code if(weighted) { ## weighted 
version zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C(SC_KrectWtd, width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(wt), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE="spatstat.core") } else if(use.integers && npts < sqrt(.Machine$integer.max)) { ## unweighted ## numerator of border correction can be stored as an integer ## use faster integer arithmetic zbnumer <- integer(if(doBord) nr else 1L) zbdenom <- integer(if(doBord) nr else 1L) zunco <- integer(if(doUnco) nr else 1L) res <- .C(SC_KrectInt, width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.integer(zbnumer), bdenom=as.integer(zbdenom), unco=as.integer(zunco), PACKAGE="spatstat.core") } else { ## unweighted ## need double precision storage zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C(SC_KrectDbl, width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE="spatstat.core") } ## Process corrections in reverse order of priority ## Uncorrected estimate if("none" %in% correction) { numKun <- res$unco denKun <- if(weighted) areaW else (lambda2 * areaW) Kfv <- bind.ratfv(Kfv, data.frame(un=numKun), denKun, makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un", ratio=ratio) } ## Modified border correction if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$bnumer denKbm <- if(weighted) denom.area else (lambda2 * denom.area) Kfv <- bind.ratfv(Kfv, data.frame(bord.modif=numKbm), denKbm, makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } ## Border correction if("border" %in% correction) { numKb <- res$bnumer denKb <- if(weighted) res$bdenom else lambda * res$bdenom Kfv <- bind.ratfv(Kfv, data.frame(border=numKb), denKb, makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border", ratio=ratio) } ## translation correction if("translate" %in% correction) { numKtrans <- res$trans denKtrans <- if(weighted) areaW else (lambda2 * areaW) h <- diameter(as.rectangle(W))/2 numKtrans[r >= h] <- NA Kfv <- bind.ratfv(Kfv, data.frame(trans=numKtrans), denKtrans, makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans", ratio=ratio) } ## isotropic correction if("isotropic" %in% correction) { numKiso <- res$iso denKiso <- if(weighted) areaW else (lambda2 * areaW) h <- diameter(as.rectangle(W))/2 numKiso[r >= h] <- NA Kfv <- bind.ratfv(Kfv, data.frame(iso=numKiso), 
denKiso, makefvlabel(NULL, "hat", fname, "iso"), "isotropic-corrected estimate of %s", "iso", ratio=ratio) } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) Kfv <- conform.ratfv(Kfv) return(Kfv) } spatstat.core/R/satpiece.R0000644000176200001440000001120014144333463015164 0ustar liggesusers# # # satpiece.S # # $Revision: 1.17 $ $Date: 2018/03/15 07:37:41 $ # # Saturated pairwise interaction process with piecewise constant potential # # SatPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # SatPiece <- local({ # ..... auxiliary functions ...... delSP <- function(i, r, sat) { r <- r[-i] sat <- sat[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(SatPiece(r, sat)) } # ....... template object .......... BlankSatPiece <- list( name = "piecewise constant Saturated pairwise interaction process", creator = "SatPiece", family = "pairsat.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction thresholds", "saturation parameters"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(any(ceiling(sat) != floor(sat))) warning("saturation parameter has a non-integer value") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r ok <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(ok)) return(NULL) if(!any(ok)) return(Poisson()) bad <- !ok if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delSP(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delSP, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL # added later ) class(BlankSatPiece) <- "interact" SatPiece <- function(r, sat) { instantiate.interact(BlankSatPiece, list(r=r, sat=sat)) } SatPiece <- intermaker(SatPiece, BlankSatPiece) SatPiece }) spatstat.core/R/geyer.R0000644000176200001440000003477414144333462014516 0ustar liggesusers# # # geyer.S # # $Revision: 2.45 $ $Date: 2021/02/06 03:46:20 $ # # Geyer's saturation process # # Geyer() create an instance of Geyer's saturation process # [an object of class 'interact'] # # Geyer <- local({ # .......... template .......... BlankGeyer <- list( name = "Geyer saturation process", creator = "Geyer", family = "pairsat.family", # evaluated later pot = function(d, par) { (d <= par$r) # same as for Strauss }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction distance","saturation parameter"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") if(!is.numeric(sat) || length(sat) != 1 || sat < 0) stop("saturation parameter sat must be a nonnegative number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Geyer saturation process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r sat <- inter$par$sat xlim <- resolve.1.default(list(xlim=c(0, 1.25 * r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * sat * (d <= r)) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) sat <- self$par$sat return(is.finite(loggamma) && (is.finite(sat) || loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...)
{ r <- self$par$r if(any(!is.na(coeffs))) { loggamma <- coeffs[1L] if(!is.na(loggamma) && (abs(loggamma) <= epsilon)) return(0) } return(2 * r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ..., halfway=FALSE, check=TRUE) { #' fast evaluator for Geyer interaction dont.complain.about(splitInf) if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Geyer") r <- potpars$r sat <- potpars$sat # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=check) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # determine saturated pair counts counts <- strausscounts(U, X, r, EqualPairs) satcounts <- pmin.int(sat, counts) satcounts <- matrix(satcounts, ncol=1) if(halfway) { # trapdoor used by suffstat() answer <- satcounts } else if(sat == Inf) { # no saturation: fast code answer <- 2 * satcounts } else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs) answer <- satcounts + change answer <- matrix(answer, ncol=1) } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) }, delta2 = function(X,inte,correction, ..., sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # h(X[i] | X) - h(X[i] | X[-j]) # Geyer interaction if(correction == "periodic") return(NULL) r <- inte$par$r sat <- inte$par$sat result <- geyerdelta2(X, r, sat, correction=correction, sparseOK=sparseOK) return(result) } ) class(BlankGeyer) <- "interact" Geyer <- function(r, sat) { instantiate.interact(BlankGeyer, list(r = r, sat=sat)) } Geyer <- intermaker(Geyer, BlankGeyer) Geyer }) # ........... externally visible auxiliary functions ......... 
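# The two helpers below, 'geyercounts' and 'geyerdelta2', are called from
# the 'fasteval' and 'delta2' components above.  As a hedged illustration
# (not a definitive recipe; the authoritative calling sequence is in
# 'fasteval' above), 'geyercounts' is typically invoked as
#   counts <- strausscounts(U, X, r, EqualPairs)   # r-neighbour counts
#   Xcounts <- integer(npoints(X))
#   Xcounts[EqualPairs[,1]] <- counts[EqualPairs[,2]]
#   change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs)
# giving, for each quadrature point, the change in the saturated neighbour
# counts of the data points caused by adding or deleting that point.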
geyercounts <- function(U, X, r, sat, Xcounts, EqualPairs) { # evaluate effect of adding dummy point or deleting data point # on saturated counts of other data points stopifnot(is.numeric(r)) stopifnot(is.numeric(sat)) # for C calls we need finite numbers stopifnot(is.finite(r)) stopifnot(is.finite(sat)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oU <- fave.order(U$x) Xsort <- X[oX] Usort <- U[oU] nX <- npoints(X) nU <- npoints(U) Xcountsort <- Xcounts[oX] # inverse: data point i has sorted position i' = rankX[i] rankX <- integer(nX) rankX[oX] <- seq_len(nX) rankU <- integer(nU) rankU[oU] <- seq_len(nU) # map from quadrature points to data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xsortindex <- rankX[Xindex] Usortindex <- rankU[Uindex] Cmap <- rep.int(-1L, nU) Cmap[Usortindex] <- Xsortindex - 1L # call C routine zz <- .C(SC_Egeyer, nnquad = as.integer(nU), xquad = as.double(Usort$x), yquad = as.double(Usort$y), quadtodata = as.integer(Cmap), nndata = as.integer(nX), xdata = as.double(Xsort$x), ydata = as.double(Xsort$y), tdata = as.integer(Xcountsort), rrmax = as.double(r), ssat = as.double(sat), result = as.double(numeric(nU)), PACKAGE="spatstat.core") result <- zz$result[rankU] return(result) } geyerdelta2 <- local({ geyerdelta2 <- function(X, r, sat, ..., sparseOK=TRUE, correction="none") { ## Sufficient statistic for second order conditional intensity ## Geyer model stopifnot(is.numeric(sat) && length(sat) == 1 && sat > 0) ## X could be a ppp or quad. if(is.ppp(X)) { # evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. h(X[i]|X) - h(X[i]|X[-j]) where h is first order cif statistic return(geydelppp(X, r, sat, correction, sparseOK)) } else if(is.quad(X)) { # evaluate \Delta_{u_i} \Delta_{u_j} S(x) for quadrature points u_i, u_j return(geydelquad(X, r, sat, correction, sparseOK)) } else stop("Internal error: X should be a ppp or quad object") } geydelppp <- function(X, r, sat, correction, sparseOK) { # initialise nX <- npoints(X) result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX, nX)) ## identify all r-close pairs (ordered pairs i ~ j) unweighted <- correction %in% c("border", "none") if(unweighted) { a <- closepairs(X, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) ## count number of r-neighbours for each point tvals <- table(factor(I, levels=1:nX)) } else { a <- weightedclosepairs(X, r, correction=correction, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) wIJ <- a$weight wmatrix <- sparseMatrix(i=I,j=J,x=wIJ, dims=c(nX,nX)) wJI <- wmatrix[cbind(J,I)] ## total edge-correction weight over r-neighbours for each point tvals <- tapplysum(wIJ, list(factor(I, levels=1:nX))) } # Compute direct part # (arising when i~j) tI <- tvals[I] tJ <- tvals[J] if(unweighted) { result[IJ] <- pmin(sat, tI) - pmin(sat, tI - 1L) + pmin(sat, tJ) - pmin(sat, tJ - 1L) } else { result[IJ] <- pmin(sat, tI) - pmin(sat, tI - wIJ) + pmin(sat, tJ) - pmin(sat, tJ - wJI) } # Compute indirect part # (arising when i~k and j~k for another point k) # First find all such triples ord <- (I < J) vees <- edges2vees(I[ord], J[ord], nX) # evaluate contribution of (k, i, j) II <- vees$j JJ <- vees$k KK <- vees$i tKK <- tvals[KK] if(unweighted) { contribKK <- pmin(sat, tKK) - 2 * pmin(sat, tKK-1L) + pmin(sat, tKK-2L) } else { wKKII <- wmatrix[cbind(KK, II)] wKKJJ <- wmatrix[cbind(KK, JJ)] contribKK <- ( pmin(sat, tKK) - pmin(sat, tKK-wKKII) - pmin(sat, tKK-wKKJJ) + pmin(sat, tKK-wKKII-wKKJJ) ) } # for 
each (i, j), sum the contributions over k if(!sparseOK) { II <- factor(II, levels=1:nX) JJ <- factor(JJ, levels=1:nX) # was: # delta3 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta3[is.na(delta3)] <- 0 delta3 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta3 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nX, nX)) } # symmetrise and combine result <- result + delta3 + t(delta3) return(result) } geydelquad <- function(Q, r, sat, correction, sparseOK) { Z <- is.data(Q) U <- union.quad(Q) nU <- npoints(U) nX <- npoints(Q$data) result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU, nU)) ## identify all r-close pairs U[i], U[j] unweighted <- correction %in% c("none", "border") if(unweighted) { a <- closepairs(U, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) } else { a <- weightedclosepairs(U, r, correction=correction, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) wIJ <- a$weight wmatrix <- sparseMatrix(i=I, j=J, x=wIJ, dims=c(nU,nU)) wJI <- wmatrix[cbind(J,I)] } ## tag which ones are data points zI <- Z[I] zJ <- Z[J] # count t(U[i], X) IzJ <- I[zJ] JzJ <- J[zJ] if(unweighted) { tvals <- table(factor(IzJ, levels=1:nU)) } else { wzJ <- wIJ[zJ] tvals <- tapplysum(wzJ, list(factor(IzJ, levels=1:nU))) } ## Compute direct part ## (arising when U[i]~U[j]) tI <- tvals[I] tJ <- tvals[J] if(unweighted) { tIJ <- tI - zJ tJI <- tJ - zI result[IJ] <- pmin(sat, tIJ + 1L) - pmin(sat, tIJ) + pmin(sat, tJI + 1L) - pmin(sat, tJI) } else { tIJ <- tI - zJ * wIJ tJI <- tJ - zI * wJI result[IJ] <- pmin(sat, tIJ + wIJ) - pmin(sat, tIJ) + pmin(sat, tJI + wJI) - pmin(sat, tJI) } # Compute indirect part # (arising when U[i]~X[k] and U[j]~X[k] for another point X[k]) # First find all such triples # Group close pairs X[k] ~ U[j] by index k spl <- split(IzJ, factor(JzJ, levels=1:nX)) grlen <- lengths(spl) # Assemble list of triples U[i], X[k], U[j] # by expanding each pair U[i], X[k] JJ <- unlist(spl[JzJ]) II <- rep(IzJ, grlen[JzJ]) KK <- rep(JzJ, grlen[JzJ]) # remove identical pairs i = j ok <- II != JJ II <- II[ok] JJ <- JJ[ok] KK <- KK[ok] # evaluate contribution of each triple tKK <- tvals[KK] zII <- Z[II] zJJ <- Z[JJ] if(unweighted) { tKIJ <- tKK - zII - zJJ contribKK <- pmin(sat, tKIJ + 2L) - 2 * pmin(sat, tKIJ + 1L) + pmin(sat, tKIJ) } else { wKKII <- wmatrix[cbind(KK, II)] wKKJJ <- wmatrix[cbind(KK, JJ)] tKIJ <- tKK - zII * wKKII - zJJ * wKKJJ contribKK <- (pmin(sat, tKIJ + wKKII + wKKJJ) - pmin(sat, tKIJ + wKKII) - pmin(sat, tKIJ + wKKJJ) + pmin(sat, tKIJ)) } # for each (i, j), sum the contributions over k if(!sparseOK) { II <- factor(II, levels=1:nU) JJ <- factor(JJ, levels=1:nU) # was: # delta4 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta4[is.na(delta4)] <- 0 delta4 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta4 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nU, nU)) } # combine result <- result + delta4 return(result) } geyerdelta2 }) spatstat.core/R/fasp.R0000644000176200001440000001022114144333462014321 0ustar liggesusers# # fasp.R # # $Revision: 1.36 $ $Date: 2020/11/24 01:38:24 $ # # #----------------------------------------------------------------------------- # # creator fasp <- function(fns, which, formulae=NULL, dataname=NULL, title=NULL, rowNames=NULL, colNames=NULL, checkfv=TRUE) { stopifnot(is.list(fns)) stopifnot(is.matrix(which)) stopifnot(length(fns) == length(which)) n <- length(which) if(checkfv) for(i in seq_len(n)) if(!is.fv(fns[[i]])) stop(paste("fns[[", i, "]] is not an fv object", 
sep="")) # set row and column labels if(!is.null(rowNames)) rownames(which) <- rowNames if(!is.null(colNames)) colnames(which) <- colNames if(!is.null(formulae)) { # verify format and convert to character vector formulae <- FormatFaspFormulae(formulae, "formulae") # ensure length matches length of "fns" if(length(formulae) == 1L && n > 1L) # single formula - replicate it formulae <- rep.int(formulae, n) else stopifnot(length(formulae) == length(which)) } rslt <- list(fns=fns, which=which, default.formula=formulae, dataname=dataname, title=title) class(rslt) <- "fasp" return(rslt) } # subset extraction operator "[.fasp" <- function(x, I, J, drop=TRUE, ...) { verifyclass(x, "fasp") m <- nrow(x$which) n <- ncol(x$which) if(missing(I)) I <- 1:m if(missing(J)) J <- 1:n if(!is.vector(I) || !is.vector(J)) stop("Subset operator is only implemented for vector indices") # determine index subset for lists 'fns', 'titles' etc included <- rep.int(FALSE, length(x$fns)) w <- as.vector(x$which[I,J]) if(length(w) == 0) stop("result is empty") included[w] <- TRUE # if only one cell selected, and drop=TRUE: if((sum(included) == 1L) && drop) return(x$fns[included][[1L]]) # determine positions in shortened lists whichIJ <- x$which[I,J,drop=FALSE] newk <- cumsum(included) newwhich <- matrix(newk[whichIJ], ncol=ncol(whichIJ), nrow=nrow(whichIJ)) rownames(newwhich) <- rownames(x$which)[I] colnames(newwhich) <- colnames(x$which)[J] # default plotting formulae - could be NULL deform <- x$default.formula # create new fasp object Y <- fasp(fns = x$fns[included], formulae = if(!is.null(deform)) deform[included] else NULL, which = newwhich, dataname = x$dataname, title = x$title) return(Y) } dim.fasp <- function(x) { dim(x$which) } # print method print.fasp <- function(x, ...) { verifyclass(x, "fasp") cat(paste("Function array (class", sQuote("fasp"), ")\n")) dim <- dim(x$which) cat(paste("Dimensions: ", dim[1L], "x", dim[2L], "\n")) cat(paste("Title:", if(is.null(x$title)) "(None)" else x$title, "\n")) invisible(NULL) } # other methods as.fv.fasp <- function(x) do.call(cbind.fv, x$fns) dimnames.fasp <- function(x) { return(dimnames(x$which)) } "dimnames<-.fasp" <- function(x, value) { w <- x$which dimnames(w) <- value x$which <- w return(x) } ## other functions FormatFaspFormulae <- local({ zapit <- function(x, argname) { if(inherits(x, "formula")) deparse(x) else if(is.character(x)) x else stop(paste("The entries of", sQuote(argname), "must be formula objects or strings")) } FormatFaspFormulae <- function(f, argname) { ## f should be a single formula object, a list of formula objects, ## a character vector, or a list containing formulae and strings. ## It will be converted to a character vector. result <- if(is.character(f)) f else if(inherits(f, "formula")) deparse(f) else if(is.list(f)) unlist(lapply(f, zapit, argname=argname)) else stop(paste(sQuote(argname), "should be a formula, a list of formulae,", "or a character vector")) return(result) } FormatFaspFormulae }) spatstat.core/R/dummify.R0000644000176200001440000000153414144333462015051 0ustar liggesusers# # dummify.R # # Convert a factor to a matrix of dummy variables, etc. 
# # $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $ # dummify <- function(x) { if(is.matrix(x) || is.data.frame(x)) { x <- as.data.frame(x) y <- do.call(data.frame, lapply(x, dummify)) return(as.matrix(y)) } # x is 1-dimensional if(is.complex(x)) return(as.matrix(data.frame(Re=Re(x), Im=Im(x)))) # convert factors etc if(is.character(x)) x <- factor(x) if(is.logical(x)) x <- factor(x, levels=c(FALSE,TRUE)) if(is.factor(x)) { # convert to dummy variables nx <- length(x) lev <- levels(x) y <- matrix(0L, nrow=nx, ncol=length(lev)) colnames(y) <- lev y[cbind(seq_len(nx), as.integer(x))] <- 1L return(y) } # convert to numeric y <- as.numeric(x) if(!is.matrix(y)) y <- matrix(y, ncol=1) return(y) } spatstat.core/R/bw.frac.R0000644000176200001440000000142714144333461014721 0ustar liggesusers#' #' bw.frac.R #' #' $Revision: 1.2 $ $Date: 2020/12/19 05:25:06 $ bw.frac <- function(X, ..., f=1/4) { X <- as.owin(X) g <- distcdf(X, ...) r <- with(g, .x) Fr <- with(g, .y) iopt <- min(which(Fr >= f)) ropt <- r[iopt] attr(ropt, "f") <- f attr(ropt, "g") <- g class(ropt) <- c("bw.frac", class(ropt)) return(ropt) } print.bw.frac <- function(x, ...) { print(as.numeric(x), ...) } plot.bw.frac <- function(x, ...) { xname <- short.deparse(substitute(x)) g <- attr(x, "g") f <- attr(x, "f") ropt <- as.numeric(x) dont.complain.about(g) do.call(plot, resolve.defaults(list(quote(g)), list(...), list(main=xname))) abline(v=ropt, lty=3) abline(h=f, lty=3) invisible(NULL) } spatstat.core/R/clusterset.R0000644000176200001440000000432314144333462015573 0ustar liggesusers# # clusterset.R # # Allard-Fraley estimator of cluster region # # $Revision: 1.12 $ $Date: 2016/02/16 01:39:12 $ # clusterset <- function(X, what=c("marks", "domain"), ..., verbose=TRUE, fast=FALSE, exact=!fast) { stopifnot(is.ppp(X)) what <- match.arg(what, several.ok=TRUE) if(!missing(exact)) stopifnot(is.logical(exact)) if(fast && exact) stop("fast=TRUE is incompatible with exact=TRUE") # compute duplication exactly as in deldir, or the universe will explode X <- unique(unmark(X), rule="deldir", warn=TRUE) n <- npoints(X) W <- as.owin(X) # discretised Dirichlet tessellation if(verbose) cat("Computing Dirichlet tessellation...") if(fast || !exact) cellid <- as.im(nnfun(X), ...) 
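# 'cellid' is a pixel image whose value at each pixel is the index of the
# nearest point of X, i.e. a discretised Dirichlet (Voronoi) tessellation;
# the tile areas required by the Allard-Fraley estimator are then
# approximated by counting the pixels assigned to each index.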
# compute tile areas if(fast) { a <- table(factor(as.vector(as.matrix(cellid)), levels=1:n)) if(verbose) cat("done.\n") a <- a + 0.5 A <- sum(a) } else { d <- dirichlet(X) if(verbose) cat("done.\n") D <- tiles(d) suppressWarnings(id <- as.integer(names(D))) if(anyNA(id) && ("marks" %in% what)) stop("Unable to map Dirichlet tiles to data points") A <- area(W) a <- unlist(lapply(D, area)) } # determine optimal selection of tiles ntile <- length(a) o <- order(a) b <- cumsum(a[o]) m <- seq_len(ntile) logl <- -n * log(n) + m * log(m/b) + (n-m) * log((n-m)/(A-b)) mopt <- which.max(logl) picked <- o[seq_len(mopt)] ## map tiles to points if(!fast) picked <- id[picked] ## logical vector is.picked <- rep.int(FALSE, n) is.picked[picked] <- TRUE # construct result out <- list(marks=NULL, domain=NULL) if("marks" %in% what) { ## label points yesno <- factor(ifelse(is.picked, "yes", "no"), levels=c("no", "yes")) out$marks <- X %mark% yesno } if("domain" %in% what) { if(verbose) cat("Computing cluster set...") if(exact) { domain <- do.call(union.owin, unname(D[is.picked])) domain <- rebound.owin(domain, as.rectangle(W)) } else { domain <- eval.im(is.picked[cellid]) } out$domain <- domain if(verbose) cat("done.\n") } out <- if(length(what) == 1L) out[[what]] else out return(out) } spatstat.core/R/triplets.R0000644000176200001440000001250114144333464015243 0ustar liggesusers# # # triplets.R # # $Revision: 1.18 $ $Date: 2018/03/15 07:37:41 $ # # The triplets interaction # # Triplets() create an instance of the triplets process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Triplets <- local({ DebugTriplets <- FALSE # define triplet potential TripletPotential <- function(X,U,EqualPairs,pars,correction, ...) { if(!all(ok <- correction %in% c("border", "none"))) { nbad <- sum(bad <- !ok) warning(paste(ngettext(nbad, "Correction", "Corrections"), commasep(sQuote(correction[bad])), ngettext(nbad, "is unavailable and was ignored", "are unavailable and were ignored"))) } # check that all points of X are included in U nX <- npoints(X) nU <- npoints(U) XinU <- if(length(EqualPairs) == 0) integer(0) else EqualPairs[,1] missX <- which(table(factor(XinU, levels=1:nX)) == 0) if((nmiss <- length(missX)) > 0) { # add missing points to (the end of) U U <- superimpose(U, X[missX], W=as.owin(X), check=FALSE) EqualPairs <- rbind(EqualPairs, cbind(missX, nU + 1:nmiss)) nU <- nU + nmiss } iXX <- EqualPairs[,1] iXU <- EqualPairs[,2] # construct map from X index to U index mapXU <- integer(nX) mapXU[iXX] <- iXU # construct map from U index to X index mapUX <- rep.int(NA_integer_, nU) mapUX[iXU] <- iXX # logical vector identifying which quadrature points are in X isdata <- rep.int(FALSE, nU) isdata[iXU] <- TRUE # identify all close pairs u, x r <- pars$r cp <- crosspairs(U, X, r, what="indices") if(DebugTriplets) cat(paste("crosspairs at distance", r, "yields", length(cp$i), "pairs\n")) IU <- cp$i J <- cp$j # map X index to U index JU <- mapXU[J] # Each (Xi, Xj) pair will appear twice - eliminate duplicates dupX <- isdata[IU] & isdata[JU] & (IU > JU) retain <- !dupX IU <- IU[retain] JU <- JU[retain] if(DebugTriplets) cat(paste(sum(dupX), "duplicate pairs removed\n")) # find all triangles tri <- edges2triangles(IU, JU, nU, friendly=isdata) if(DebugTriplets) cat(paste(nrow(tri), "triangles identified\n")) if(nrow(tri) == 0) { # there are no triangles; return vector of zeroes return(rep.int(0, nU-nmiss)) } # count triangles containing a given quadrature point tcount <- 
apply(tri, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nU) tcount <- .rowSums(tcount, nrow(tcount), ncol(tcount)) # select triangles consisting only of data points triX <- matrix(mapUX[tri], nrow=nrow(tri)) isX <- matrowall(!is.na(triX)) triX <- triX[isX, , drop=FALSE] # if(nrow(triX) > 0) { # count triangles of data points containing each given data point tXcount <- apply(triX, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nX) tXcount <- .rowSums(tXcount, nrow(tXcount), ncol(tXcount)) } else { # there are no triangles of data points tXcount <- rep.int(0, nX) } # answer <- tcount answer[iXU] <- tXcount[iXX] if(DebugTriplets) cat(paste("Max suff stat: data ", max(tXcount), ", dummy ", max(tcount[isdata]), "\n", sep="")) # truncate to original size if(nmiss > 0) answer <- answer[-((nU-nmiss+1):nU)] return(answer) } # set up basic 'triplets' object except for family and parameters BlankTripletsObject <- list( name = "Triplets process", creator = "Triplets", family = "triplet.family", # evaluated later pot = TripletPotential, par = list(r=NULL), # filled in later parnames = "interaction distance", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { gamma <- ((self$interpret)(coeffs, self))$param$gamma return(is.finite(gamma) && (gamma <= 1)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL # to be added ) class(BlankTripletsObject) <- "interact" # define Triplets function Triplets <- function(r) { instantiate.interact(BlankTripletsObject, list(r=r)) } Triplets <- intermaker(Triplets, BlankTripletsObject) Triplets }) spatstat.core/R/nncorr.R0000644000176200001440000001154414144333463014703 0ustar liggesusers# # nncorr.R # # $Revision: 1.12 $ $Date: 2019/01/22 03:08:57 $ # nnmean <- function(X, k=1, na.action="warn") { stopifnot(is.ppp(X)) if(!is.marked(X, na.action=na.action)) stop("X must be a marked point pattern", call.=FALSE) if(k %% 1 != 0 || length(k) != 1 || k <= 0) stop("k should be a single integer greater than 0", call.=FALSE) m <- numeric.columns(marks(X), logical=TRUE, others="na") ## default result nana <- rep(NA_real_, ncol(m)) ans <- rbind(unnormalised=nana, normalised=nana) ## if(all(is.na(m))) { warning("non-numeric marks; results are NA", call.=FALSE) } else if(k >= npoints(X)) { warning(paste("Not enough points to compute k-th nearest neighbours", paste0(paren(paste0("n = ", npoints(X), ", k = ", k)), ";"), "results are NA"), call.=FALSE) } else { nnid <- nnwhich(X, k=k) ok <- (nndist(X, k=k) <= bdist.points(X)) if(!any(ok, na.rm=TRUE)) { warning("insufficient data remaining after border correction; results are NA") } else { numer <- sapply(as.data.frame(m[nnid[ok], ]), mean, na.rm=TRUE) denom <- sapply(as.data.frame(m), mean, na.rm=TRUE) ans <- rbind(unnormalised=numer, normalised =numer/denom) } } if(ncol(ans) == 1) ans <- ans[,1,drop=TRUE] return(ans) } nnvario <- local({ nnvario <- function(X, k=1, na.action="warn") { stopifnot(is.ppp(X)) if(!is.marked(X, na.action=na.action)) stop("X must be a marked point pattern", call.=FALSE) m <- numeric.columns(marks(X), logical=TRUE, others="na") if(all(is.na(m))) warning("non-numeric marks; results are NA", call.=FALSE) ans <- nncorr(X %mark% m, sqdif, k=k, denominator=diag(var(m)), na.action="ignore") return(ans) } sqdif <- function(m1,m2) { ((m1-m2)^2)/2 } nnvario }) nncorr <- function(X, f = function(m1,m2) { m1 * m2}, k=1, ..., use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL, na.action="warn") { stopifnot(is.ppp(X)) if(!is.marked(X, na.action=na.action)) stop("X must be a marked point pattern", call.=FALSE) if(k %% 1 != 0 || length(k) != 1 || k <= 0) stop("k should be a single integer greater than 0", call.=FALSE) if(k >= npoints(X)) stop("Not enough points to compute k-th nearest neighbours") m <- as.data.frame(marks(X)) nv <- ncol(m) if(nv == 1) colnames(m) <- "" # if(missing(method) || is.null(method)) method <- "pearson" # if(missing(f)) f <- NULL if(!is.null(f) && !is.function(f)) { if(nv == 1) stop("f should be a function") # could be a list of functions if(!(is.list(f) && all(unlist(lapply(f, is.function))))) stop("f should be a function or a list of functions") if(length(f) != nv) stop("Length of list f does not match number of mark variables") } # optional denominator(s) if(!is.null(denominator) && !(length(denominator) %in% c(1, nv))) stop("Denominator has incorrect length") # multi-dimensional case if(nv > 1) { # replicate things if(is.function(f)) f <- rep.int(list(f), nv) if(length(denominator) <= 1) denominator <- rep.int(list(denominator), nv) # result <- matrix(NA, nrow=3, ncol=nv) outnames <- c("unnormalised", "normalised", "correlation") dimnames(result) <- list(outnames, colnames(m)) for(j in 1:nv) { mj <- m[,j, 
drop=FALSE] denj <- denominator[[j]] nncj <- nncorr(X %mark% mj, f=f[[j]], k=k, use=use, method=method, denominator=denj) kj <- length(nncj) result[1:kj,j] <- nncj } if(all(is.na(result[3, ]))) result <- result[1:2, ] return(result) } # one-dimensional m <- m[,1,drop=TRUE] # select 'f' appropriately for X chk <- check.testfun(f, X=X) f <- chk$f ftype <- chk$ftype # denominator Efmm <- if(!is.null(denominator)) denominator else switch(ftype, mul={ mean(m)^2 }, equ={ sum(table(m)^2)/length(m)^2 }, general={ mean(outer(m, m, f, ...)) }) # border method nn <- nnwhich(X, k=k) ok <- (nndist(X, k=k) <= bdist.points(X)) if(!any(ok)) stop("Insufficient data") mY <- m[nn[ok]] mX <- m[ok] Efmk <- switch(ftype, mul = { mean(mX * mY, ...) }, equ = { mean(mX == mY, ...) }, general = { mean(f(mX, mY, ...)) }) # answer <- c(unnormalised=Efmk, normalised=Efmk/Efmm) if(ftype == "mul") { classic <- cor(mX, mY, use=use, method=method) answer <- c(answer, correlation=classic) } return(answer) } spatstat.core/R/areainter.R0000644000176200001440000003143314144333461015351 0ustar liggesusers# # # areainter.R # # $Revision: 1.50 $ $Date: 2021/02/06 03:45:20 $ # # The area interaction # # AreaInter() create an instance of the area-interaction process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # AreaInter <- local({ #' area-interaction conditional intensity potential #' corresponds to potential -C(x) = n(x) - A(x)/\pi r^2 areapot <- function(X,U,EqualPairs,pars,correction, ..., W=as.owin(X)) { #' W is the window to which computations of area will be clipped. #' W=NULL is permissible, meaning no clipping. uhoh <- !(correction %in% c("border", "none")) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste(ngettext(nuh, "Correction", "Corrections"), commasep(sQuote(correction[uhoh])), ngettext(nuh, "is not supported and was ignored", "are not supported and were ignored"))) } r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") #' if(Poly <- spatstat.options('areainter.polygonal')) { if(is.mask(W)) W <- as.polygonal(W) if(is.mask(Window(X))) Window(X) <- as.polygonal(Window(X)) if(is.mask(Window(U))) Window(U) <- as.polygonal(Window(U)) } n <- U$n areas <- numeric(n) #' dummy points dummies <- setdiff(seq_len(n), EqualPairs[,2L]) if(length(dummies)) areas[dummies] <- areaGain(U[dummies], X, r, W=W, exact=Poly) #' data points represented in U ii <- EqualPairs[,1L] jj <- EqualPairs[,2L] inborder <- (bdist.points(X[ii]) <= r) # sic #' points in border region need clipping if(any(inborder)) areas[jj[inborder]] <- areaLoss(X, r, subset=ii[inborder], exact=Poly) #' points in eroded region do not necessarily if(any(ineroded <- !inborder)) { areas[jj[ineroded]] <- areaLoss(X, r, subset=ii[ineroded], W=W, exact=Poly) } return(1 - areas/(pi * r^2)) } #' fractional area of overlap of two unit discs at distance 2 * z discOverlap <- function(z) { z <- pmax(pmin(z, 1), -1) (2/pi) * (acos(z) - z * sqrt(1 - z^2)) } # template object without family, par, version BlankAI <- list( name = "Area-interaction process", creator = "AreaInter", family = "inforder.family", # evaluated later pot = areapot, par = list(r = NULL), # to be filled in parnames = "disc radius", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("disc radius r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- 
fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Area-interaction process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r xlim <- resolve.1.default(list(xlim=c(0, 1.25 * 2*r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * discOverlap(d/(2 * r))) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(quote(fun)), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1L]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", printable=signif(eta))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(2 * r) logeta <- coeffs[1L] if(abs(logeta) <= epsilon) return(0) else return(2 * r) }, delta2 = function(X, inte, correction, ..., sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r areadelta2(X, r, sparseOK=sparseOK) }, version=NULL # to be added ) class(BlankAI) <- "interact" AreaInter <- function(r) { instantiate.interact(BlankAI, list(r=r)) } AreaInter <- intermaker(AreaInter, BlankAI) AreaInter }) areadelta2 <- local({ areadelta2 <- function(X, r, ..., sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(is.ppp(X)) return(areadelppp(X, r, ..., sparseOK=sparseOK)) else if(is.quad(X)) return(areadelquad(X, r, sparseOK=sparseOK)) else stop("internal error: X should be a ppp or quad object") } areadelppp <- function(X, r, algorithm=c("C", "nncross", "nnmap"), sparseOK=TRUE) { # Evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. 
h(X[i]|X) - h(X[i]|X[-j]) # where h is first order cif statistic algorithm <- match.arg(algorithm) nX <- npoints(X) sparseOK <- sparseOK result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX,nX)) if(nX < 2) return(result) if(algorithm == "C") { # use special purpose C routine # called once for each interacting pair of points xx <- X$x yy <- X$y cl <- closepairs(X, 2 * r, what="indices", twice=FALSE, neat=FALSE) I <- cl$i J <- cl$j eps <- r/spatstat.options("ngrid.disc") for(k in seq_along(I)) { i <- I[k] j <- J[k] # all neighbours of i Ki <- union(J[I==i], I[J==i]) # all neighbours of j Kj <- union(J[I==j], I[J==j]) # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C(SC_delta2area, xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE="spatstat.core") result[i,j] <- result[j,i] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } # non-C algorithms # confine attention to points which are interacting relevant <- (nndist(X) <= 2 * r) if(!all(relevant)) { if(any(relevant)) { # call self on subset Dok <- areadelppp(X[relevant], r, algorithm, sparseOK=sparseOK) result[relevant,relevant] <- Dok } return(result) } # .............. algorithm using interpreted code ........... # sort pattern in increasing order of x sortX <- (algorithm == "nnmap") if(sortX) { oX <- fave.order(X$x) X <- X[oX] } # area calculation may be restricted to window W for efficiency W <- as.owin(X) U <- as.rectangle(W) # decide pixel resolution eps <- r/spatstat.options("ngrid.disc") npix <- prod(ceiling(sidelengths(U)/eps)) if(npix <= 2^20) { # do it all in one go tile <- list(NULL) } else { # divide into rectangular tiles B <- as.rectangle(W) ntile0 <- ceiling(npix/(2^20)) tile0area <- area(B)/ntile0 tile0side <- sqrt(tile0area) nx <- ceiling(sidelengths(B)[1L]/tile0side) ny <- ceiling(sidelengths(B)[2L]/tile0side) tile <- tiles(quadrats(B, nx, ny)) } result <- matrix(0, nX, nX) for(i in seq_len(length(tile))) { # form pixel grid Ti <- tile[[i]] Wi <- if(is.null(Ti)) W else intersect.owin(W, Ti) if(algorithm == "nncross") { # Trusted, slow algorithm using nncross Z <- as.mask(Wi, eps=eps) G <- as.ppp(rasterxy.mask(Z), U, check=FALSE) # compute 3 nearest neighbours in X of each grid point v <- nncross(G, X, k=1:3) # select pixels which have exactly 2 neighbours within distance r ok <- with(v, dist.3 > r & dist.2 <= r) if(any(ok)) { v <- v[ok, , drop=FALSE] # accumulate pixel counts -> areas counts <- with(v, table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX))) pixarea <- with(Z, xstep * ystep) result <- result + pixarea * (counts + t(counts)) } } else { # Faster algorithm using nnmap # compute 3 nearest neighbours in X of each grid point stuff <- nnmap(X, k=1:3, W=Wi, eps=eps, is.sorted.X=TRUE, sortby="x", outputarray=TRUE) dist.2 <- stuff$dist[2L,,] dist.3 <- stuff$dist[3L,,] which.1 <- stuff$which[1L,,] which.2 <- stuff$which[2L,,] ok <- (dist.3 > r & dist.2 <= r) if(any(ok)) { which.1 <- as.vector(which.1[ok]) which.2 <- as.vector(which.2[ok]) counts <- table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX)) pixarea <- attr(stuff, "pixarea") result <- result + pixarea * (counts + t(counts)) } } } if(sortX) { # map back to original ordering result[oX, oX] <- 
result } # normalise result <- result/(pi * r^2) return(result) } areadelquad <- function(Q, r, sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # Area-interaction model # Evaluate \Delta_{u_j} \Delta_{u_i} S(x) for quadrature points # answer is area(b(u[i],r) \cap b(u[j],r)\setminus \bigcup_k b(x[k],r)) # where k ranges over all indices that are not equivalent to u[i,j] U <- union.quad(Q) Z <- is.data(Q) nU <- npoints(U) xx <- U$x yy <- U$y # identify all close pairs of quadrature points cl <- closepairs(U, 2 * r, what="indices") I <- cl$i J <- cl$j # find neighbours in X of each quadrature point zJ <- Z[J] neigh <- split(J[zJ], factor(I[zJ], levels=1L:nU)) # result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU,nU)) eps <- r/spatstat.options("ngrid.disc") # for(k in seq_along(I)) { i <- I[k] j <- J[k] # all points of X close to U[i] Ki <- neigh[[i]] # all points of X close to U[j] Kj <- neigh[[j]] # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C(SC_delta2area, xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE="spatstat.core") result[i,j] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } areadelta2 }) spatstat.core/R/randomppx.R0000644000176200001440000000245714144333463015415 0ustar liggesusers#' #' randomppx.R #' #' $Revision: 1.1 $ $Date: 2020/11/30 11:44:46 $ #' runifpointx <- function(n, domain, nsim=1, drop=TRUE) { check.1.integer(n) check.1.integer(nsim) stopifnot(inherits(domain, "boxx")) ra <- domain$ranges d <- length(ra) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { if(n == 0) { coo <- matrix(numeric(0), nrow=0, ncol=d) } else { coo <- mapply(runif, n=rep(n, d), min=ra[1,], max=ra[2,]) if(!is.matrix(coo)) coo <- matrix(coo, ncol=d) } colnames(coo) <- colnames(ra) df <- as.data.frame(coo) result[[i]] <- ppx(df, domain, coord.type=rep("s", d)) } if(nsim == 1 && drop) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } rpoisppx <- function(lambda, domain, nsim=1, drop=TRUE) { stopifnot(inherits(domain, "boxx")) stopifnot(is.numeric(lambda) && length(lambda) == 1 && lambda >= 0) n <- rpois(nsim, lambda * volume.boxx(domain)) result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- runifpointx(n[i], domain) if(nsim == 1 && drop) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } spatstat.core/R/triplet.family.R0000644000176200001440000000640114144333464016342 0ustar liggesusers# # # triplet.family.R # # $Revision: 1.1 $ $Date: 2011/11/05 07:18:51 $ # # Family of `third-order' point process models # # triplet.family: object of class 'isf' # # # ------------------------------------------------------------------- # triplet.family <- list( name = "triplet", print = function(self) { cat("Family of third-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `triplet' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). 
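# (Note: for the triplet family the potential is one-dimensional, so the
# matrix returned below has a single column, containing for each row of U
# the number of r-close triangles involving that location, as computed by
# TripletPotential above.)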
# # The eval functions perform all the manipulations that are common to # a given class of interactions. # # This function is currently modelled on 'inforder.family'. # It simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(triplet.family) <- "isf" spatstat.core/R/Kcom.R0000644000176200001440000003150314144333461014266 0ustar liggesusers# # Kcom.R # # model compensated K-function # # $Revision: 1.16 $ $Date: 2018/10/19 03:20:51 $ # Kcom <- local({ Kcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "isotropic", "translate"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), compute.var=TRUE, truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) 
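# a bare point pattern has just been converted to a quadrature scheme;
# a model specified by 'model', or by (trend, interaction, rbord), is
# then fitted to it below and its compensator is computed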
if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) restrict <- isTRUE(restrict) if(restrict && !conditional) { warning("restrict=TRUE ignored because conditional=FALSE", call.=FALSE) restrict <- FALSE } # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and window Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # selection of edge corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", ripley="isotropic", trans="translation", translate="translation", translation="translation", best="best"), multi=TRUE) correction <- implemented.for.K(correction, Win$type, correction.given) opt <- list(bord = any(correction == "border"), tran = any(correction == "translation"), ripl = any(correction == "isotropic")) if(sum(unlist(opt)) == 0) stop("No corrections selected") # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) # Throw away boundary data Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW lambda2 <- npts * (npts - 1)/(areaW^2) # adjustments to account for restricted domain of pseudolikelihood if(algo == "reweighted") { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # lambda2.used <- npts.used * (npts.used - 1)/(area.used^2) } else { npts.used <- npts area.used <- areaW # lambda.used <- lambda # lambda2.used <- lambda2 } # 'r' values rmaxdefault <- rmax.rule("K", if(restrict) Wfree else Win, npts/areaW) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) r <- breaks$r # nr <- length(r) rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame K <- data.frame(r=r, pois=pi * r^2) desc <- c("distance argument r", "expected %s for CSR") K <- fv(K, "r", substitute(K(r), NULL), "pois", , alim, c("r","%s[pois](r)"), desc, fname="K") ############### start computing ################## # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) resval <- with(resid, "increment") rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree resval <- resval[retain] rescts <- rescts[retain] } # close pairs of points # (quadrature point to data point) clos <- crosspairs(U, X, rmax, what="ijd") dIJ <- clos$d I <- clos$i J <- clos$j UI <- U[I] XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) # determine whether a quadrature point will be used in integral okI <- USED[I] 
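# 'okI' flags the quadrature points U[I] that contribute to the
# compensator integrals: in the 'reweighted' algorithm only points lying
# further than rbord from the window boundary are used (see USED above)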
if(spatstat.options("Kcom.remove.zeroes")) okI <- okI & !EIJ # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] #################################################### if(opt$bord) { # border method # Compute distances to boundary # (in restricted case, the window of U has been adjusted) b <- bdist.points(U) bI <- b[I] # reduced sample for K(r) of data only RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks) # Kb <- RSX$numerator/(lambda.used * RSX$denom.count) Kb <- RSX$numerator/(lambda * RSX$denom.count) K <- bind.fv(K, data.frame(border=Kb), "hat(%s)[bord](r)", nzpaste(algo, "border-corrected nonparametric estimate of %s"), "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[Z & USED], rep.int(1, npts.used), breaks, fatal=FALSE) # lambdaU <- (npts.used + 1)/area.used lambdaU <- (npts + 1)/areaW Kb <- RSD$numerator/((RSD$denominator + 1) * lambdaU) K <- bind.fv(K, data.frame(bcom=Kb), "bold(C)~hat(%s)[bord](r)", nzpaste("model compensator of", algo, "border-corrected %s"), "border") } if(opt$tran) { # translation correction edgewt <- switch(algo, classical = edge.Trans(UI, XJ, paired=TRUE), restricted = edge.Trans(UI, XJ, paired=TRUE), reweighted = edge.Trans.modif(UI, XJ, Win, Wfree, paired=TRUE)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) Ktrans <- cumsum(whDD)/(lambda2 * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s)[trans](r)", nzpaste(algo, "translation-corrected nonparametric estimate of %s"), "trans") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Ktrans <- cumsum(wh)/(lambda2U * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(tcom=Ktrans), "bold(C)~hat(%s)[trans](r)", nzpaste("model compensator of", algo, "translation-corrected %s"), "trans") } if(opt$ripl) { # Ripley isotropic correction edgewt <- edge.Ripley(UI, matrix(dIJ, ncol=1)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) # Kiso <- cumsum(whDD)/(lambda2.used * area.used) Kiso <- cumsum(whDD)/(lambda2 * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s)[iso](r)", nzpaste(algo, "isotropic-corrected nonparametric estimate of %s"), "iso") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Kiso <- cumsum(wh)/(lambda2U * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(icom=Kiso), "bold(C)~hat(%s)[iso](r)", nzpaste("model compensator of", algo, "isotropic-corrected %s"), "iso") # if(compute.var) { savedotnames <- fvnames(K, ".") # compute contribution to compensator from each quadrature point dOK <- dIJ[okI] eOK <- edgewt[okI] iOK <- I[okI] denom <- lambda2U * area.used variso <- varsumiso <- 0 * Kiso for(i in sortunique(iOK)) { relevant <- (iOK == i) tincrem <- whist(dOK[relevant], breaks$val, eOK[relevant]) localterm <- cumsum(tincrem)/denom variso <- variso + wc[i] * localterm^2 if(Z[i]) varsumiso <- varsumiso + localterm^2 } sdiso <- sqrt(variso) K <- bind.fv(K, data.frame(ivar=variso, isd =sdiso, ihi = 2*sdiso, ilo = -2*sdiso, ivarsum=varsumiso), c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[hi](r)", "bold(R)~hat(%s)[lo](r)", "hat(C)^2~hat(%s)[iso](r)"), c("Poincare variance of isotropic-corrected %s", "sqrt(Poincare 
variance) of isotropic-corrected %s",
                      "upper critical band for isotropic-corrected %s",
                      "lower critical band for isotropic-corrected %s",
                      "data estimate of Poincare variance of %s"),
                    "iso")
      #      fvnames(K, ".") <- c(savedotnames, "isd")
      fvnames(K, ".") <- savedotnames
      }
    }
    # default is to display all corrections
    formula(K) <- . ~ r
    unitname(K) <- unitname(X)
    # secret tag used by 'Kres'
    attr(K, "maker") <- "Kcom"
    return(K)
  }

  # `reweighted' translation edge correction
  edge.Trans.modif <- function(X, Y=X,
                               WX=X$window, WY=Y$window,
                               exact=FALSE, paired=FALSE,
                               trim=spatstat.options("maxedgewt")) {
    # computes edge correction factor
    #  f = area(WY)/area(intersect.owin(WY, shift(WX, X[i] - Y[j])))
    X <- as.ppp(X, WX)
    W <- X$window
    x <- X$x
    y <- X$y
    Y <- as.ppp(Y, WY)
    xx <- Y$x
    yy <- Y$y
    nX <- npoints(X)
    nY <- npoints(Y)
    if(paired && (nX != nY))
      stop("X and Y should have equal length when paired=TRUE")
    # For irregular polygons, exact evaluation is very slow;
    # so use pixel approximation, unless exact=TRUE
    if(!exact) {
      if(WX$type == "polygonal")
        WX <- as.mask(WX)
      if(WY$type == "polygonal")
        WY <- as.mask(WY)
    }
    typeX <- WX$type
    typeY <- WY$type
    if(typeX == "rectangle" && typeY == "rectangle") {
      # Fast code for this case
      if(!paired) {
        DX <- abs(outer(x,xx,"-"))
        DY <- abs(outer(y,yy,"-"))
      } else {
        DX <- abs(xx - x)
        DY <- abs(yy - y)
      }
      A <- WX$xrange
      B <- WX$yrange
      a <- WY$xrange
      b <- WY$yrange
      # compute width and height of intersection
      wide <- pmin.int(a[2], A[2]+DX) - pmax(a[1], A[1]+DX)
      high <- pmin.int(b[2], B[2]+DY) - pmax(b[1], B[1]+DY)
      # edge correction weight
      weight <- diff(a) * diff(b) / (wide * high)
      if(!paired)
        weight <- matrix(weight, nrow=X$n, ncol=Y$n)
    } else if(typeX %in% c("rectangle", "polygonal") &&
              typeY %in% c("rectangle", "polygonal")) {
      # This code is SLOW
      WX <- as.polygonal(WX)
      WY <- as.polygonal(WY)
      # numerator of the correction factor, consistent with the
      # formula f = area(WY)/area(intersection) used in the other branches
      a <- area(WY)
      if(!paired) {
        weight <- matrix(, nrow=nX, ncol=nY)
        if(nX > 0 && nY > 0) {
          for(i in seq_len(nX)) {
            X.i <- c(x[i], y[i])
            for(j in seq_len(nY)) {
              shiftvector <- X.i - c(xx[j],yy[j])
              WXshift <- shift(WX, shiftvector)
              b <- overlap.owin(WY, WXshift)
              weight[i,j] <- a/b
            }
          }
        }
      } else {
        nX <- npoints(X)
        weight <- numeric(nX)
        if(nX > 0) {
          for(i in seq_len(nX)) {
            shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i])
            WXshift <- shift(WX, shiftvector)
            b <- overlap.owin(WY, WXshift)
            weight[i] <- a/b
          }
        }
      }
    } else {
      WX <- as.mask(WX)
      WY <- as.mask(WY)
      # make difference vectors
      if(!paired) {
        DX <- outer(x,xx,"-")
        DY <- outer(y,yy,"-")
      } else {
        DX <- x - xx
        DY <- y - yy
      }
      # compute set cross-covariance
      g <- setcov(WY,WX)
      # evaluate set cross-covariance at these vectors
      gvalues <- lookup.im(g, as.vector(DX), as.vector(DY),
                           naok=TRUE, strict=FALSE)
      weight <- area(WY)/gvalues
    }
    # clip high values
    if(length(weight) > 0)
      weight <- pmin.int(weight, trim)
    if(!paired)
      weight <- matrix(weight, nrow=X$n, ncol=Y$n)
    return(weight)
  }

  Kcom
})
spatstat.core/R/mincontrast.R0000644000176200001440000010322614144333462015741 0ustar liggesusers#'
#' mincontrast.R
#'
#' Functions for estimation by minimum contrast
#'
#' $Revision: 1.115 $ $Date: 2021/11/09 07:32:58 $
#'

##################  base  ################################

safePositiveValue <- function(x, default=.Machine$double.eps) {
  ## ensure x is finite, positive, and acceptable to C routines
  ifelse(is.finite(x),
         pmin(.Machine$double.xmax,
              pmax(.Machine$double.eps, x)),
         default)
}

safeFiniteValue <- function(x, default=0) {
  ## ensure x is finite and acceptable to C routines
  biggest <- .Machine$double.xmax
  ifelse(is.finite(x),
         pmin(biggest,
              pmax(-biggest, x)),
         default)
}
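#' Editorial illustration (a sketch, not package code): the helpers above
#' replace any non-finite value by 'default' and clamp finite values into
#' the range acceptable to the C routines. For example
#'    safePositiveValue(c(0, -3, NaN, Inf))
#' returns .Machine$double.eps in all four positions: 0 and -3 are raised
#' to the smallest positive double, while NaN and Inf, being non-finite,
#' are replaced by the default.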
bigvaluerule <- function(objfun, objargs, startpar, ...) { ## Determine suitable large number to replace Inf values of objective ## Evaluate objective at starting parameter vector startval <- do.call(objfun, list(par=startpar, objargs=objargs, ...)) ## check with(.Machine, { hugeval <- sqrt(double.xmax) * double.eps if(abs(startval) > hugeval) { warning(paste("Internal error: objective function returns huge value", paren(startval), "which may cause numerical problems"), call.=FALSE) return(sqrt(double.xmax)) } bigvalue <- min(hugeval, max(sqrt(hugeval), 1024 * abs(startval))) return(bigvalue) }) } mincontrast <- local({ ## objective function (in a format that is re-usable by other code) contrast.objective <- function(par, objargs, ...) { with(objargs, { theo <- theoretical(par=par, rvals, ...) if(!is.vector(theo) || !is.numeric(theo)) stop("theoretical function did not return a numeric vector") if(length(theo) != nrvals) stop("theoretical function did not return the correct number of values") ## integrand of discrepancy discrep <- (abs(theo^qq - obsq))^pp ## protect C code from weird values bigvalue <- BIGVALUE + sqrt(sum(par^2)) discrep <- safePositiveValue(discrep, default=bigvalue) ## rescaled integral of discrepancy value <- mean(discrep) return(value) }) } mincontrast <- function(observed, theoretical, startpar, ..., ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), action.bad.values=c("warn", "stop", "silent"), control=list(), stabilize=TRUE, pspace=NULL) { verifyclass(observed, "fv") action.bad.values <- match.arg(action.bad.values) stopifnot(is.function(theoretical)) if(!any("par" %in% names(formals(theoretical)))) stop(paste("Theoretical function does not include an argument called", sQuote("par"))) ## enforce defaults ctrl <- resolve.defaults(ctrl, list(q = 1/4, p = 2, rmin=NULL, rmax=NULL)) fvlab <- resolve.defaults(fvlab, list(label=NULL, desc="minimum contrast fit")) explain <- resolve.defaults(explain, list(dataname=NULL, modelname=NULL, fname=NULL)) ## extract vector of r values argu <- fvnames(observed, ".x") rvals <- observed[[argu]] ## determine range of r values rmin <- ctrl$rmin rmax <- ctrl$rmax if(!is.null(rmin) && !is.null(rmax)) stopifnot(rmin < rmax && rmin >= 0) else { alim <- attr(observed, "alim") %orifnull% range(rvals) if(is.null(rmax)) rmax <- alim[2] if(is.null(rmin)) { rmin <- alim[1] if(rmin == 0 && identical(explain$fname,"g")) rmin <- rmax/1e3 # avoid artefacts at zero in pcf } } ## extract vector of observed values of statistic valu <- fvnames(observed, ".y") obs <- observed[[valu]] ## restrict to [rmin, rmax] if(max(rvals) < rmax) stop(paste("rmax=", signif(rmax,4), "exceeds the range of available data", "= [", signif(min(rvals),4), ",", signif(max(rvals),4), "]"), call.=FALSE) sub <- (rvals >= rmin) & (rvals <= rmax) rvals <- rvals[sub] obs <- obs[sub] ## sanity clause if(!all(ok <- is.finite(obs))) { doomed <- !any(ok) if(!doomed && all(ok[-1])) { ## common case: all finite except the value for r=0 whinge <- paste("The value of the empirical function", sQuote(explain$fname), "for r=", rvals[1], "was", paste0(obs[1], ".")) if(action.bad.values == "stop") stop(whinge, call.=FALSE) iMIN <- 2 iMAX <- length(obs) success <- TRUE } else { ## general case: some non-finite values whinge <- paste(if(doomed) "All" else "Some", "values of the empirical function", sQuote(explain$fname), "were infinite, NA or NaN.") if(doomed || action.bad.values == 
"stop") stop(whinge, call.=FALSE) ## trim each end of domain ra <- range(which(ok)) iMIN <- ra[1] iMAX <- ra[2] success <- all(ok[iMIN:iMAX]) } if(!success) { ## Finite and non-finite values are interspersed; ## find the longest run of finite values z <- rle(ok) k <- which.max(z$lengths * z$values) ## Run must be at least half of the data if(2 * z$lengths[k] > length(ok)) { csl <- cumsum(z$lengths) iMAX <- csl[k] iMIN <- 1L + if(k == 1) 0 else csl[k-1] success <- TRUE } } if(success) { ## accept trimmed domain rmin <- rvals[iMIN] rmax <- rvals[iMAX] obs <- obs[iMIN:iMAX] rvals <- rvals[iMIN:iMAX] sub[sub] <- ok if(action.bad.values == "warn") { warning(paste(whinge, "Range of r values was reset to", prange(c(rmin, rmax))), call.=FALSE) } } else stop(paste(whinge, "Unable to recover.", "Please choose a narrower range [rmin, rmax]"), call.=FALSE) } ## pack data into a list objargs <- list(theoretical = theoretical, rvals = rvals, nrvals = length(rvals), obsq = obs^(ctrl$q), ## for efficiency qq = ctrl$q, pp = ctrl$p, rmin = rmin, rmax = rmax, BIGVALUE = 1) ## determine a suitable large number to replace Inf values of objective objargs$BIGVALUE <- bigvaluerule(contrast.objective, objargs, startpar, ...) ## ................... optimization algorithm control parameters ....................... if(stabilize) { ## Numerical stabilisation ## evaluate objective at starting state startval <- contrast.objective(startpar, objargs, ...) ## use to determine appropriate global scale smallscale <- sqrt(.Machine$double.eps) fnscale <- max(abs(startval), smallscale) parscale <- pmax(abs(startpar), smallscale) scaling <- list(fnscale=fnscale, parscale=parscale) } else { scaling <- list() } control <- resolve.defaults(control, scaling, list(trace=0)) ## ..................................................................................... ## >>>>>>>>>>>>>>>>> . . . . O P T I M I Z E . . . . <<<<<<<<<<<<<<<<<< minimum <- optim(startpar, fn=contrast.objective, objargs=objargs, ..., control=control) ## ..................................................................................... ## if convergence failed, issue a warning signalStatus(optimStatus(minimum), errors.only=TRUE) ## evaluate the fitted theoretical curve fittheo <- theoretical(minimum$par, rvals, ...) ## pack it up as an `fv' object label <- fvlab$label %orifnull% "%s[fit](r)" desc <- fvlab$desc fitfv <- bind.fv(observed[sub, ], data.frame(fit=fittheo), label, desc) result <- list(par = minimum$par, fit = fitfv, opt = minimum, ctrl = list(p=ctrl$p,q=ctrl$q,rmin=rmin,rmax=rmax), info = explain, startpar = startpar, objfun = contrast.objective, objargs = objargs, dotargs = list(...)) class(result) <- c("minconfit", class(result)) return(result) } mincontrast }) print.minconfit <- function(x, ...) 
{
  terselevel <- spatstat.options('terse')
  digits <- getOption('digits')
  ## explanatory
  cat(paste("Minimum contrast fit ",
            "(", "object of class ",
            dQuote("minconfit"), ")", "\n", sep=""))
  mo <- x$info$modelname
  fu <- x$info$fname
  da <- x$info$dataname
  cm <- x$covmodel
  if(!is.null(mo))
    cat("Model:", mo, fill=TRUE)
  if(!is.null(cm)) {
    ## Covariance/kernel model and nuisance parameters
    cat("\t", cm$type, "model:", cm$model, fill=TRUE)
    margs <- cm$margs
    if(!is.null(margs)) {
      nama <- names(margs)
      tags <- ifelse(nzchar(nama), paste(nama, "="), "")
      tagvalue <- paste(tags, margs)
      splat("\t", cm$type, "parameters:",
            paste(tagvalue, collapse=", "))
    }
  }
  if(!is.null(fu) && !is.null(da))
    splat("Fitted by matching theoretical", fu, "function to", da)
  else {
    if(!is.null(fu))
      splat(" based on", fu)
    if(!is.null(da))
      splat(" fitted to", da)
  }
  if(waxlyrical('space', terselevel))
    cat("\n")
  ## Values
  splat("Internal parameters fitted by minimum contrast ($par):")
  print(x$par, ...)
  if(waxlyrical('space', terselevel))
    cat("\n")
  ## Handling new parameters
  ## (parentheses required: %orifnull% binds more tightly than != )
  isPCP <- x$isPCP %orifnull% (x$internal$model != "lgcp")
  cpar <- x$clustpar
  if (!is.null(cpar)) {
    splat("Fitted",
          if(isPCP) "cluster" else "covariance",
          "parameters:")
    print(cpar, digits=digits)
  } else{
    ## Old modelpar field if necessary
    mp <- x$modelpar
    if(!is.null(mp)) {
      splat("Derived parameters of",
            if(!is.null(mo)) mo else "model",
            "($modelpar):")
      print(mp)
    }
  }
  if(length(mu <- x$mu)) {
    if(isPCP) {
      splat("Mean cluster size: ",
            if(is.numeric(mu)) paste(signif(mu, digits), "points") else
            if(is.im(mu)) "[pixel image]" else "[unknown]")
    } else {
      splat("Fitted mean of log of random intensity: ",
            if(is.numeric(mu)) signif(mu, digits) else
            if(is.im(mu)) "[pixel image]" else "[unknown]")
    }
  }
  if(waxlyrical('space', terselevel))
    cat("\n")
  ## Diagnostics
  printStatus(optimStatus(x$opt))
  ## Starting values
  if(waxlyrical('gory', terselevel)){
    cat("\n")
    splat("Starting values of parameters:")
    print(x$startpar)
    ## Algorithm parameters
    ct <- x$ctrl
    splat("Domain of integration:",
          "[", signif(ct$rmin,4), ",", signif(ct$rmax,4), "]")
    splat("Exponents:",
          "p=", paste(signif(ct$p, 3), ",", sep=""),
          "q=", signif(ct$q,3))
  }
  invisible(NULL)
}

plot.minconfit <- function(x, ...)
{
  xname <- short.deparse(substitute(x))
  xf <- x$fit
  dont.complain.about(xf)
  do.call(plot.fv,
          resolve.defaults(list(quote(xf)),
                           list(...),
                           list(main=xname)))
}

unitname.minconfit <- function(x) {
  unitname(x$fit)
}

"unitname<-.minconfit" <- function(x, value) {
  unitname(x$fit) <- value
  return(x)
}

as.fv.minconfit <- function(x) x$fit

######  convergence status of 'optim' object

optimConverged <- function(x) { x$convergence == 0 }

optimNsteps <- function(x) { x$counts[["function"]] }

optimStatus <- function(x, call=NULL) {
  cgce <- x$convergence
  neval <- x$counts[["function"]]
  switch(paste(cgce),
         "0" = {
           simpleMessage(
             paste("Converged successfully after",
                   neval, "function evaluations"),
             call)
         },
         "1" = simpleWarning(
           paste("Iteration limit maxit was reached after",
                 neval, "function evaluations"),
           call),
         "10" = simpleWarning("Nelder-Mead simplex was degenerate", call),
         "51"= {
           simpleWarning(
             paste("Warning message from L-BFGS-B method:",
                   sQuote(x$message)),
             call)
         },
         "52"={
           simpleError(
             paste("Error message from L-BFGS-B method:",
                   sQuote(x$message)),
             call)
         },
         simpleWarning(paste("Unrecognised error code", cgce), call)
         )
}

#' general code for collecting status reports

signalStatus <- function(x, errors.only=FALSE) {
  if(is.null(x)) return(invisible(NULL))
  stopifnot(inherits(x, "condition"))
  if(inherits(x, "error")) stop(x)
  if(inherits(x, "warning")) warning(x)
  if(inherits(x, "message") && !errors.only) message(x)
  return(invisible(NULL))
}

printStatus <- function(x, errors.only=FALSE) {
  if(is.null(x)) return(invisible(NULL))
  prefix <- if(inherits(x, "error")) "error: " else
            if(inherits(x, "warning")) "warning: " else NULL
  if(!is.null(prefix) || !errors.only)
    cat(paste(prefix, conditionMessage(x), "\n", sep=""))
  return(invisible(NULL))
}

accumulateStatus <- function(x, stats=NULL) {
  values      <- stats$values      %orifnull% list()
  frequencies <- stats$frequencies %orifnull% integer(0)
  if(inherits(x, c("error", "warning", "message"))) {
    same <- unlist(lapply(values, identical, y=x))
    if(any(same)) {
      i <- min(which(same))
      frequencies[i] <- frequencies[i] + 1L
    } else {
      values <- append(values, list(x))
      frequencies <- c(frequencies, 1L)
    }
  }
  stats <- list(values=values, frequencies=frequencies)
  return(stats)
}

printStatusList <- function(stats) {
  with(stats,
       {
         for(i in seq_along(values)) {
           printStatus(values[[i]])
           fi <- frequencies[i]
           splat("\t", paren(paste(fi, ngettext(fi, "time", "times"))))
         }
       }
  )
  invisible(NULL)
}

###############  applications (specific models)  ##################

getdataname <- function(defaultvalue, ..., dataname=NULL) {
  if(!is.null(dataname)) dataname else defaultvalue
}

thomas.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL,
                        q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
  dataname <- getdataname(short.deparse(substitute(X), 20), ...)
  if(inherits(X, "fv")) {
    K <- X
    if(!identical(attr(K, "fname")[1], "K"))
      warning("Argument X does not appear to be a K-function")
  } else if(inherits(X, "ppp")) {
    K <- Kest(X)
    dataname <- paste("Kest(", dataname, ")", sep="")
    if(is.null(lambda))
      lambda <- summary(X)$intensity
  } else
    stop("Unrecognised format for argument X")
  info <- spatstatClusterModelInfo("Thomas")
  startpar <- info$checkpar(startpar)
  theoret <- info$K
  result <- mincontrast(K, theoret, startpar,
                        ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax),
                        fvlab=list(label="%s[fit](r)",
                                   desc="minimum contrast fit of Thomas process"),
                        explain=list(dataname=dataname,
                                     fname=attr(K, "fname"),
                                     modelname="Thomas process"), ...)
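  ## Editorial example (a sketch, assuming the 'redwood' dataset from
  ## spatstat.data is available):
  ##    fit <- thomas.estK(redwood)
  ##    fit$par        # internal parameters (kappa, sigma2)
  ##    plot(fit)      # observed, theoretical and fitted K functions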
## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer meaningful model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } lgcp.estK <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar) ## digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } matclust.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar) theoret <- info$K funaux <- info$funaux result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } ## versions using pcf (suggested by Jan Wild) thomas.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
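  ## Editorial note: this is the pcf-based analogue of thomas.estK; e.g.
  ## (a sketch, again assuming 'redwood' is available)
  ##    fit <- thomas.estpcf(redwood)
  ##    fit <- thomas.estpcf(redwood, startpar=c(kappa=10, scale=0.1))
  ## The pair correlation function is estimated by pcf.ppp below unless
  ## X is already an "fv" object.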
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Thomas") startpar <- info$checkpar(startpar) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list( label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list( dataname=dataname, fname=attr(g, "fname"), modelname="Thomas process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } matclust.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar) theoret <- info$pcf funaux <- info$funaux ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } lgcp.estpcf <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar) ## digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$pcf result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } cauchy.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar) theoret <- info$K desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } cauchy.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } ## user-callable resolve.vargamma.shape <- function(..., nu.ker=NULL, nu.pcf=NULL, default = FALSE) { if(is.null(nu.ker) && is.null(nu.pcf)){ if(!default) stop("Must specify either nu.ker or nu.pcf", call.=FALSE) nu.ker <- -1/4 } if(!is.null(nu.ker) && !is.null(nu.pcf)) stop("Only one of nu.ker and nu.pcf should be specified", call.=FALSE) if(!is.null(nu.ker)) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 } else { check.1.real(nu.pcf) stopifnot(nu.pcf > 0) nu.ker <- (nu.pcf - 1)/2 } return(list(nu.ker=nu.ker, nu.pcf=nu.pcf)) } vargamma.estK <- function(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) if(missing(nu)){ nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker } check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar) theoret <- info$K ## test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu) margs <- cmodel$margs desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Variance Gamma process"), margs=margs, ...) 
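  ## Editorial note: 'nu' is the kernel shape nu.ker; the corresponding
  ## pcf shape is nu.pcf = 2*nu + 1 (see resolve.vargamma.shape above).
  ## For example (a sketch):
  ##    resolve.vargamma.shape(nu.ker=-1/4)    # gives nu.pcf = 1/2
  ##    fit <- vargamma.estK(redwood, nu=-1/4)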
## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } vargamma.estpcf <- function(X, startpar=c(kappa=1,scale=1), nu=-1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) if(missing(nu)){ ## nutmp <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent=TRUE) ## if(!inherits(nutmp, "try-error")) nu <- nutmp nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker } check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar) theoret <- info$pcf ## test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu) margs <- cmodel$margs ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Variance Gamma process"), margs=margs, ...) 
## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } spatstat.core/R/bw.pplHeat.R0000644000176200001440000000720014141452520015371 0ustar liggesusers#' #' bw.pplHeat.R #' #' Bandwidth selection for densityHeat.ppp #' by point process likelihood cross-validation #' #' Copyright (c) 2020 Adrian Baddeley, Tilman Davies and Suman Rakshit #' GNU Public Licence >= 2.0 bw.pplHeat <- function(X, ..., srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose=TRUE) { #' compute intensity estimates b <- HeatEstimates.ppp(X, ..., srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) lambda <- b$lambda h <- b$h hname <- b$hname #' compute likelihood cross-validation criterion CV <- rowSums(log(lambda)) iopt <- which.max(CV) result <- bw.optim(CV, h, iopt, criterion="Likelihood cross-validation", hname=hname, unitname=unitname(X)) return(result) } HeatEstimates.ppp <- function(X, ..., srange=NULL, ns=16, sigma=NULL, leaveoneout=FALSE, verbose=TRUE) { stopifnot(is.ppp(X)) nX <- npoints(X) ## trap a common error if(length(argh <- list(...)) && (is.null(nama <- names(argh)) || !nzchar(nama[[1L]])) && is.numeric(a <- argh[[1L]]) && length(a) == 1L) stop("Use argument 'sigma' to specify the maximum bandwidth!", call.=FALSE) ## determine candidate bandwidths if(is.numeric(sigma) && length(sigma)) { ## sigma is a vector of candidate bandwidths, or a maximum bandwidth sigMax <- max(sigma) fractions <- if(length(sigma) > 1) sigma/sigMax else geomseq(from=0.05, to=1, length.out=ns) } else if(is.im(sigma)) { ## sigma is an image giving the spatially-varying maximum bandwidth sigMax <- sigma fractions <- seq_len(ns)/ns } else if(is.null(sigma)) { #' make a sequence of candidate bandwidths if(!is.null(srange)) { check.range(srange) } else { nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2) } sigMax <- srange[2] sigmavalues <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) fractions <- sigmavalues/sigMax } else stop("Format of sigma is not understood") ## set up transition matrix and initial state a <- densityHeat.ppp(X, sigMax, ..., internal=list(setuponly=TRUE)) Y <- a$Y # initial state image u <- a$u # initial state vector (dropping NA) Xpos <- a$Xpos # location of each data point, index in 'u' A <- a$A # transition matrix, operates on 'u; Nstep <- a$Nstep # total number of iterations ## map desired sigma values to iteration numbers nits <- pmax(1L, pmin(Nstep, round(Nstep * fractions^2))) nits <- diff(c(0L,nits)) reciprocalpixelarea <- with(Y, 1/(xstep * ystep)) ## compute .... lambda <- matrix(nrow=ns, ncol=nX) if(!leaveoneout) { ## usual estimates for(k in seq_len(ns)) { for(l in seq_len(nits[k])) u <- u %*% A lambda[k, ] <- u[Xpos] } } else { ## compute leave-one-out estimates if(verbose) { cat("Processing", nX, "points ... 
") pstate <- list() } for(i in seq_len(nX)) { ## initial state = X[-i] ui <- u Xposi <- Xpos[i] ui[Xposi] <- ui[Xposi] - reciprocalpixelarea ## run iterations, pausing at each sigma value for(k in seq_len(ns)) { for(l in seq_len(nits[k])) ui <- ui %*% A lambda[k, i] <- ui[Xposi] } if(verbose) pstate <- progressreport(i, nX, state=pstate) } if(verbose) cat("Done.\n") } if(!is.im(sigma)) { h <- sigMax * fractions hname <- "sigma" } else { h <- fractions hname <- "fract" } return(list(lambda=lambda, h=h, hname=hname)) } spatstat.core/R/clusterinfo.R0000644000176200001440000007210514144333461015735 0ustar liggesusers## clusterinfo.R ## ## Lookup table of explicitly-known K functions and pcf ## and algorithms for computing sensible starting parameters ## ## $Revision: 1.25 $ $Date: 2021/08/08 08:55:42 $ .Spatstat.ClusterModelInfoTable <- list( Thomas=list( ## Thomas process: old par = (kappa, sigma2) (internally used everywhere) ## Thomas process: new par = (kappa, scale) (officially recommended for input/output) modelname = "Thomas process", # In modelname field of mincon fv obj. descname = "Thomas process", # In desc field of mincon fv obj. modelabbrev = "Thomas process", # In fitted obj. printmodelname = function(...) "Thomas process", # Used by print.kppm parnames = c("kappa", "sigma2"), clustargsnames = NULL, checkpar = function(par, old = TRUE, ...){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.", call.=FALSE) nam <- check.named.vector(par, c("kappa","sigma2"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "sigma2" par[2L] <- par[2L]^2 } if(!old){ names(par)[2L] <- "scale" par[2L] <- sqrt(par[2L]) } return(par) }, checkclustargs = function(margs, old = TRUE, ...) list(), resolvedots = function(...){ return(list(...)) }, # density function for the distance to offspring ddist = function(r, scale, ...) { 2 * pi * r * dnorm(r, 0, scale)/sqrt(2*pi*scale^2) }, ## Practical range of clusters range = function(...){ dots <- list(...) par <- dots$par # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]], dots$sigma, dots$par[["sigma"]])[1L] if(is.null(scale)) stop(paste("Argument ", sQuote("scale"), " must be given."), call.=FALSE) thresh <- dots$thresh if(!is.null(thresh)){ ## The squared length of isotropic Gaussian (sigma) ## is exponential with mean 2 sigma^2 rmax <- scale * sqrt(2 * qexp(thresh, lower.tail=FALSE)) ## old code ## ddist <- .Spatstat.ClusterModelInfoTable$Thomas$ddist ## kernel0 <- clusterkernel("Thomas", scale = scale)(0,0) ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0 ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root } else{ rmax <- 4*scale } return(rmax) }, kernel = function(par, rvals, ...) 
{
      scale <- sqrt(par[2L])
      dnorm(rvals, 0, scale)/sqrt(2*pi*scale^2)
    },
    isPCP=TRUE,
    ## K-function
    K = function(par,rvals, ...){
      if(any(par <= 0))
        return(rep.int(Inf, length(rvals)))
      pi*rvals^2+(1-exp(-rvals^2/(4*par[2L])))/par[1L]
    },
    ## pair correlation function
    pcf= function(par,rvals, ...){
      if(any(par <= 0))
        return(rep.int(Inf, length(rvals)))
      1 + exp(-rvals^2/(4 * par[2L]))/(4 * pi * par[1L] * par[2L])
    },
    ## gradient of pcf (contributed by Chiara Fend)
    Dpcf= function(par,rvals, ...){
      if(any(par <= 0)){
        dsigma2 <- rep.int(Inf, length(rvals))
        dkappa <- rep.int(Inf, length(rvals))
      } else {
        ## derivative of 1 + exp(-r^2/(4 s))/(4 pi kappa s) with respect
        ## to s = sigma2; the chain rule yields a factor rvals^2
        dsigma2 <- exp(-rvals^2/(4 * par[2L])) *
          (rvals^2/(4^2 * pi * par[1L] * par[2L]^3) -
           1/(4 * pi * par[1L] * par[2L]^2))
        dkappa <- -exp(-rvals^2/(4 * par[2L]))/(4 * pi * par[1L]^2 * par[2L])
      }
      out <- rbind(dkappa, dsigma2)
      rownames(out) <- c("kappa","sigma2")
      return(out)
    },
    ## sensible starting parameters
    selfstart = function(X) {
      kappa <- intensity(X)
      sigma2 <- 4 * mean(nndist(X))^2
      c(kappa=kappa, sigma2=sigma2)
    },
    ## meaningful model parameters
    interpret = function(par, lambda) {
      kappa <- par[["kappa"]]
      sigma <- sqrt(par[["sigma2"]])
      mu <- if(is.numeric(lambda) && length(lambda) == 1)
        lambda/kappa else NA
      c(kappa=kappa, sigma=sigma, mu=mu)
    }
  ),
  ## ...............................................
  MatClust=list(
    ## Matern Cluster process: old par = (kappa, R) (internally used everywhere)
    ## Matern Cluster process: new par = (kappa, scale) (officially recommended for input/output)
    modelname = "Matern cluster process", # In modelname field of mincon fv obj.
    descname = "Matern cluster process", # In desc field of mincon fv obj.
    modelabbrev = "Matern cluster process", # In fitted obj.
    printmodelname = function(...) "Matern cluster process", # Used by print.kppm
    parnames = c("kappa", "R"),
    clustargsnames = NULL,
    checkpar = function(par, old = TRUE, ...){
      if(is.null(par))
        par <- c(kappa=1,scale=1)
      if(any(par<=0))
        stop("par values must be positive.", call.=FALSE)
      nam <- check.named.vector(par, c("kappa","R"), onError="null")
      if(is.null(nam)) {
        check.named.vector(par, c("kappa","scale"))
        names(par)[2L] <- "R"
      }
      if(!old){
        names(par)[2L] <- "scale"
      }
      return(par)
    },
    # density function for the distance to offspring
    ddist = function(r, scale, ...) {
      ifelse(r>scale, 0, 2 * r / scale^2)
    },
    ## Practical range of clusters
    range = function(...){
      dots <- list(...)
      par <- dots$par
      # Choose the first of the possible supplied values for scale:
      scale <- c(dots$scale, dots$par[["scale"]], dots$R, dots$par[["R"]])[1L]
      if(is.null(scale))
        stop(paste("Argument ", sQuote("scale"), " must be given."),
             call.=FALSE)
      if(!is.null(dots$thresh))
        warning("Argument ", sQuote("thresh"),
                " is ignored for Matern Cluster model")
      return(scale)
    },
    checkclustargs = function(margs, old = TRUE, ...) list(),
    resolvedots = function(...){
      return(list(...))
    },
    kernel = function(par, rvals, ...)
{
      scale <- par[2L]
      ifelse(rvals>scale, 0, 1/(pi*scale^2))
    },
    isPCP=TRUE,
    K = function(par,rvals, ..., funaux){
      if(any(par <= 0))
        return(rep.int(Inf, length(rvals)))
      kappa <- par[1L]
      R <- par[2L]
      Hfun <- funaux$Hfun
      y <- pi * rvals^2 + (1/kappa) * Hfun(rvals/(2 * R))
      return(y)
    },
    pcf= function(par,rvals, ..., funaux){
      if(any(par <= 0))
        return(rep.int(Inf, length(rvals)))
      kappa <- par[1L]
      R <- par[2L]
      g <- funaux$g
      y <- 1 + (1/(pi * kappa * R^2)) * g(rvals/(2 * R))
      return(y)
    },
    Dpcf= function(par,rvals, ..., funaux){
      kappa <- par[1L]
      R <- par[2L]
      g <- funaux$g
      gprime <- funaux$gprime
      if(any(par <= 0)){
        dkappa <- rep.int(Inf, length(rvals))
        dR <- rep.int(Inf, length(rvals))
      } else {
        dkappa <- -g(rvals/(2 * R)) / (pi * kappa^2 * R^2)
        dR <- -2*g(rvals/(2 * R))/(pi * kappa * R^3) -
          (1/(pi * kappa * R^2)) * gprime(rvals/(2 * R))*rvals/(2*R^2)
      }
      out <- rbind(dkappa, dR)
      rownames(out) <- c("kappa","R")
      return(out)
    },
    funaux=list(
      Hfun=function(zz) {
        ok <- (zz < 1)
        h <- numeric(length(zz))
        h[!ok] <- 1
        z <- zz[ok]
        h[ok] <- 2 + (1/pi) * (
          (8 * z^2 - 4) * acos(z)
          - 2 * asin(z)
          + 4 * z * sqrt((1 - z^2)^3)
          - 6 * z * sqrt(1 - z^2)
        )
        return(h)
      },
      DOH=function(zz) {
        ok <- (zz < 1)
        h <- numeric(length(zz))
        h[!ok] <- 0
        z <- zz[ok]
        h[ok] <- (16/pi) * (z * acos(z) - (z^2) * sqrt(1 - z^2))
        return(h)
      },
      ## g(z) = DOH(z)/z has a limit at z=0.
      g=function(zz) {
        ok <- (zz < 1)
        h <- numeric(length(zz))
        h[!ok] <- 0
        z <- zz[ok]
        h[ok] <- (2/pi) * (acos(z) - z * sqrt(1 - z^2))
        return(h)
      },
      gprime=function(zz) {
        ok <- (zz < 1)
        h <- numeric(length(zz))
        h[!ok] <- 0
        z <- zz[ok]
        h[ok] <- -(2/pi) * 2 * sqrt(1 - z^2)
        return(h)
      }),
    ## sensible starting parameters
    selfstart = function(X) {
      kappa <- intensity(X)
      R <- 2 * mean(nndist(X))
      c(kappa=kappa, R=R)
    },
    ## meaningful model parameters
    interpret = function(par, lambda) {
      kappa <- par[["kappa"]]
      R     <- par[["R"]]
      mu <- if(is.numeric(lambda) && length(lambda) == 1)
        lambda/kappa else NA
      c(kappa=kappa, R=R, mu=mu)
    }
  ),
  ## ...............................................
  Cauchy=list(
    ## Neyman-Scott with Cauchy clusters: old par = (kappa, eta2) (internally used everywhere)
    ## Neyman-Scott with Cauchy clusters: new par = (kappa, scale) (officially recommended for input/output)
    modelname = "Neyman-Scott process with Cauchy kernel", # In modelname field of mincon fv obj.
    descname = "Neyman-Scott process with Cauchy kernel", # In desc field of mincon fv obj.
    modelabbrev = "Cauchy process", # In fitted obj.
    printmodelname = function(...) "Cauchy process", # Used by print.kppm
    parnames = c("kappa", "eta2"),
    clustargsnames = NULL,
    checkpar = function(par, old = TRUE, ...){
      if(is.null(par))
        par <- c(kappa=1,scale=1)
      if(any(par<=0))
        stop("par values must be positive.", call.=FALSE)
      nam <- check.named.vector(par, c("kappa","eta2"), onError="null")
      if(is.null(nam)) {
        check.named.vector(par, c("kappa","scale"))
        names(par)[2L] <- "eta2"
        par[2L] <- (2*par[2L])^2
      }
      if(!old){
        names(par)[2L] <- "scale"
        par[2L] <- sqrt(par[2L])/2
      }
      return(par)
    },
    checkclustargs = function(margs, old = TRUE, ...) list(),
    resolvedots = function(...){
      return(list(...))
    },
    # density function for the distance to offspring
    ddist = function(r, scale, ...) {
      r/(scale^2) * (1 + (r / scale)^2)^(-3/2)
    },
    ## Practical range of clusters
    range = function(...){
      dots <- list(...)
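      ## Editorial note: the practical range computed below is the radius
      ## containing mass 1-thresh of the offspring distribution. Since the
      ## integral of ddist up to r is 1 - (1+(r/scale)^2)^(-1/2), the
      ## default thresh=0.01 gives rmax = scale*sqrt(1/0.01^2 - 1),
      ## i.e. roughly 100 times the cluster scale.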
# Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]])[1L] if(is.null(scale)) stop(paste("Argument ", sQuote("scale"), " must be given."), call.=FALSE) thresh <- dots$thresh %orifnull% 0.01 ## integral of ddist(r) dr is 1 - (1+(r/scale)^2)^(-1/2) ## solve for integral = 1-thresh: rmax <- scale * sqrt(1/thresh^2 - 1) ## old code ## ddist <- .Spatstat.ClusterModelInfoTable$Cauchy$ddist ## kernel0 <- clusterkernel("Cauchy", scale = scale)(0,0) ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0 ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root return(rmax) }, kernel = function(par, rvals, ...) { scale <- sqrt(par[2L])/2 1/(2*pi*scale^2)*((1 + (rvals/scale)^2)^(-3/2)) }, isPCP=TRUE, K = function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) pi*rvals^2 + (1 - 1/sqrt(1 + rvals^2/par[2L]))/par[1L] }, pcf= function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) 1 + ((1 + rvals^2/par[2L])^(-1.5))/(2 * pi * par[2L] * par[1L]) }, Dpcf= function(par,rvals, ...){ if(any(par <= 0)){ dkappa <- rep.int(Inf, length(rvals)) deta2 <- rep.int(Inf, length(rvals)) } else { dkappa <- -(1 + rvals^2/par[2L])^(-1.5)/(2 * pi * par[2L] * par[1L]^2) deta2 <- 1.5 * rvals^2 * (1 + rvals^2/par[2L])^(-2.5)/(2 * par[2L]^3 * par[1L] * pi) - (1 + rvals^2/par[2L])^(-1.5)/(2*pi*par[1L]*par[2L]^2) } out <- rbind(dkappa, deta2) rownames(out) <- c("kappa","eta2") return(out) }, selfstart = function(X) { kappa <- intensity(X) eta2 <- 4 * mean(nndist(X))^2 c(kappa = kappa, eta2 = eta2) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- sqrt(par[["eta2"]])/2 mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), ## ............................................... VarGamma=list( ## Neyman-Scott with VarianceGamma/Bessel clusters: old par = (kappa, eta) (internally used everywhere) ## Neyman-Scott with VarianceGamma/Bessel clusters: new par = (kappa, scale) (officially recommended for input/output) modelname = "Neyman-Scott process with Variance Gamma kernel", # In modelname field of mincon fv obj. descname = "Neyman-Scott process with Variance Gamma kernel", # In desc field of mincon fv obj. modelabbrev = "Variance Gamma process", # In fitted obj. printmodelname = function(obj){ # Used by print.kppm paste0("Variance Gamma process (nu=", signif(obj$clustargs[["nu"]], 2), ")") }, parnames = c("kappa", "eta"), clustargsnames = "nu", checkpar = function(par, old = TRUE, ...){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.", call.=FALSE) nam <- check.named.vector(par, c("kappa","eta"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "eta" } if(!old) names(par)[2L] <- "scale" return(par) }, checkclustargs = function(margs, old = TRUE, ...){ if(!old) margs <- list(nu=margs$nu.ker) return(margs) }, resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) 
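      ## Editorial note: the shape parameter may arrive as 'nu' (current
      ## syntax) or as 'nu.ker'/'nu.pcf' (older syntax); for instance the
      ## calls (a sketch)
      ##    kppm(redwood ~ 1, "VarGamma", nu=-1/4)
      ##    kppm(redwood ~ 1, "VarGamma", nu.ker=-1/4)
      ## should resolve to the same 'margs' list constructed below.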
out <- list() nu <- dots$nu if(is.null(nu)){ nu <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent = TRUE) if(inherits(nu, "try-error")) nu <- -1/4 } else { check.1.real(nu) stopifnot(nu > -1/2) } out$margs <- list(nu.ker=nu, nu.pcf=2*nu+1) out$covmodel <- list(type="Kernel", model="VarGamma", margs=out$margs) return(out) }, # density function for the distance to offspring ddist = function(r, scale, nu, ...) { numer <- ((r/scale)^(nu+1)) * besselK(r/scale, nu) numer[r==0] <- 0 denom <- (2^nu) * scale * gamma(nu + 1) numer/denom }, ## Practical range of clusters range = function(...){ dots <- list(...) # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]])[1L] if(is.null(scale)) stop(paste("Argument ", sQuote("scale"), " must be given."), call.=FALSE) # Find value of nu: extra <- .Spatstat.ClusterModelInfoTable$VarGamma$resolvedots(...) nu <- .Spatstat.ClusterModelInfoTable$VarGamma$checkclustargs(extra$margs, old=FALSE)$nu if(is.null(nu)) stop(paste("Argument ", sQuote("nu"), " must be given."), call.=FALSE) thresh <- dots$thresh if(is.null(thresh)) thresh <- .001 ddist <- .Spatstat.ClusterModelInfoTable$VarGamma$ddist f1 <- function(rmx) { integrate(ddist, 0, rmx, scale=scale, nu=nu)$value - (1 - thresh) } f <- Vectorize(f1) ## old code ## kernel0 <- clusterkernel("VarGamma", scale = scale, nu = nu)(0,0) ## f <- function(r) ddist(r, scale = scale, nu = nu) - thresh*kernel0 rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root return(rmax) }, ## kernel function in polar coordinates (no angular argument). kernel = function(par, rvals, ..., margs) { scale <- as.numeric(par[2L]) nu <- margs$nu if(is.null(nu)) stop(paste("Argument ", sQuote("nu"), " is missing."), call.=FALSE) numer <- ((rvals/scale)^nu) * besselK(rvals/scale, nu) numer[rvals==0] <- ifelse(nu>0, 2^(nu-1)*gamma(nu), Inf) denom <- pi * (2^(nu+1)) * scale^2 * gamma(nu + 1) numer/denom }, isPCP=TRUE, K = local({ ## K function requires integration of pair correlation xgx <- function(x, par, nu.pcf) { ## x * pcf(x) without check on par values numer <- (x/par[2L])^nu.pcf * besselK(x/par[2L], nu.pcf) denom <- 2^(nu.pcf+1) * pi * par[2L]^2 * par[1L] * gamma(nu.pcf + 1) return(x * (1 + numer/denom)) } vargammaK <- function(par,rvals, ..., margs){ ## margs = list(.. nu.pcf.. ) if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf out <- numeric(length(rvals)) ok <- (rvals > 0) rvalsok <- rvals[ok] outok <- numeric(sum(ok)) for (i in 1:length(rvalsok)) outok[i] <- 2 * pi * integrate(xgx, lower=0, upper=rvalsok[i], par=par, nu.pcf=nu.pcf)$value out[ok] <- outok return(out) } ## Initiated integration in sub-subintervals, but it is unfinished! ## vargammaK <- function(par,rvals, ..., margs){ ## ## margs = list(.. nu.pcf.. ) ## if(any(par <= 0)) ## return(rep.int(Inf, length(rvals))) ## nu.pcf <- margs$nu.pcf ## out <- numeric(length(rvals)) ## out[1L] <- if(rvals[1L] == 0) 0 else ## integrate(xgx, lower=0, upper=rvals[1L], ## par = par, nu.pcf=nu.pcf)$value ## for (i in 2:length(rvals)) { ## delta <- integrate(xgx, ## lower=rvals[i-1L], upper=rvals[i], ## par=par, nu.pcf=nu.pcf) ## out[i]=out[i-1L]+delta$value ## } ## return(out) ## } vargammaK }), ## end of 'local' pcf= function(par,rvals, ..., margs){ ## margs = list(..nu.pcf..) 
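      ## Editorial note: the value computed below has the form
      ##    1 + sig2 * M(r/eta)
      ## where M is the Matern correlation with shape nu.pcf; at r=0,
      ## M=1, so the pcf equals 1 + 1/(4*pi*eta^2*nu.pcf*kappa).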
if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf sig2 <- 1 / (4 * pi * (par[2L]^2) * nu.pcf * par[1L]) denom <- 2^(nu.pcf - 1) * gamma(nu.pcf) rr <- rvals / par[2L] ## Matern correlation function fr <- ifelseXB(rr > 0, (rr^nu.pcf) * besselK(rr, nu.pcf) / denom, 1) return(1 + sig2 * fr) }, Dpcf = NULL, parhandler = function(..., nu.ker = -1/4) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 return(list(type="Kernel", model="VarGamma", margs=list(nu.ker=nu.ker, nu.pcf=nu.pcf))) }, ## sensible starting values selfstart = function(X) { kappa <- intensity(X) eta <- 2 * mean(nndist(X)) c(kappa=kappa, eta=eta) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- par[["eta"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), ## ............................................... LGCP=list( ## Log Gaussian Cox process: old par = (sigma2, alpha) (internally used everywhere) ## Log Gaussian Cox process: new par = (var, scale) (officially recommended for input/output) modelname = "Log-Gaussian Cox process", # In modelname field of mincon fv obj. descname = "LGCP", # In desc field of mincon fv obj. modelabbrev = "log-Gaussian Cox process", # In fitted obj. printmodelname = function(...) "log-Gaussian Cox process", # Used by print.kppm parnames = c("sigma2", "alpha"), checkpar = function(par, old = TRUE, ...){ if(is.null(par)) par <- c(var=1,scale=1) if(any(par<=0)) stop("par values must be positive.", call.=FALSE) nam <- check.named.vector(par, c("sigma2","alpha"), onError="null") if(is.null(nam)) { check.named.vector(par, c("var","scale")) names(par) <- c("sigma2", "alpha") } if(!old) names(par) <- c("var", "scale") return(par) }, checkclustargs = function(margs, old = TRUE, ...) return(margs), resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() cmod <- dots$covmodel model <- cmod$model %orifnull% dots$model %orifnull% "exponential" margs <- NULL if(!identical(model, "exponential")) { ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) attr(model, "modgen") <- modgen if(is.null(cmod)){ margsnam <- names(formals(modgen)) margsnam <- margsnam[!(margsnam %in% c("var", "scale"))] margs <- dots[nam %in% margsnam] } else{ margs <- cmod[names(cmod)!="model"] } } if(length(margs)==0) { margs <- NULL } else { ## detect anisotropic model if("Aniso" %in% names(margs)) stop("Anisotropic covariance models cannot be used", call.=FALSE) } out$margs <- margs out$model <- model out$covmodel <- list(type="Covariance", model=model, margs=margs) return(out) }, isPCP=FALSE, ## calls relevant covariance function from RandomFields package K = function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { ## For efficiency and to avoid need for RandomFields package integrand <- function(r,par,...) 
2*pi*r*exp(par[1L]*exp(-r/par[2L])) } else { kraeverRandomFields() integrand <- function(r,par,model,margs) { modgen <- attr(model, "modgen") if(length(margs) == 0) { mod <- modgen(var=par[1L], scale=par[2L]) } else { mod <- do.call(modgen, append(list(var=par[1L], scale=par[2L]), margs)) } 2*pi *r *exp(RandomFields::RFcov(model=mod, x=r)) } } nr <- length(rvals) th <- numeric(nr) if(spatstat.options("fastK.lgcp")) { ## integrate using Simpson's rule fvals <- integrand(r=rvals, par=par, model=model, margs=margs) th[1L] <- rvals[1L] * fvals[1L]/2 if(nr > 1) for(i in 2:nr) th[i] <- th[i-1L] + (rvals[i] - rvals[i-1L]) * (fvals[i] + fvals[i-1L])/2 } else { ## integrate using 'integrate' th[1L] <- if(rvals[1L] == 0) 0 else integrate(integrand,lower=0,upper=rvals[1L], par=par,model=model,margs=margs)$value for (i in 2:length(rvals)) { delta <- integrate(integrand, lower=rvals[i-1L],upper=rvals[i], par=par,model=model,margs=margs) th[i]=th[i-1L]+delta$value } } return(th) }, pcf= function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { ## For efficiency and to avoid need for RandomFields package gtheo <- exp(par[1L]*exp(-rvals/par[2L])) } else { kraeverRandomFields() modgen <- attr(model, "modgen") if(length(margs) == 0) { mod <- modgen(var=par[1L], scale=par[2L]) } else { mod <- do.call(modgen, append(list(var=par[1L], scale=par[2L]), margs)) } gtheo <- exp(RandomFields::RFcov(model=mod, x=rvals)) } return(gtheo) }, Dpcf= function(par,rvals, ..., model){ if(!identical(model, "exponential")) { stop("Gradient of the pcf not available for this model.") } dsigma2 <- exp(-rvals/par[2L]) * exp(par[1L]*exp(-rvals/par[2L])) dalpha <- rvals * par[1L] * exp(-rvals/par[2L]) * exp(par[1L]*exp(-rvals/par[2L]))/par[2L]^2 out <- rbind(dsigma2, dalpha) rownames(out) <- c("sigma2","alpha") return(out) }, parhandler=function(model = "exponential", ...) { if(!is.character(model)) stop("Covariance function model should be specified by name", call.=FALSE) margs <- c(...) if(!identical(model, "exponential")) { ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) attr(model, "modgen") <- modgen } return(list(type="Covariance", model=model, margs=margs)) }, ## sensible starting values selfstart = function(X) { alpha <- 2 * mean(nndist(X)) c(sigma2=1, alpha=alpha) }, ## meaningful model parameters interpret = function(par, lambda) { sigma2 <- par[["sigma2"]] alpha <- par[["alpha"]] mu <- if(is.numeric(lambda) && length(lambda) == 1 && lambda > 0) log(lambda) - sigma2/2 else NA c(sigma2=sigma2, alpha=alpha, mu=mu) } ) ) spatstatClusterModelInfo <- function(name, onlyPCP = FALSE) { if(inherits(name, "detpointprocfamily")) return(spatstatDPPModelInfo(name)) if(!is.character(name) || length(name) != 1) stop("Argument must be a single character string", call.=FALSE) TheTable <- .Spatstat.ClusterModelInfoTable nama2 <- names(TheTable) if(onlyPCP){ ok <- sapply(TheTable, getElement, name="isPCP") nama2 <- nama2[ok] } if(!(name %in% nama2)) stop(paste(sQuote(name), "is not recognised;", "valid names are", commasep(sQuote(nama2))), call.=FALSE) out <- TheTable[[name]] return(out) } spatstat.core/R/pool.R0000644000176200001440000002173214144333463014353 0ustar liggesusers#' #' pool.R #' #' pool Generic #' pool.fv #' pool.rat #' pool.fasp #' #' $Revision: 1.7 $ $Date: 2020/11/30 09:43:37 $ pool <- function(...) { UseMethod("pool") } pool.anylist <- function(x, ...) 
{ do.call(pool, append(x, list(...))) } ## ................................................ pool.fv <- local({ Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Cmul <- function(A, f) { force(A); force(f); eval.fv(f * A, relabel=FALSE) } pool.fv <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) n <- narg <- length(argh) if(narg == 0) return(NULL) if(narg == 1) return(argh[[1]]) ## validate isfv <- unlist(lapply(argh, is.fv)) if(!all(isfv)) stop("All arguments must be fv objects") argh <- do.call(harmonise, append(argh, list(strict=TRUE))) template <- vanilla.fv(argh[[1]]) ## compute products if(!is.null(weights)) { check.nvector(weights, narg, things="Functions") Y <- Map(Cmul, argh, weights) XY <- Map(Cmul, argh, weights^2) sumX <- sum(weights) sumX2 <- sum(weights^2) } else { ## default: weights=1 Y <- XY <- argh sumX <- sumX2 <- narg } ## sum sumY <- Reduce(Add, Y) attributes(sumY) <- attributes(template) ## ratio-of-sums Ratio <- eval.fv(sumY/sumX, relabel=FALSE) if(variance) { ## variance calculation meanX <- sumX/n meanY <- eval.fv(sumY/n, relabel=FALSE) sumY2 <- Reduce(Add, lapply(Y, Square)) varX <- (sumX2 - n * meanX^2)/(n-1) varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE) sumXY <- Reduce(Add, XY) covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE) ## variance by delta method relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2 - 2 * covXY/(meanX * meanY)), relabel=FALSE) Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE) ## two sigma CI hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE) loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE) } ## tweak labels of main estimate attributes(Ratio) <- attributes(template) if(relabel) Ratio <- prefixfv(Ratio, tagprefix="pool", descprefix="pooled ", lablprefix="") if(!variance) return(Ratio) ## tweak labels of variance terms attributes(Variance) <- attributes(template) Variance <- prefixfv(Variance, tagprefix="var", descprefix="delta-method variance estimate of ", lablprefix="bold(var)~") attributes(hiCI) <- attributes(loCI) <- attributes(template) hiCI <- prefixfv(hiCI, tagprefix="hi", descprefix="upper limit of two-sigma CI based on ", lablprefix="bold(hi)~") loCI <- prefixfv(loCI, tagprefix="lo", descprefix="lower limit of two-sigma CI based on ", lablprefix="bold(lo)~") ## glue together result <- Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) ## don't plot variances, by default fvnames(result, ".") <- setdiff(fvnames(result, "."), fvnames(Variance, ".")) return(result) } pool.fv }) ## ................................................ pool.rat <- local({ Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Mul <- function(A,B){ force(A); force(B); eval.fv(A*B, relabel=FALSE) } pool.rat <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) 
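    ## (The pooled estimate computed below is the ratio of sums
    ## sum(numerators)/sum(denominators); its variance is obtained by
    ## the delta method from the empirical variances and covariance of
    ## the numerators and denominators, as in pool.fv above.)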
n <- narg <- length(argh) if(narg == 0) return(NULL) if(narg == 1) return(argh[[1]]) ## israt <- unlist(lapply(argh, inherits, what="rat")) if(any(bad <- !israt)) { nbad <- sum(bad) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "contain ratio (numerator/denominator) information")) } isfv <- unlist(lapply(argh, is.fv)) if(!all(isfv)) stop("All arguments must be fv objects") ## extract template <- vanilla.fv(argh[[1]]) Y <- lapply(argh, attr, which="numerator") X <- lapply(argh, attr, which="denominator") X <- do.call(harmonise, X) Y <- do.call(harmonise, Y) templateX <- vanilla.fv(X[[1]]) templateY <- vanilla.fv(Y[[1]]) ## compute products if(!is.null(weights)) { check.nvector(weights, narg, things="Functions") X <- Map(Mul, X, weights) Y <- Map(Mul, Y, weights) } ## sum sumX <- Reduce(Add, X) sumY <- Reduce(Add, Y) attributes(sumX) <- attributes(templateX) attributes(sumY) <- attributes(templateY) ## ratio-of-sums Ratio <- eval.fv(sumY/sumX, relabel=FALSE) attributes(Ratio) <- attributes(template) ## variance calculation if(variance) { meanX <- eval.fv(sumX/n, relabel=FALSE) meanY <- eval.fv(sumY/n, relabel=FALSE) sumX2 <- Reduce(Add, lapply(X, Square)) sumY2 <- Reduce(Add, lapply(Y, Square)) varX <- eval.fv((sumX2 - n * meanX^2)/(n-1), relabel=FALSE) varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE) XY <- Map(Mul, X, Y) sumXY <- Reduce(Add, XY) covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE) ## variance by delta method relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2 - 2 * covXY/(meanX * meanY)), relabel=FALSE) Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE) attributes(Variance) <- attributes(template) ## two sigma CI hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE) loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE) attributes(hiCI) <- attributes(loCI) <- attributes(template) } ## dress up if(relabel) { Ratio <- prefixfv(Ratio, tagprefix="pool", descprefix="pooled ", lablprefix="") if(variance) { Variance <- prefixfv(Variance, tagprefix="var", descprefix="delta-method variance estimate of ", lablprefix="bold(var)~") hiCI <- prefixfv(hiCI, tagprefix="hi", descprefix="upper limit of two-sigma CI based on ", lablprefix="bold(hi)~") loCI <- prefixfv(loCI, tagprefix="lo", descprefix="lower limit of two-sigma CI based on ", lablprefix="bold(lo)~") } } result <- if(!variance) Ratio else Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) return(result) } pool.rat }) ## ........................................................... pool.fasp <- local({ pool.fasp <- function(...) { Alist <- list(...) Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") nA <- length(Alist) if(nA == 0) return(NULL) ## validate.... 
## All arguments must be fasp objects notfasp <- !unlist(lapply(Alist, inherits, what="fasp")) if(any(notfasp)) { n <- sum(notfasp) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notfasp)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("fasp")) stop(why) } ## All arguments must have envelopes notenv <- !unlist(lapply(Alist, has.env)) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "contain envelope data") stop(why) } if(nA == 1L) return(Alist[[1L]]) ## All arguments must have the same dimensions witches <- lapply(Alist, getElement, name="which") witch1 <- witches[[1L]] same <- unlist(lapply(witches, identical, y=witch1)) if(!all(same)) stop("Function arrays do not have the same array dimensions") ## OK. ## Pool envelopes at each position result <- Alist[[1L]] fns <- result$fns for(k in seq_along(fns)) { funks <- lapply(Alist, extractfun, k=k) fnk <- do.call(pool.envelope, funks) attr(fnk, "einfo")$Yname <- Yname fns[[k]] <- fnk } result$fns <- fns return(result) } has.env <- function(z) { all(unlist(lapply(z$fns, inherits, what="envelope"))) } extractfun <- function(z, k) { z$fns[[k]] } pool.fasp }) spatstat.core/R/marktable.R0000644000176200001440000000272114144333462015340 0ustar liggesusers# # marktable.R # # Tabulate mark frequencies in neighbourhood of each point # for multitype point patterns # # $Revision: 1.7 $ $Date: 2015/03/25 03:43:35 $ # # Requested by Ian Robertson "marktable" <- function(X, R, N, exclude=TRUE, collapse=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=FALSE)) stop("point pattern has no marks") gotR <- !missing(R) && !is.null(R) gotN <- !missing(N) && !is.null(N) if(gotN == gotR) stop("Exactly one of the arguments N and R should be given") stopifnot(is.logical(exclude) && length(exclude) == 1) m <- marks(X) if(!is.factor(m)) stop("marks must be a factor") if(gotR) { stopifnot(is.numeric(R) && length(R) == 1 && R > 0) #' identify close pairs p <- closepairs(X,R,what="indices") pi <- p$i pj <- p$j if(!exclude) { #' add identical pairs n <- X$n pi <- c(pi, 1:n) pj <- c(pj, 1:n) } } else { stopifnot(is.numeric(N) && length(N) == 1) ii <- seq_len(npoints(X)) nn <- nnwhich(X, k=1:N) if(N == 1) nn <- matrix(nn, ncol=1) if(!exclude) nn <- cbind(ii, nn) pi <- as.vector(row(nn)) pj <- as.vector(nn) } #' tabulate if(!collapse) { ## table for each point i <- factor(pi, levels=seq_len(npoints(X))) mj <- m[pj] mat <- table(point=i, mark=mj) } else { #' table by type mi <- m[pi] mj <- m[pj] mat <- table(point=mi, neighbour=mj) } return(mat) } spatstat.core/R/twostage.R0000644000176200001440000002736514144333464015250 0ustar liggesusers## ## twostage.R ## ## Two-stage Monte Carlo tests and envelopes ## ## $Revision: 1.17 $ $Date: 2019/10/09 04:52:11 $ ## bits.test <- function(X, ..., exponent=2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsim, reuse=FALSE, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Balanced Independent Two-stage Test") } dg.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse=TRUE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { if(!missing(nsimsub) && 
!relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsimsub, reuse=reuse, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Dao-Genton adjusted goodness-of-fit test") } twostage.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim, alternative=c("two.sided", "less", "greater"), reuse=FALSE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE, badXfatal=TRUE, testblurb="Two-stage Monte Carlo test") { Xname <- short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X) ## first-stage p-value if(verbose) cat("Applying first-stage test to original data... ") tX <- envelopeTest(X, ..., nsim=if(reuse) nsim else nsimsub, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=savepatterns || reuse, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) pX <- tX$p.value ## check special case afortiori <- !interpolate && !reuse && (nsimsub < nsim) && (pX == (1/(nsim+1)) || pX == 1) if(afortiori) { ## result is determined padj <- pX pY <- NULL } else { ## result is not yet determined if(!reuse) { if(verbose) cat("Repeating first-stage test... ") tXX <- envelopeTest(X, ..., nsim=nsim, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) ## extract simulated patterns Ylist <- attr(attr(tXX, "envelope"), "simpatterns") } else { Ylist <- attr(attr(tX, "envelope"), "simpatterns") } if(verbose) cat("Done.\n") ## apply same test to each simulated pattern if(verbose) cat(paste("Running second-stage tests on", nsim, "simulated patterns... ")) pY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] ## if X is a model, fit it to Yi. Otherwise the implicit model is CSR. 
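      ## (Each second-stage test below yields a p-value pY[i]; the
      ## adjusted p-value is then padj = (1 + sum(pY <= pX))/(1 + nsim).)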
if(Xismodel) Yi <- update(X, Yi) tYi <- envelopeTest(Yi, ..., nsim=nsimsub, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savepatterns=TRUE, verbose=FALSE, badXfatal=FALSE, envir.simul=env.here) pY[i] <- tYi$p.value } pY <- sort(pY) ## compute adjusted p-value padj <- (1 + sum(pY <= pX))/(1+nsim) } # pack up method <- tX$method method <- c(testblurb, paste("based on", method[1L]), paste("First stage:", method[2L]), method[-(1:2)], if(afortiori) { paren(paste("Second stage was omitted: p0 =", pX, "implies p-value =", padj)) } else if(reuse) { paste("Second stage: nested, ", nsimsub, "simulations for each first-stage simulation") } else { paste("Second stage:", nsim, "*", nsimsub, "nested simulations independent of first stage") } ) names(pX) <- "p0" result <- structure(list(statistic = pX, p.value = padj, method = method, data.name = Xname), class="htest") attr(result, "rinterval") <- attr(tX, "rinterval") attr(result, "pX") <- pX attr(result, "pY") <- pY if(savefuns || savepatterns) result <- hasenvelope(result, attr(tX, "envelope")) return(result) } dg.envelope <- function(X, ..., nsim=19, nsimsub=nsim-1, nrank=1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.envelope(X, ..., nsim=nsim, nsimsub=nsimsub, reuse=TRUE, nrank=nrank, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testlabel="bits") } bits.envelope <- function(X, ..., nsim=19, nrank=1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.envelope(X, ..., nsim=nsim, nsimsub=nsim, reuse=FALSE, nrank=nrank, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testlabel="bits") } twostage.envelope <- function(X, ..., nsim=19, nsimsub=nsim, nrank=1, alternative=c("two.sided", "less", "greater"), reuse=FALSE, leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE, badXfatal=TRUE, testlabel="twostage") { # Xname <- short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X) ############## first stage ################################## if(verbose) cat("Applying first-stage test to original data... ") tX <- envelopeTest(X, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, nsim=if(reuse) nsim else nsimsub, nrank=nrank, exponent=Inf, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) if(verbose) cat("Done.\n") envX <- attr(tX, "envelope") if(!reuse) { if(verbose) cat("Repeating first-stage test... ") tXX <- envelopeTest(X, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, nsim=nsim, nrank=nrank, exponent=Inf, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) ## extract simulated patterns Ylist <- attr(attr(tXX, "envelope"), "simpatterns") } else { Ylist <- attr(attr(tX, "envelope"), "simpatterns") } if(verbose) cat("Done.\n") ############## second stage ################################# ## apply same test to each simulated pattern if(verbose) cat(paste("Running tests on", nsim, "simulated patterns... 
")) pvalY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] # if X is a model, fit it to Yi. Otherwise the implicit model is CSR. if(Xismodel) Yi <- update(X, Yi) tYi <- envelopeTest(Yi, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, save.interpolant = FALSE, nsim=nsimsub, nrank=nrank, exponent=Inf, savepatterns=TRUE, verbose=FALSE, badXfatal=FALSE, envir.simul=env.here) pvalY[i] <- tYi$p.value } ## Find critical deviation if(!interpolate) { ## find critical rank 'l' rankY <- pvalY * (nsimsub + 1) twostage.rank <- orderstats(rankY, k=nrank) if(verbose) cat(paste0(testlabel, ".rank"), "=", twostage.rank, fill=TRUE) ## extract deviation values from top-level simulation simdev <- attr(tX, "statistics")[["sim"]] ## find critical deviation twostage.crit <- orderstats(simdev, decreasing=TRUE, k=twostage.rank) if(verbose) cat(paste0(testlabel, ".crit"), "=", twostage.crit, fill=TRUE) } else { ## compute estimated cdf of t fhat <- attr(tX, "density")[c("x", "y")] fhat$z <- with(fhat, cumsum(y)/sum(y)) # 'within' upsets package checker ## find critical (second stage) p-value pcrit <- orderstats(pvalY, k=nrank) ## compute corresponding upper quantile of estimated density of t twostage.crit <- with(fhat, { min(x[z >= 1 - pcrit]) }) } ## make fv object, for now refname <- if("theo" %in% names(envX)) "theo" else "mmean" fname <- attr(envX, "fname") result <- (as.fv(envX))[, c(fvnames(envX, ".x"), fvnames(envX, ".y"), refname)] refval <- envX[[refname]] ## newdata <- data.frame(hi=refval + twostage.crit, lo=refval - twostage.crit) newlabl <- c(makefvlabel(NULL, NULL, fname, "hi"), makefvlabel(NULL, NULL, fname, "lo")) alpha <- nrank/(nsim+1) alphatext <- paste0(100*alpha, "%%") newdesc <- c(paste("upper", alphatext, "critical boundary for %s"), paste("lower", alphatext, "critical boundary for %s")) switch(alternative, two.sided = { }, less = { newdata$hi <- Inf newlabl[1L] <- "infinity" newdesc[1L] <- "infinite upper limit" }, greater = { newdata$lo <- -Inf newlabl[2L] <- "infinity" newdesc[2L] <- "infinite lower limit" }) result <- bind.fv(result, newdata, newlabl, newdesc) fvnames(result, ".") <- rev(fvnames(result, ".")) fvnames(result, ".s") <- c("lo", "hi") if(savefuns || savepatterns) result <- hasenvelope(result, envX) return(result) } spatstat.core/R/blur.R0000644000176200001440000000363114144333461014342 0ustar liggesusers# # blur.R # # apply Gaussian blur to an image # # $Revision: 1.25 $ $Date: 2020/11/30 07:16:06 $ # Smooth.im <- function(X, sigma=NULL, ..., kernel="gaussian", normalise=FALSE, bleed=TRUE, varcov=NULL) { blur(X, sigma=sigma, ..., kernel=kernel, normalise=normalise, bleed=bleed, varcov=varcov) } blur <- function(x, sigma=NULL, ..., kernel="gaussian", normalise=FALSE, bleed=TRUE, varcov=NULL) { stopifnot(is.im(x)) # determine smoothing kernel sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if (sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1, 2)) stopifnot(all(sigma > 0)) } if (varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov) == 2) ngiven <- varcov.given + sigma.given switch(ngiven + 1L, { sigma <- (1/8) * min(diff(x$xrange), diff(x$yrange)) }, { if (sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if (!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # replace NA's in image raster by zeroes X <- fillNA(x, 0) # convolve with Gaussian Y <- 
second.moment.calc(X, sigma=sigma, ..., kernel=kernel, varcov=varcov, what="smooth") # if no bleeding, we restrict data to the original boundary if(!bleed) Y$v[is.na(x$v)] <- NA # if(!normalise) return(Y) # normalisation: # convert original image to window (0/1 image) Xone <- x isna <- is.na(x$v) Xone$v[isna] <- 0 Xone$v[!isna] <- 1 # convolve with Gaussian Ydenom <- second.moment.calc(Xone, sigma=sigma, ..., kernel=kernel, varcov=varcov, what="smooth") # normalise Z <- eval.im(Y/Ydenom) return(Z) } spatstat.core/R/Fest.R0000644000176200001440000001427514144333461014305 0ustar liggesusers# # Fest.R # # Computes estimates of the empty space function # # $Revision: 4.47 $ $Date: 2019/11/01 01:32:28 $ # Fhazard <- function(X, ...) { Z <- Fest(X, ...) if(!any(names(Z) == "km")) stop("Kaplan-Meier estimator 'km' is required for hazard rate") conserve <- attr(Z, "conserve") ## strip off Poisson F Z <- Z[, (colnames(Z) != "theo")] ## relabel the fv object Z <- rebadge.fv(Z, new.ylab=quote(h(r)), new.fname="h", tags=c("hazard", "theohaz"), new.tags=c("hazard", "theo"), new.labl=c("hat(%s)[km](r)", "%s[pois](r)"), new.desc=c( "Kaplan-Meier estimate of %s", "theoretical Poisson %s"), new.dotnames=c("hazard", "theo"), new.preferred="hazard") ## strip off unwanted bits Z <- Z[, c("r", "hazard", "theo")] attr(Z, "conserve") <- conserve return(Z) } Fest <- function(X, ..., eps = NULL, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(list(...)$checkspacing) testme <- isTRUE(list(...)$testme) ## Intensity estimate W <- Window(X) npts <- npoints(X) lambda <- npts/area(W) ## Discretise window dwin <- as.mask(W, eps=eps) dX <- ppp(X$x, X$y, window=dwin, check=FALSE) ## Histogram breakpoints rmaxdefault <- rmax.rule("F", dwin, lambda) breaks <- handle.r.b.args(r, breaks, dwin, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(testme || (rorbgiven && checkspacing)) check.finespacing(rvals, if(is.null(eps)) NULL else eps/4, dwin, rmaxdefault=rmaxdefault, action="fatal", rname="r", context="in Fest(X, r)") ## choose correction(s) if(is.null(correction)) { correction <- c("rs", "km", "cs") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", cs="cs", ChiuStoyan="cs", Hanisch="cs", han="cs", best="km"), multi=TRUE) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(F(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="F") nr <- length(rvals) zeroes <- numeric(nr) ## compute distances and censoring distances if(X$window$type == "rectangle") { ## original data were in a rectangle ## output of exactdt() is sufficient e <- exactdt(dX) dist <- e$d bdry <- e$b if(!is.null(domain)) { ok <- inside.owin(raster.xy(e$w), , domain) dist <- dist[ok] bdry <- bdry[ok] } } else { ## window is irregular.. 
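    ## so there is no analytic shortcut: compute the distance transform
    ## on the discretised mask, with boundary distances pixel by pixel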
# Distance transform & boundary distance for all pixels e <- exactdt(dX) b <- bdist.pixels(dX$window, style="matrix") ## select only those pixels inside mask mm <- dwin$m if(!is.null(domain)) { ok <- inside.owin(raster.xy(e$w), , domain) mm <- as.vector(mm) & ok } dist <- e$d[mm] bdry <- b[mm] } ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## start calculating estimates of F if("none" %in% correction) { ## UNCORRECTED e.d.f. of empty space distances if(npts == 0) edf <- zeroes else { hh <- hist(dist[dist <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(dist) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("cs" %in% correction) { ## Chiu-Stoyan correction if(npts == 0) cs <- zeroes else { ## uncensored distances x <- dist[d] ## weights a <- eroded.areas(W, rvals) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts H <- cumsum(h/a) cs <- H/max(H[is.finite(H)]) } ## add to fv object Z <- bind.fv(Z, data.frame(cs=cs), "hat(%s)[cs](r)", "Chiu-Stoyan estimate of %s", "cs") } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km, want.km, want.km) tags <- c("rs", "km", "hazard", "theohaz")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard h(r)")[selection] if(npts == 0) { result <- as.data.frame(matrix(0, nr, length(tags))) names(result) <- tags } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) result$theohaz <- 2 * pi * lambda * rvals result <- as.data.frame(result[tags]) } ## add to fv object Z <- bind.fv(Z, result, labels, descr, if(want.km) "km" else "rs") } ## wrap up unitname(Z) <- unitname(X) ## remove 'hazard' from the dotnames nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) ## determine recommended plot range attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.9])) ## arguments to be used in envelope, etc attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } spatstat.core/R/exactMPLEstrauss.R0000644000176200001440000000400314144333462016600 0ustar liggesusers# # exactMPLEstrauss.R # # 'exact' MPLE for stationary Strauss process # # $Revision: 1.6 $ $Date: 2014/11/10 07:39:41 $ # exactMPLEstrauss <- local({ # main function exactMPLEstrauss <- function(X, R, ngrid=2048, plotit=FALSE, project=TRUE) { # n <- npoints(X) W <- as.owin(X) # border correction WminR <- erosion(W, R) bR <- (bdist.points(X) >= R) nR <- sum(bR) # evaluate neighbour counts for data points Tcounts <- crosspaircounts(X, X, R) - 1L sumT <- sum(Tcounts[bR]) # determine the coefficients a_k for k = 0, 1, ... Z <- scanmeasure(X, R, dimyx=ngrid) Z <- Z[WminR, drop=FALSE] kcounts <- tabulate(as.vector(Z$v) + 1L) pixarea <- with(Z, xstep * ystep) A <- kcounts * pixarea # find optimal log(gamma) op <- optim(log(0.5), lpl, sco, method="L-BFGS-B", control=list(fnscale=-1), lower=-Inf, upper=if(project) 0 else Inf, A=A, sumT=sumT, nR=nR) loggamma <- op$par # plot? 
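    # (The optional plot below shows the profile log pseudolikelihood as
    # a function of log(gamma), with the optimum marked by a dotted line.)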
if(plotit) { x <- seq(log(1e-4), if(project) 0 else log(1e4), length=512) plot(x, lpl(x, A, sumT, nR), type="l", xlab=expression(log(gamma)), ylab=expression(log(PL(gamma)))) abline(v=loggamma, lty=3) } # derive optimal beta kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, loggamma)) beta <- nR/polypart logbeta <- log(beta) result <- c(logbeta, loggamma) names(result) <- c("(Intercept)", "Interaction") return(result) } # helper functions (vectorised) # log pseudolikelihood lpl <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, theta)) nR * (log(nR) - log(polypart) - 1) + theta * sumT } # pseudoscore sco <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <- length(A) - 1L kseq <- 0:kmax mat <- exp(outer(kseq, theta)) polypart <- A %*% mat Dpolypart <- (A * kseq) %*% mat sumT - nR * Dpolypart/polypart } exactMPLEstrauss }) spatstat.core/R/strausshard.R0000644000176200001440000001611214144333464015742 0ustar liggesusers# # # strausshard.S # # $Revision: 2.37 $ $Date: 2018/05/02 09:38:36 $ # # The Strauss/hard core process # # StraussHard() create an instance of the Strauss-hardcore process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # StraussHard <- local({ BlankStraussHard <- list( name = "Strauss - hard core process", creator = "StraussHard", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- (d <= par$r) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL), # filled in later parnames = c("interaction distance", "hard core distance"), hasInf = TRUE, selfstart = function(X, self) { # self starter for StraussHard nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } r <- self$par$r md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "hard core must be zero")) return(StraussHard(r=r, hc=0)) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) StraussHard(r=r, hc = hcX) }, init = function(self) { r <- self$par$r hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc)) { if(!is.numeric(hc) || hc <= 0) stop("hard core distance hc must be a positive number, or NA") if(!is.numeric(r) || length(r) != 1 || r <= hc) stop("interaction distance r must be a number greater than hc") } }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma)) }, project = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) if(is.finite(loggamma)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
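    # interaction range: the full interaction distance r in general, but
    # only the hard core distance hc when the fitted interaction is
    # negligible, i.e. when |log(gamma)| <= epsilon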
{ r <- self$par$r hc <- self$par$hc if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(hc) else return(r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { #' fast evaluator for StraussHard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for StraussHard") r <- potpars$r hc <- potpars$hc hclose <- (strausscounts(U, X, hc, EqualPairs) != 0) rclose <- strausscounts(U, X, r, EqualPairs) if(!splitInf) { answer <- ifelseAX(hclose, -Inf, rclose) answer <- matrix(answer, ncol=1) } else { answer <- ifelseAX(hclose, 0, rclose) answer <- matrix(answer, ncol=1) attr(answer, "-Inf") <- hclose } return(answer) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r hc <- self$par$hc return(pi * (hc^2 + (1-gamma) * (r^2 - hc^2))) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { r <- inte$par$r hc <- inte$par$hc #' positive part U <- as.ppp(X) nU <- npoints(U) cl <- weightedclosepairs(U, r, correction=correction, what="indices") if(is.null(cl)) # can't handle edge correction return(NULL) v <- sparseMatrix(i=cl$i, j=cl$j, x=cl$weight, dims=c(nU, nU)) #' hard core part hcl <- closepairs(U, hc, what="indices") ihit <- hcl$i jhit <- hcl$j vh <- NULL if(is.ppp(X)) { #' count conflicts between data points nhit <- as.integer(table(factor(jhit, levels=seq_len(nU)))) #' for a conflicting pair X[i], X[j], #' status of X[j] will change when X[i] is deleted #' iff X[j] is only in conflict with X[i] changes <- (nhit == 1) if(any(changes)) { changesJ <- changes[jhit] vh <- sparseMatrix(i=ihit[changesJ], j=jhit[changesJ], x=TRUE, dims=c(nU, nU)) } } else if(is.quad(X)) { #' count conflicts with existing data points izdat <- is.data(X) hitdata <- izdat[ihit] nhitdata <- as.integer(table(factor(jhit[hitdata], levels=seq_len(nU)))) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. 
changesJ <- (hitdata & (nhitdata[jhit] == 1)) | (!hitdata & (nhitdata[jhit] == 0)) if(any(changesJ)) vh <- sparseMatrix(i=ihit[changesJ], j=jhit[changesJ], x=TRUE, dims=c(nU, nU)) } else stop("X should be a ppp or quad object") # pack up if(!sparseOK) { v <- as.matrix(v) if(!is.null(vh)) vh <- as.matrix(vh) } attr(v, "deltaInf") <- vh return(v) } ) class(BlankStraussHard) <- "interact" StraussHard <- function(r, hc=NA) { instantiate.interact(BlankStraussHard, list(r=r, hc=hc)) } StraussHard <- intermaker(StraussHard, BlankStraussHard) StraussHard }) spatstat.core/R/eval.fv.R0000644000176200001440000002340414144333462014740 0ustar liggesusers# # eval.fv.R # # # eval.fv() Evaluate expressions involving fv objects # # compatible.fv() Check whether two fv objects are compatible # # $Revision: 1.41 $ $Date: 2020/12/06 02:32:12 $ # eval.fv <- local({ # main function eval.fv <- function(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) { # convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") # get the actual variables if(missing(envir)) { envir <- parent.frame() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames # find out which ones are fv objects fvs <- unlist(lapply(vars, is.fv)) nfuns <- sum(fvs) if(nfuns == 0) stop("No fv objects in this expression") # extract them funs <- vars[fvs] # restrict to columns identified by 'dotnames' if(dotonly) funs <- lapply(funs, restrict.to.dot) # map names if instructed if(!is.null(equiv)) funs <- lapply(funs, mapnames, map=equiv) # test whether the fv objects are compatible if(nfuns > 1L && !(do.call(compatible, unname(funs)))) { warning(paste(if(nfuns > 2) "some of" else NULL, "the functions", commasep(sQuote(names(funs))), "were not compatible: enforcing compatibility")) funs <- do.call(harmonise, append(funs, list(strict=TRUE))) } # copy first object as template result <- funs[[1L]] ## ensure 'conservation' info is retained conserve <- unname(lapply(funs, attr, which="conserve")) if(any(present <- !sapply(conserve, is.null))) { conserve <- do.call(resolve.defaults, conserve[present]) attr(result, "conserve") <- conserve } ## remove potential ratio info class(result) <- setdiff(class(result), "rat") attr(result, "numerator") <- attr(result, "denominator") <- NULL labl <- attr(result, "labl") origdotnames <- fvnames(result, ".") origshadenames <- fvnames(result, ".s") # determine which function estimates are supplied argname <- fvnames(result, ".x") nam <- names(result) ynames <- nam[nam != argname] # for each function estimate, evaluate expression for(yn in ynames) { # extract corresponding estimates from each fv object funvalues <- lapply(funs, "[[", i=yn) # insert into list of argument values vars[fvs] <- funvalues # evaluate result[[yn]] <- eval(e, vars, enclos=envir) } if(!relabel) return(result) # determine mathematical labels. 
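    # (For example, in eval.fv(K1 - K2) the labels of the result are
    # built by substituting the labels of K1 and K2 into the expression
    # 'K1 - K2'.)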
# 'yexp' determines y axis label # 'ylab' determines y label in printing and description # 'fname' is sprintf-ed into 'labl' for legend yexps <- lapply(funs, attr, which="yexp") ylabs <- lapply(funs, attr, which="ylab") fnames <- lapply(funs, getfname) # Repair 'fname' attributes if blank blank <- unlist(lapply(fnames, isblank)) if(any(blank)) { # Set function names to be object names as used in the expression for(i in which(blank)) attr(funs[[i]], "fname") <- fnames[[i]] <- names(funs)[i] } # Remove duplicated names # Typically occurs when combining several K functions, etc. # Tweak fv objects so their function names are their object names # as used in the expression if(anyDuplicated(fnames)) { newfnames <- names(funs) for(i in 1:nfuns) funs[[i]] <- rebadge.fv(funs[[i]], new.fname=newfnames[i]) fnames <- newfnames } if(anyDuplicated(ylabs)) { flatnames <- lapply(funs, flatfname) for(i in 1:nfuns) { new.ylab <- substitute(f(r), list(f=flatnames[[i]])) funs[[i]] <- rebadge.fv(funs[[i]], new.ylab=new.ylab) } ylabs <- lapply(funs, attr, which="ylab") } if(anyDuplicated(yexps)) { newfnames <- names(funs) for(i in 1:nfuns) { new.yexp <- substitute(f(r), list(f=as.name(newfnames[i]))) funs[[i]] <- rebadge.fv(funs[[i]], new.yexp=new.yexp) } yexps <- lapply(funs, attr, which="yexp") } # now compute y axis labels for the result attr(result, "yexp") <- eval(substitute(substitute(e, yexps), list(e=elang))) attr(result, "ylab") <- eval(substitute(substitute(e, ylabs), list(e=elang))) # compute fname equivalent to expression if(nfuns > 1L) { # take original expression the.fname <- paren(flatten(deparse(elang))) } else if(nzchar(oldname <- flatfname(funs[[1L]]))) { # replace object name in expression by its function name namemap <- list(as.name(oldname)) names(namemap) <- names(funs)[1L] the.fname <- deparse(eval(substitute(substitute(e, namemap), list(e=elang)))) } else the.fname <- names(funs)[1L] attr(result, "fname") <- the.fname # now compute the [modified] y labels labelmaps <- lapply(funs, fvlabelmap, dot=FALSE) for(yn in ynames) { # labels for corresponding columns of each argument funlabels <- lapply(labelmaps, "[[", i=yn) # form expression involving these columns labl[match(yn, names(result))] <- flatten(deparse(eval(substitute(substitute(e, f), list(e=elang, f=funlabels))))) } attr(result, "labl") <- labl # copy dotnames and shade names from template fvnames(result, ".") <- origdotnames[origdotnames %in% names(result)] if(!is.null(origshadenames) && all(origshadenames %in% names(result))) fvnames(result, ".s") <- origshadenames return(result) } # helper functions restrict.to.dot <- function(z) { argu <- fvnames(z, ".x") dotn <- fvnames(z, ".") shadn <- fvnames(z, ".s") ok <- colnames(z) %in% unique(c(argu, dotn, shadn)) return(z[, ok]) } getfname <- function(x) { if(!is.null(y <- attr(x, "fname"))) y else "" } flatten <- function(x) { paste(x, collapse=" ") } mapnames <- function(x, map=NULL) { colnames(x) <- mapstrings(colnames(x), map=map) fvnames(x, ".y") <- mapstrings(fvnames(x, ".y"), map=map) return(x) } isblank <- function(z) { !any(nzchar(z)) } eval.fv }) compatible.fv <- local({ approx.equal <- function(x, y) { max(abs(x-y)) <= .Machine$double.eps } compatible.fv <- function(A, B, ..., samenames=TRUE) { verifyclass(A, "fv") if(missing(B)) { answer <- if(length(...) == 0) TRUE else compatible(A, ...) return(answer) } verifyclass(B, "fv") ## is the function argument the same? 
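    ## (Two fv objects are compatible if they have the same function
    ## argument, optionally the same column names, and numerically equal
    ## vectors of argument values, as checked below.)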
samearg <- (fvnames(A, ".x") == fvnames(B, ".x")) if(!samearg) return(FALSE) if(samenames) { ## are all columns the same, and in the same order? namesmatch <- isTRUE(all.equal(names(A),names(B))) && samearg && (fvnames(A, ".y") == fvnames(B, ".y")) if(!namesmatch) return(FALSE) } ## are 'r' values the same ? rA <- with(A, .x) rB <- with(B, .x) rmatch <- (length(rA) == length(rB)) && approx.equal(rA, rB) if(!rmatch) return(FALSE) ## A and B are compatible if(length(list(...)) == 0) return(TRUE) ## recursion return(compatible.fv(B, ...)) } compatible.fv }) # force a list of functions to be compatible with regard to 'x' values harmonize.fv <- harmonise.fv <- local({ harmonise.fv <- function(..., strict=FALSE) { argh <- list(...) n <- length(argh) if(n == 0) return(argh) if(n == 1) { a1 <- argh[[1L]] if(is.fv(a1)) return(argh) if(is.list(a1) && all(sapply(a1, is.fv))) { argh <- a1 n <- length(argh) } } isfv <- sapply(argh, is.fv) if(!all(isfv)) stop("All arguments must be fv objects") if(n == 1) return(argh[[1L]]) ## determine range of argument ranges <- lapply(argh, argumentrange) xrange <- c(max(unlist(lapply(ranges, min))), min(unlist(lapply(ranges, max)))) if(diff(xrange) < 0) stop("No overlap in ranges of argument") if(strict) { ## find common column names and keep these keepnames <- Reduce(intersect, lapply(argh, colnames)) argh <- lapply(argh, "[", j=keepnames) } ## determine finest resolution xsteps <- sapply(argh, argumentstep) finest <- which.min(xsteps) ## extract argument values xx <- with(argh[[finest]], .x) xx <- xx[xrange[1L] <= xx & xx <= xrange[2L]] xrange <- range(xx) ## convert each fv object to a function funs <- lapply(argh, as.function, value="*") ## evaluate at common argument result <- vector(mode="list", length=n) for(i in 1:n) { ai <- argh[[i]] fi <- funs[[i]] xxval <- list(xx=xx) names(xxval) <- fvnames(ai, ".x") starnames <- fvnames(ai, "*") ## ensure they are given in same order as current columns starnames <- colnames(ai)[colnames(ai) %in% starnames] yyval <- lapply(starnames, function(v,xx,fi) fi(xx, v), xx=xx, fi=fi) names(yyval) <- starnames ri <- do.call(data.frame, append(xxval, yyval)) fva <- .Spatstat.FvAttrib attributes(ri)[fva] <- attributes(ai)[fva] class(ri) <- c("fv", class(ri)) attr(ri, "alim") <- intersect.ranges(attr(ai, "alim"), xrange) result[[i]] <- ri } names(result) <- names(argh) return(result) } argumentrange <- function(f) { range(with(f, .x)) } argumentstep <- function(f) { mean(diff(with(f, .x))) } harmonise.fv }) spatstat.core/R/interactions.R0000644000176200001440000002103614144333464016102 0ustar liggesusers# # interactions.R # # Works out which interaction is in force for a given point pattern # # $Revision: 1.25 $ $Date: 2016/04/25 02:34:40 $ # # impliedpresence <- function(tags, formula, df, extranames=character(0)) { # Determines, for each row of the data frame df, # whether the variable called tags[j] is required in the formula stopifnot(is.data.frame(df)) stopifnot(inherits(formula, "formula")) stopifnot(is.character(tags)) stopifnot(is.character(extranames)) # allvars <- variablesinformula(formula) if(any(tags %in% names(df))) stop(paste(sQuote("tags"), "conflicts with the name of a column of", sQuote("df"))) if(any(extranames %in% names(df))) stop(paste(sQuote("extranames"), "conflicts with the name of a column of", sQuote("df"))) # answer is a matrix nvars <- length(tags) nrows <- nrow(df) answer <- matrix(TRUE, nrows, nvars) # expand data frame with zeroes for each tags and extranames for(v in unique(c(tags, extranames))) df[ 
, v] <- 0 # loop for(i in seq(nrow(df))) { # make a fake data frame for the formula # using the data frame entries from row i # (includes 0 values for all other variables) pseudat <- df[i, , drop=FALSE] # use this to construct a fake model matrix mof0 <- model.frame(formula, pseudat) mom0 <- model.matrix(formula, mof0) for(j in seq(nvars)) { # Reset the variable called tags[j] to 1 pseudatj <- pseudat pseudatj[ , tags[j]] <- 1 # Now create the fake model matrix mofj <- model.frame(formula, pseudatj) momj <- model.matrix(formula, mofj) # Compare the two matrices answer[i,j] <- any(momj != mom0) } } return(answer) } active.interactions <- function(object) { stopifnot(inherits(object, "mppm")) interaction <- object$Inter$interaction iformula <- object$iformula nenv <- new.env() environment(iformula) <- nenv random <- object$random if(!is.null(random)) environment(random) <- nenv itags <- object$Inter$itags # The following are currently unused # ninter <- object$Inter$ninter # iused <- object$Inter$iused # trivial <- object$Inter$trivial # names of variables dat <- object$data datanames <- names(dat) dfnames <- summary(dat)$dfnames nondfnames <- datanames[!(datanames %in% dfnames)] nondfnames <- union(nondfnames, c("x", "y")) # extract data-frame values dfdata <- as.data.frame(dat[, dfnames, drop=FALSE], warn=FALSE) # determine which interaction(s) are in force answer <- impliedpresence(itags, iformula, dfdata, nondfnames) if(!is.null(random)) { if("|" %in% all.names(random)) { ## hack since model.matrix doesn't handle "|" as desired rnd <- gsub("|", "/", pasteFormula(random), fixed=TRUE) random <- as.formula(rnd, env=environment(random)) } answer2 <- impliedpresence(itags, random, dfdata, nondfnames) answer <- answer | answer2 } colnames(answer) <- names(interaction) return(answer) } impliedcoefficients <- function(object, tag) { stopifnot(inherits(object, "mppm")) stopifnot(is.character(tag) && length(tag) == 1) fitobj <- object$Fit$FIT Vnamelist <- object$Fit$Vnamelist has.random <- object$Info$has.random # Not currently used: # fitter <- object$Fit$fitter # interaction <- object$Inter$interaction # ninteract <- object$Inter$ninteract # trivial <- object$Inter$trivial # iused <- object$Inter$iused itags <- object$Inter$itags if(!(tag %in% itags)) stop(paste("Argument", dQuote("tag"), "is not one of the interaction names")) # (0) Set up # Identify the columns of the glm data frame # that are associated with this interpoint interaction vnames <- Vnamelist[[tag]] if(!is.character(vnames)) stop("Internal error - wrong format for vnames") # Check atomic type of each covariate Moadf <- as.list(object$Fit$moadf) islog <- sapply(Moadf, is.logical) isnum <- sapply(Moadf, is.numeric) isfac <- sapply(Moadf, is.factor) # Interaction variables must be numeric or logical if(any(bad <- !(isnum | islog)[vnames])) stop(paste("Internal error: the", ngettext(sum(bad), "variable", "variables"), commasep(sQuote(vnames[bad])), "should be numeric or logical"), call.=FALSE) # The answer is a matrix of coefficients, # with one row for each point pattern, # and one column for each vname answer <- matrix(, nrow=object$npat, ncol=length(vnames)) colnames(answer) <- vnames # (1) make a data frame of covariates # Names of all columns in glm data frame allnames <- names(Moadf) # Extract the design covariates df <- as.data.frame(object$data, warn=FALSE) # Names of all covariates other than design covariates othernames <- allnames[!(allnames %in% names(df))] # Add columns in which all other covariates are set to 0, FALSE, 
etc for(v in othernames) { df[, v] <- if(isnum[[v]]) 0 else if(islog[[v]]) FALSE else if(isfac[[v]]) { lev <- levels(Moadf[[v]]) factor(lev[1], levels=lev) } else sort(unique(Moadf[[v]]))[1] } # (2) evaluate linear predictor Coefs <- if(!has.random) coef(fitobj) else fixef(fitobj) suppressWarnings({ # eta0 <- predict(fitobj, newdata=df, type="link") eta0 <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link") }) # (3) for each vname in turn, # set the value of the vname to 1 and predict again for(j in seq_along(vnames)) { vnj <- vnames[j] df[[vnj]] <- 1 suppressWarnings({ # etaj <- predict(fitobj, newdata=df, type="link") etaj <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link") }) answer[ ,j] <- etaj - eta0 # set the value of this vname back to 0 df[[vnj]] <- 0 } return(answer) } illegal.iformula <- local({ illegal.iformula <- function(ifmla, itags, dfvarnames) { ## THIS IS TOO STRINGENT! ## Check validity of the interaction formula. ## ifmla is the formula. ## itags is the character vector of interaction names. ## Check whether the occurrences of `itags' in `iformula' are valid: ## e.g. no functions applied to `itags[i]'. ## Returns NULL if legal, otherwise a character string stopifnot(inherits(ifmla, "formula")) stopifnot(is.character(itags)) ## formula must not have a LHS if(length(ifmla) > 2) return("iformula must not have a left-hand side") ## variables in formula must be interaction tags or data frame variables varsinf <- variablesinformula(ifmla) if(!all(ok <- varsinf %in% c(itags, dfvarnames))) return(paste( ngettext(sum(!ok), "variable", "variables"), paste(dQuote(varsinf[!ok]), collapse=", "), "not allowed in iformula")) ## if formula uses no interaction tags, it's trivial if(!any(itags %in% variablesinformula(ifmla))) return(NULL) ## create terms object tt <- attributes(terms(ifmla)) ## extract all variables appearing in the formula vars <- as.list(tt$variables)[-1] ## nvars <- length(vars) varexprs <- lapply(vars, as.expression) varstrings <- sapply(varexprs, paste) ## Each variable may be a name or an expression v.is.name <- sapply(vars, is.name) ## a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(varexprs, all.vars) ## v.n.args <- sapply(v.args, length) v.has.itag <- sapply(lapply(v.args, "%in%", x=itags), any) ## interaction tags may only appear as names, not in functions if(any(nbg <- v.has.itag & !v.is.name)) return(paste("interaction tags may not appear inside a function:", paste(dQuote(varstrings[nbg]), collapse=", "))) ## Interaction between two itags is not defined ## Inspect the higher-order terms fax <- tt$factors if(prod(dim(fax)) == 0) return(NULL) ## rows are first order terms, columns are terms of order >= 1 fvars <- rownames(fax) fterms <- colnames(fax) fv.args <- lapply(fvars, variablesintext) ft.args <- lapply(fterms, variables.in.term, factors=fax, varnamelist=fv.args) ft.itags <- lapply(ft.args, intersect, y=itags) if(any(lengths(ft.itags) > 1)) return("Interaction between itags is not defined") return(NULL) } variables.in.term <- function(term, factors, varnamelist) { basis <- (factors[, term] != 0) unlist(varnamelist[basis]) } illegal.iformula }) spatstat.core/R/studpermutest.R0000644000176200001440000005627714144333464016347 0ustar liggesusers#' #' studpermtest.R #' #' Original by Ute Hahn 2014 #' #' $Revision: 1.10 $ $Date: 2019/10/16 02:36:54 $ #' #' Studentized permutation test for comparison of grouped point patterns; #' functions to generate these grouped point patterns; #' wrapper 
for test of reweighted second order stationarity. #' #' studpermu.test #' studentized permutation test for grouped point patterns #' interpreted version, random permutations only. #' A group needs to contain at least two point patterns with at least minpoints each. # #' X the data, may be a list of lists of point patterns, or a hyperframe #' formula if X is a hyperframe, relates point patterns to factor variables that #' determine the groups. If missing, the first column of X that contains #' a factor variable is used. #' summaryfunction the function used in the test #' ... additional arguments for summaryfunction #' rinterval r-interval where summaryfunction is evaluated. If NULL, the #' interval is calculated from spatstat defaults #' (intersection for all patterns) #' nperm number of random permutations #' use.Tbar use the alternative test statistic, for summary functions with #' roughly constant variance, such as K/r or L #' minpoints the minimum number of points a pattern needs to have. Patterns #' with fewer points are not used. #' rsteps discretization steps of the r-interval #' r arguments at which to evaluate summaryfunction, overrides rinterval #' Should normally not be given, replace by rinterval instead, #' this allows r_0 > 0. Also, there is no plausibility check for r so far #' arguments.in.data if TRUE, individual extra arguments to summary function that #' change are taken from X (which has to be a hyperframe then). #' Assumes that the first argument of summaryfunction always is the #' point pattern. #' This is meant for internal purposes (automatisation) # #' returns an object of classes htest and studpermutest, that can be plotted. The #' plot shows the summary functions for the groups (and the means if requested) studpermu.test <- local({ studpermu.test <- function (X, formula, summaryfunction = Kest, ..., rinterval = NULL, nperm = 999, use.Tbar = FALSE, # the alternative statistic, use with K/r or L minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) { #' ---- the loooong preliminaries ------- #' ---- argument checking paranoia ---- if (arguments.in.data & !is.hyperframe(X)) stop(paste("X needs to be a hyperframe", "if arguments for summary function are to be retrieved"), call.=FALSE) stopifnot(is.function(summaryfunction)) #' there could be more... #' first prepare the data if(is.hyperframe(X)) { if(dim(X)[2] < 2) stop(paste("Hyperframe X needs to contain at least 2 columns,", "one for patterns, one indicating groups"), call.=FALSE) data <- X # renaming for later. 
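      #' (Hypothetical usage sketch, not run: for a hyperframe H with a
      #' column 'pp' of point patterns and a factor column 'group',
      #' studpermu.test(H, pp ~ group, summaryfunction = Lest)
      #' compares the groups of patterns via their L-functions.)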
Xclass <- unclass(X)$vclass factorcandidate <- Xclass %in% c("integer", "numeric", "character", "factor") ppcandidate <- Xclass == "ppp" names(factorcandidate) <- names(ppcandidate) <- names(Xclass) <- Xnames <- names(X) if(all(!factorcandidate) || all(!ppcandidate)) stop(paste("Hyperframe X needs to contain at least a column", "with point patterns, and one indicating groups"), call.=FALSE) if(!missing(formula)){ #' safety precautions ;-) if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) if (length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) rhs <- rhs.of.formula(formula) ppname <- formula[[2]] if (!is.name(ppname)) stop("Left hand side of formula should be a single name") ppname <- paste(ppname) if(!ppcandidate[ppname]) stop(paste("Left hand side of formula", "should be the name of a column of point patterns"), call.=FALSE) groupvars <- all.vars(as.expression(rhs)) if(!all(groupvars %in% Xnames) || any(!factorcandidate[groupvars])) stop(paste("Not all variables on right hand side of formula", "can be interpreted as factors"), call.=FALSE) #' make the groups to be compared group <- interaction(lapply(as.data.frame(data[ , groupvars, drop=FALSE]), factor)) #' rename the point patterns, needs the patch newnames <- Xnames newnames[Xnames == ppname] <- "pp" names(data) <- newnames data$group <- group } else { #' No formula supplied. #' Choose first ppp column and first factor column to make pp and groups thepp <- which.max(ppcandidate) thegroup <- which.max(factorcandidate) #' fake formula for output of test result formula <- as.formula(paste( Xnames[thepp],"~", Xnames[thegroup])) newnames <- Xnames newnames[thepp] <- "pp" newnames[thegroup] <- "group" names(data) <- newnames data$group <- as.factor(data$group) } } else { #' X is not a hyperframe, but hopefully a list of ppp if(!is.list(X)) stop("X should be a hyperframe or a list of lists of point patterns") if (!is.list(X[[1]]) || !is.ppp(X[[1]][[1]])) stop("X is a list, but not a list of lists of point patterns") nams <- names(X) if(is.null(nams)) nams <- paste("group", seq_along(X)) pp <- list() group <- NULL for (i in seq_along(X)) { pp <- c(pp, X[[i]]) group <- c(group, rep(nams[i], length(X[[i]]))) } group <- as.factor(group) data <- hyperframe(pp = pp, group = group) ppname <- "pp" } framename <- short.deparse(substitute(X)) fooname <- short.deparse(substitute(summaryfunction)) #' sorting out the patterns that contain too few points OK <- sapply(data$pp, npoints) >= minpoints if((nbad <- sum(!OK)) > 0) warning(paste(nbad, "patterns have been discarded", "because they contained fewer than", minpoints, "points"), call.=FALSE) data <- data[OK, ,drop=FALSE] pp <- data$pp #' ---- the groups, #' or what remains after discarding the poor patterns with few points ----- #' check if at least two observations in each group groupi <- as.integer(data$group) ngroups <- max(groupi) if(ngroups < 2) stop(paste("Sorry, after discarding patterns with fewer than", minpoints, "points,", if(ngroups < 1) "nothing" else "only one group", "is left over.", "\n- nothing to compare, take a break!"), call.=FALSE) lev <- 1:ngroups m <- as.vector(table(groupi)) if (any(m < 3)) stop(paste("Data groups need to contain at least two patterns;", "\nafter discarding those with fewer than", minpoints, "points, the remaining group sizes are", commasep(m)), call.=FALSE) #' check if number of possible outcomes is small #' WAS: npossible <- 
factorial(sum(m))/prod(factorial(m))/prod(factorial(table(m)))
    lognpossible <- lgamma(sum(m)+1)-sum(lgamma(m+1))-sum(lgamma(table(m)+1))
    if (lognpossible < log(max(100, nperm)))
      warning("Don't expect exact results - group sizes are too small")
    #' --------- real preliminaries now ------
    #' get interval for arguments
    if(!is.null(r)){
      rinterval <- range(r)
      rsteps <- length(r)
    } else if (is.null(rinterval)) {
      foochar <- substr(fooname, 1, 1)
      if (foochar %in% c("p", "L")) foochar <- "K"
      if (fooname %in% c("Kscaled", "Lscaled")) foochar <- "Kscaled"
      rinterval <- c(0, min(with(data, rmax.rule(foochar, Window(pp),
                                                 intensity(pp)))))
    }
    ranger <- diff(range(rinterval))
    #' r sequence needs to start at 0 for Kest and such
    rr <- r %orifnull% seq(0, rinterval[2], length.out = rsteps + 1)
    taker <- rr >= rinterval[1] & rr <= rinterval[2] # used for testing
    #' now estimate the summary function, finally...
    #' TO DO: Match function call of summary function with data columns!
    #' use arguments.in.data, if applicable. This is for inhomogeneous summary
    #' functions
    #' Force all calls to summaryfunction to use the same edge correction,
    #' rather than allowing correction to depend on npoints
    needcorx <- "correction" %in% names(formals(summaryfunction))
    gavecorx <- "correction" %in% names(list(...))
    corx <- if(needcorx && !gavecorx) "best" else NULL
    #' --------- retrieve arguments for summary function from data, if applicable
    fvlist <-
      if(arguments.in.data) {
        #' use arguments in hyperframe 'data' as well as explicit arguments
        if(is.null(corx)) {
          multicall(summaryfunction, pp, data, r = rr, ...)
        } else {
          multicall(summaryfunction, pp, data, r = rr, ..., correction=corx)
        }
      } else {
        #' use explicit arguments only
        if(is.null(corx)) {
          with(data, summaryfunction(pp, r = rr, ...))
        } else {
          with(data, summaryfunction(pp, r = rr, ..., correction=corx))
        }
      }
    fvtemplate <- fvlist[[1]]
    valu <- attr(fvtemplate, "valu")
    argu <- attr(fvtemplate, "argu")
    foar <- sapply(lapply(fvlist, "[[", valu), "[", taker)
    #' --------- the real stuff --------------
    #' function that calculates the discrepancy
    #' slow version
    combs <- combn(lev, 2)
    #' --------- now do the real real stuff :-) -------------
    #' generate "simulated values" from random permutations.
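    #' (The test statistic integrates the studentized squared differences
    #' of the group mean curves: for each pair of groups (i,j), the
    #' contribution is the integral over the r-interval of
    #' (mean_i(r) - mean_j(r))^2 / (var_i(r)/m_i + var_j(r)/m_j),
    #' summed over all pairs of groups; see Tstat and Tbarstat below.)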
    #' Possible improvement for future work:
    #' if the number of all permutations (combs) is small,
    #' first generate all permutations and then
    #' sample from them, to improve precision.
    predigested <- list(lev=lev, foar=foar, m=m, combs=combs,
                        rrr=rr[taker], ranger=ranger)
    if(use.Tbar) {
      Tobs <- Tbarstat(groupi, predigested)
      Tsim <- replicate(nperm, Tbarstat(sample(groupi), predigested))
    } else {
      Tobs <- Tstat(groupi, predigested)
      Tsim <- replicate(nperm, Tstat(sample(groupi), predigested))
    }
    names(Tobs) <- if(use.Tbar) "Tbar" else "T"
    pval <- (1 + sum(Tobs < Tsim))/(1 + nperm)
    #' ----- make a test object -----
    method <- c("Studentized permutation test for grouped point patterns",
                if(is.hyperframe(X)) pasteFormula(formula) else NULL,
                choptext(ngroups, "groups:",
                         paste(levels(data$group), collapse=", ")),
                choptext("summary function:", paste0(fooname, ","),
                         "evaluated on r in", prange(rinterval)),
                choptext("test statistic:", if(use.Tbar) "Tbar," else "T,",
                         nperm, "random permutations"))
    fooshort <- switch(fooname,
                       pcf = "pair correlation ",
                       Kinhom = "inhomogeneous K-",
                       Linhom = "inhomogeneous L-",
                       Kscaled = "locally scaled K-",
                       Lscaled = "locally scaled L-",
                       paste(substr(fooname, 1, 1), "-", sep=""))
    alternative <- c(paste("not the same ", fooshort, "function", sep=""))
    testerg <- list(statistic = Tobs,
                    p.value = pval,
                    alternative = alternative,
                    method = method,
                    data.name = framename)
    class(testerg) <- c("studpermutest", "htest")
    #' Add things for plotting.
    #' Prepare the fvlist, so that it contains only the estimates used,
    fvs <- lapply(fvlist, "[.fv", j=c(argu, valu))
    #' with rinterval as alim.
    fvs <- lapply(fvs, "attr<-", which="alim", value=rinterval)
    testerg$curves <- hyperframe(fvs = fvs, groups = data$group)
    fvtheo <- fvlist[[1]]
    fvnames(fvtheo, ".y") <- "theo"
    attr(fvtheo, "alim") <- rinterval
    testerg$curvtheo <- fvtheo[ , c(argu, "theo")]
    #' group means
    grmn <- lapply(lev, splitmean, ind=groupi, f=foar)
    testerg$groupmeans <- lapply(grmn, makefv, xvals=rr[taker],
                                 template=fvtheo)
    return(testerg)
  }

  splitmean <- function(l, ind, f) {
    apply(f[ , ind == l], 1, mean)
  }
  splitvarn <- function(l, ind, f, m) {
    apply(f[ , ind == l], 1, var) / m[l]
  }
  studentstat <- function(i, grmean, grvar) {
    (grmean[, i[1]] - grmean[, i[2]])^2 / (grvar[i[1],] + grvar[i[2], ])
  }
  Tstat <- function(ind = groupi, predigested) {
    #' predigested should be a list with entries lev, foar, m, combs, rrr
    with(predigested, {
      grmean <- sapply(lev, splitmean, ind=ind, f=foar)
      grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m))
      y <- apply(combs, 2, studentstat, grmean=grmean, grvar=grvar)
      sum(apply(y, 2, trapint, x = rrr))
    })
  }
  intstudent <- function(i, rrr, grmean, meangrvar) {
    trapint(rrr, (grmean[, i[1]] - grmean[, i[2]])^2 /
                 (meangrvar[i[1]] + meangrvar[i[2]]))
  }
  Tbarstat <- function(ind = groupi, predigested) {
    #' predigested should be a list
    #' with entries lev, foar, m, combs, rrr, ranger
    with(predigested, {
      grmean <- sapply(lev, splitmean, ind=ind, f=foar)
      grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m))
      meangrvar <- apply(grvar, 1, trapint, x = rrr)/ranger
      sum(apply(combs, 2, intstudent,
                rrr=rrr, grmean=grmean, meangrvar=meangrvar))
      #' trapint(rr[taker], grvar[i[1],] + grvar[i[2], ]))))
    })
  }
  makefv <- function(yvals, xvals, template) {
    fdf <- data.frame(r = xvals, y = yvals)
    argu <- fvnames(template, ".x")
    valu <- fvnames(template, ".y")
    names(fdf) <- c(argu, valu)
    fv(fdf, argu = argu, ylab = attr(template, "ylab"), valu = valu,
       fmla = attr(template, "fmla"), alim = attr(template, "alim"))
  }
  #' Trapezoidal rule approximation to an integral.
  #' ------- Trapezoidal rule, with handling of NA values:
  #' they are simply ignored ----
  trapint <- function(x, y) {
    nonan <- !is.na(y)
    nn <- sum(nonan)
    if(nn < 2L) return(0)
    Y <- y[nonan]
    X <- x[nonan]
    0.5 * sum( (Y[-1] + Y[-nn]) * diff(X))
  }
  #' Call foo(x, further arguments) repeatedly;
  #' further arguments are taken from hyperframe H and ...
  multicall <- function(foo, x, H, ...) {
    stopifnot(is.hyperframe(H))
    if(is.hyperframe(x)) {
      x <- as.list(x)[[1]]
    } else if(!is.list(x))
      stop("in multicall: x should be a hyperframe or list", call.=FALSE)
    #' check that x and H have the same length
    nrows <- dim(H)[1]
    if(length(x) != nrows)
      stop(paste("in multicall: x and H need to have",
                 "the same number of rows or list elements"),
           call.=FALSE)
    dotargs <- list(...)
    hnames <- names(H)
    argnames <- names(formals(foo)) #' always assume first argument is given
    ppname <- argnames[1]
    argnames <- argnames[-1]
    dotmatch <- pmatch(names(dotargs), argnames)
    dotmatched <- dotmatch[!is.na(dotmatch)]
    dotuseargs <- dotargs[!is.na(dotmatch)]
    restargs <- if(length(dotmatched) > 0) argnames[-dotmatched] else argnames
    hmatch <- pmatch(hnames, restargs)
    huse <- !is.na(hmatch)
    lapply(seq_len(nrows),
           function(i) do.call(foo,
                               c(list(x[[i]]),
                                 as.list(H[i, huse, drop=TRUE, strip=FALSE]),
                                 dotargs)))
  }

  studpermu.test
})
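#' Illustrative usage sketch (not run at package load). The 'pyramidal'
#' dataset (a hyperframe with point patterns 'Neurons' grouped by 'group',
#' from spatstat.data) and this formula call are assumptions based on the
#' documented interface of studpermu.test; adjust to your own data.
if(FALSE) {
  testresult <- studpermu.test(pyramidal, Neurons ~ group, nperm=999)
  print(testresult)
  plot(testresult)   # uses plot.studpermutest, defined below
}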
#' ------------------- plot.studpermutest ---------------------------------
#
#' Plot the summary functions that were used in studpermu.test,
#' and also plot the group means, if requested.
#
#' x          a studpermutest object, the test result
#' fmla       a plot formula as in plot.fv; should be generic, using "." for values
#' ...        further plot parameters
#' col, lty, lwd   parameters (vectors) for plotting the individual summary
#'            functions, indexed by group if vectors
#' col.theo, lty.theo, lwd.theo   if not all NULL, the "theo" curve is also plotted
#' lwd.mean   a multiplier for the line width of the group means;
#'            if NULL, group means are not plotted; defaults to NULL
#' lty.mean, col.mean   self-explanatory
#' separately generate a separate plot for each group (then no legends are plotted)
#' meanonly   do not plot individual summary functions
#' legend     if TRUE, and plots are not separate, plot a legend
#' legendpos  position of the legend
#' lbox       if TRUE, draw a box around the legend; defaults to FALSE
#' add        whether to add to an existing plot

plot.studpermutest <- local({

  plot.studpermutest <- function(x, fmla, ...,
                                 lty = NULL, col = NULL, lwd = NULL,
                                 lty.theo = NULL, col.theo = NULL, lwd.theo = NULL,
                                 lwd.mean = if(meanonly) 1 else NULL,
                                 lty.mean = lty, col.mean = col,
                                 separately = FALSE, meanonly = FALSE,
                                 main = if(meanonly) "group means" else NULL,
                                 xlim = NULL, ylim = NULL, ylab = NULL,
                                 legend = !add, legendpos = "topleft",
                                 lbox = FALSE, add = FALSE) {
    stopifnot(inherits(x, "studpermutest"))
    env.user <- parent.frame()
    curvlists <- split(x$curves, x$curves$groups)
    ngroups <- length(curvlists)
    gnames <- names(curvlists)
    #' check whether the theoretical functions should be plotted
    plottheo <- !(is.null(lty.theo) & is.null(col.theo) & is.null(lwd.theo))
    #' prepare plot parameters for groups
    if(is.null(lty)) lty <- 1:ngroups
    if(is.null(col)) col <- 1:ngroups
    if(is.null(lwd)) lwd <- par("lwd")
    if(is.null(col.mean)) col.mean <- col
    if(is.null(lty.mean)) lty.mean <- lty
    lty <- rep(lty, length.out = ngroups)
    col <- rep(col, length.out = ngroups)
    lwd <- rep(lwd, length.out = ngroups)
    col.mean <- rep(col.mean, length.out = ngroups)
    lty.mean <- rep(lty.mean, length.out = ngroups)
    if(plottheo) {
      if(is.null(lty.theo)) lty.theo <- ngroups + 1 # par("lty")
      if(is.null(col.theo)) col.theo <- ngroups + 1 # par("col")
      if(is.null(lwd.theo)) lwd.theo <- par("lwd")
    }
    #' Passing the formula through ... unfortunately does not work
    #' for the axis labels, because the fvs contain only one variable.
    #' We have to build the labels ourselves.
    if(is.null(ylab)) {
      if(!missing(fmla)) {
        #' ugh: this becomes a bit of patchwork.
        fmla <- as.formula(fmla, env=env.user)
        map <- fvlabelmap(x$curvtheo)
        lhs <- lhs.of.formula(as.formula(fmla))
        ylab <- eval(substitute(substitute(le, mp),
                                list(le = lhs, mp = map)))
      } else ylab <- attr(x$curvtheo, "yexp")
    }
    if(missing(fmla)) fmla <- attr(x$curvtheo, "fmla")
    if(!is.null(lwd.mean)) lwd.Mean <- lwd.mean * lwd
    if(separately) {
      for(i in seq_along(gnames)) {
        if(!meanonly)
          plot.fvlist(curvlists[[i]]$fvs, fmla, ...,
                      col = col[i], lwd = lwd[i], lty = lty[i],
                      xlim = xlim, ylim = ylim, ylab = ylab,
                      main = gnames[i])
        if(!is.null(lwd.mean))
          plot(x$groupmeans[[i]], fmla, ...,
               col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i],
               main = gnames[i], add = !meanonly, ylim = ylim)
        if(plottheo)
          plot(x$curvtheo, fmla, ..., add = TRUE,
               col = col.theo, lwd = lwd.theo, lty = lty.theo)
      }
    } else {
      #' ---- TODO: simplify! They should all have the same x-range,
      #' so just check the y-range ----
      lims <- if(meanonly) {
        plot.fvlist(x$groupmeans, fmla, ..., limitsonly = TRUE)
      } else {
        as.data.frame(apply(sapply(curvlists,
                                   function(C) plot.fvlist(C$fvs, fmla, ...,
                                                           limitsonly = TRUE)),
                            1, range))
      }
      if(is.null(xlim)) xlim <- lims$xlim
      if(is.null(ylim)) ylim <- lims$ylim
      iadd <- add
      for(i in seq_along(gnames)) {
        if(!meanonly)
          plot.fvlist(curvlists[[i]]$fvs, fmla, ...,
                      col = col[i], lwd = lwd[i], lty = lty[i],
                      xlim = xlim, ylim = ylim, ylab = ylab,
                      main = main, add = iadd)
        iadd <- iadd | !meanonly
        if(!is.null(lwd.mean))
          plot(x$groupmeans[[i]], fmla, ...,
               col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i],
               add = iadd, xlim = xlim, ylim = ylim, ylab = ylab, main = main)
        if(plottheo)
          plot(x$curvtheo, fmla, ..., add = TRUE,
               col = col.theo, lwd = lwd.theo, lty = lty.theo,
               xlim = xlim, ylim = ylim, ylab = ylab, main = main)
        iadd <- TRUE
      }
      if(legend) {
        if(meanonly) {
          lwd <- lwd.Mean
          col <- col.mean
          lty <- lty.mean
        }
        if(plottheo) {
          gnames <- c(gnames, "Poisson mean")
          col <- c(col, col.theo)
          lty <- c(lty, lty.theo)
          lwd <- c(lwd, lwd.theo)
        }
        legend(legendpos, gnames, col = col, lty = lty, lwd = lwd,
               bty = ifelse(lbox, "o", "n"))
      }
    }
    return(invisible(NULL))
  }

  #' ------------------ helper function ----------------
  #' x: a list of fv objects, each with a plot method
  plot.fvlist <- function(x, fmla, ..., xlim=NULL, ylim=NULL,
                          add = FALSE, limitsonly = FALSE, main=NULL) {
    #' no safety precautions
    if(missing(fmla)) fmla <- attr(x[[1]], "fmla")
    if(!add | limitsonly) {
      lims <- sapply(x, plot, fmla, ..., limitsonly = TRUE)
      if(is.null(xlim)) xlim <- range(unlist(lims[1,]))
      if(is.null(ylim)) ylim <- range(unlist(lims[2,]))
      lims <- list(xlim=xlim, ylim=ylim)
      if(limitsonly) return(lims)
      plot(x[[1]], fmla, ..., xlim = xlim, ylim = ylim, main = main)
    } else plot(x[[1]], fmla, ..., add=TRUE)
    for(foo in x[-1]) plot(foo, fmla, ..., add=TRUE)
  }

  plot.studpermutest
})
spatstat.core/R/rat.R0000644000176200001440000000354014144333463014165 0ustar liggesusers#
# rat.R
#
# Ratio objects
#
# Numerator and denominator are stored as attributes
#
# $Revision: 1.13 $ $Date: 2020/11/30 09:43:44 $
#

rat <- function(ratio, numerator, denominator, check=TRUE) {
  if(check) {
    stopifnot(compatible(numerator, denominator))
    stopifnot(compatible(ratio, denominator))
  }
  attr(ratio, "numerator") <- numerator
  attr(ratio, "denominator") <- denominator
  class(ratio) <- unique(c("rat", class(ratio)))
  return(ratio)
}

print.rat <- function(x, ...) {
  NextMethod("print")
  cat("[Contains ratio information]\n")
  return(invisible(NULL))
}

compatible.rat <- function(A, B, ...) {
  NextMethod("compatible")
}

adjust.ratfv <- function(f, columns=fvnames(f, "*"),
                         numfactor=1, denfactor=1) {
  stopifnot(is.fv(f))
  f[,columns] <- (numfactor/denfactor) * as.data.frame(f)[,columns]
  if(numfactor != 1 && !is.null(num <- attr(f, "numerator"))) {
    num[,columns] <- numfactor * as.data.frame(num)[,columns]
    attr(f, "numerator") <- num
  }
  if(denfactor != 1 && !is.null(den <- attr(f, "denominator"))) {
    den[,columns] <- denfactor * as.data.frame(den)[,columns]
    attr(f, "denominator") <- den
  }
  return(f)
}

tweak.ratfv.entry <- function(x, ...) {
  # apply the same tweak to the function, numerator and denominator
  x <- tweak.fv.entry(x, ...)
  if(!is.null(num <- attr(x, "numerator")))
    attr(x, "numerator") <- tweak.fv.entry(num, ...)
  if(!is.null(den <- attr(x, "denominator")))
    attr(x, "denominator") <- tweak.fv.entry(den, ...)
  return(x)
}
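# Illustrative sketch (not run): 'rat' objects arise, for example, when a
# summary function is computed with ratio=TRUE. 'cells' is from
# spatstat.data; subsetting uses the "[.rat" method defined below.
if(FALSE) {
  K <- Kest(cells, ratio=TRUE)   # an fv object with class "rat"
  print(K)                       # prints "[Contains ratio information]"
  Ksub <- K[K$r <= 0.1, ]        # numerator and denominator are subsetted too
}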
"[.rat" <- function(x, ...) {
  if(!is.fv(x))
    stop("Not yet implemented for non-fv ratios")
  num <- attr(x, "numerator")
  den <- attr(x, "denominator")
  class(x) <- "fv"
  x <- x[...]
  den <- den[...]
  num <- num[...]
  attr(x, "numerator") <- num
  attr(x, "denominator") <- den
  class(x) <- unique(c("rat", class(x)))
  return(x)
}
spatstat.core/R/rhohat.R0000644000176200001440000007212714144333463014673 0ustar liggesusers#'
#' rhohat.R
#'
#' $Revision: 1.98 $ $Date: 2021/06/28 03:45:18 $
#'
#' Non-parametric estimation of a transformation rho(z) determining
#' the intensity function lambda(u) of a point process in terms of a
#' spatial covariate Z(u) through lambda(u) = rho(Z(u)).
#' More generally allows offsets etc.
#'
#' Copyright (c) Adrian Baddeley 2015-2021
#' GNU Public Licence GPL >= 2.0

rhohat <- function(object, covariate, ...) {
  UseMethod("rhohat")
}

rhohat.ppp <- rhohat.quad <-
  function(object, covariate, ...,
           baseline=NULL, weights=NULL,
           method=c("ratio", "reweight", "transform"),
           horvitz=FALSE,
           smoother=c("kernel", "local",
                      "decreasing", "increasing", "piecewise"),
           subset=NULL,
           dimyx=NULL, eps=NULL,
           n=512, bw="nrd0", adjust=1, from=NULL, to=NULL,
           bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) {
  callstring <- short.deparse(sys.call())
  smoother <- match.arg(smoother)
  method <- match.arg(method)
  if(missing(positiveCI))
    positiveCI <- (smoother == "local")
  if(missing(covname))
    covname <- sensiblevarname(short.deparse(substitute(covariate)), "X")
  if(is.null(adjust)) adjust <- 1
  # validate model
  if(is.null(baseline)) {
    model <- ppm(object ~1)
    reference <- "Lebesgue"
  } else {
    model <- ppm(object ~ offset(log(baseline)))
    reference <- "baseline"
  }
  modelcall <- NULL
  if(is.character(covariate) && length(covariate) == 1) {
    covname <- covariate
    switch(covname,
           x={ covariate <- function(x,y) { x } },
           y={ covariate <- function(x,y) { y } },
           stop("Unrecognised covariate name")
           )
    covunits <- unitname(data.ppm(model))
  } else {
    covunits <- NULL
  }
  W <- Window(data.ppm(model))
  if(!is.null(subset)) W <- W[subset, drop=FALSE]
  areaW <- area(W)
  rhohatEngine(model, covariate, reference, areaW, ...,
               subset=subset,
               weights=weights,
               method=method,
               horvitz=horvitz,
               smoother=smoother,
               resolution=list(dimyx=dimyx, eps=eps),
               n=n, bw=bw, adjust=adjust, from=from, to=to,
               bwref=bwref, covname=covname, covunits=covunits,
               confidence=confidence, positiveCI=positiveCI,
               breaks=breaks,
               modelcall=modelcall, callstring=callstring)
}
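#' Illustrative sketch (not run): estimating rho for a point pattern and a
#' covariate image. 'bei' and 'bei.extra' are from spatstat.data.
if(FALSE) {
  rho <- rhohat(bei, bei.extra$grad)   # intensity as a function of slope
  plot(rho)
}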
rhohat.ppm <- function(object, covariate, ...,
                       weights=NULL,
                       method=c("ratio", "reweight", "transform"),
                       horvitz=FALSE,
                       smoother=c("kernel", "local",
                                  "decreasing", "increasing", "piecewise"),
                       subset=NULL,
                       dimyx=NULL, eps=NULL,
                       n=512, bw="nrd0", adjust=1, from=NULL, to=NULL,
                       bwref=bw, covname, confidence=0.95, positiveCI,
                       breaks=NULL) {
  callstring <- short.deparse(sys.call())
  smoother <- match.arg(smoother)
  method <- match.arg(method)
  if(missing(positiveCI))
    positiveCI <- (smoother == "local")
  if(missing(covname))
    covname <- sensiblevarname(short.deparse(substitute(covariate)), "X")
  if(is.null(adjust)) adjust <- 1
  if("baseline" %in% names(list(...)))
    warning("Argument 'baseline' ignored: not available for rhohat.ppm")
  ## validate model
  model <- object
  reference <- "model"
  modelcall <- model$call
  if(is.character(covariate) && length(covariate) == 1) {
    covname <- covariate
    switch(covname,
           x={ covariate <- function(x,y) { x } },
           y={ covariate <- function(x,y) { y } },
           stop("Unrecognised covariate name")
           )
    covunits <- unitname(data.ppm(model))
  } else {
    covunits <- NULL
  }
  W <- Window(data.ppm(model))
  if(!is.null(subset)) W <- W[subset, drop=FALSE]
  areaW <- area(W)
  rhohatEngine(model, covariate, reference, areaW, ...,
               weights=weights,
               method=method,
               horvitz=horvitz,
               smoother=smoother,
               resolution=list(dimyx=dimyx, eps=eps),
               n=n, bw=bw, adjust=adjust, from=from, to=to,
               bwref=bwref, covname=covname, covunits=covunits,
               confidence=confidence, positiveCI=positiveCI,
               breaks=breaks,
               modelcall=modelcall, callstring=callstring)
}
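#' Illustrative sketch (not run): rho relative to a fitted model, i.e.
#' checking for residual dependence on a covariate. Assumes 'bei' and
#' 'bei.extra' from spatstat.data.
if(FALSE) {
  fit <- ppm(bei ~ elev, data=bei.extra)
  rho <- rhohat(fit, bei.extra$grad)   # values relative to the fitted model
  plot(rho)
}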
rhohatEngine <- function(model, covariate,
                         reference=c("Lebesgue", "model", "baseline"),
                         volume, ...,
                         subset=NULL,
                         weights=NULL,
                         method=c("ratio", "reweight", "transform"),
                         horvitz=FALSE,
                         smoother=c("kernel", "local",
                                    "decreasing", "increasing", "piecewise"),
                         resolution=list(),
                         evalCovarArgs=list(),
                         n=512, bw="nrd0", adjust=1, from=NULL, to=NULL,
                         bwref=bw, covname, covunits=NULL, confidence=0.95,
                         breaks=NULL,
                         modelcall=NULL, callstring="rhohat") {
  reference <- match.arg(reference)
  # evaluate the covariate at data points and at pixels
  stuff <- do.call(evalCovar,
                   c(list(model=model, covariate=covariate, subset=subset),
                     resolution, evalCovarArgs))
  # unpack
  values <- stuff$values
  # values at each data point
  ZX <- values$ZX
  lambdaX <- values$lambdaX
  # values at each pixel
  Zimage <- values$Zimage
  lambdaimage <- values$lambdaimage # could be multiple images
  # values at each pixel (for .ppp, .ppm) or quadrature point (for .lpp, .lppm)
  Zvalues <- values$Zvalues
  lambda <- values$lambda
  ## weights
  if(!is.null(weights)) {
    X <- data.ppm(model)
    if(is.im(weights))
      weights <- safelookup(weights, X)
    else if(is.function(weights))
      weights <- weights(X$x, X$y)
    else if(is.numeric(weights) && is.vector(as.numeric(weights)))
      check.nvector(weights, npoints(X))
    else stop(paste(sQuote("weights"),
                    "should be a vector, a pixel image, or a function"))
  }
  # normalising constants
  denom <- volume *
    (if(reference == "Lebesgue" || horvitz) 1 else mean(lambda))
  # info
  savestuff <- list(reference   = reference,
                    horvitz     = horvitz,
                    Zimage      = Zimage,
                    lambdaimage = lambdaimage)
  # calculate rho-hat
  result <- rhohatCalc(ZX, Zvalues, lambda, denom,
                       ...,
                       weights=weights,
                       lambdaX=lambdaX,
                       method=method,
                       horvitz=horvitz,
                       smoother=smoother,
                       n=n, bw=bw, adjust=adjust, from=from, to=to,
                       bwref=bwref, covname=covname,
                       confidence=confidence,
                       breaks=breaks,
                       covunits=covunits,
                       modelcall=modelcall, callstring=callstring,
                       savestuff=savestuff)
  return(result)
}

# basic calculation of rhohat from covariate values

rhohatCalc <- local({

  interpolate <- function(x, y) {
    if(inherits(x, "density") && missing(y))
      approxfun(x$x, x$y, rule=2)
    else
      approxfun(x, y, rule=2)
  }

  ## note: this function normalises the weights, like density.default
  LocfitRaw <- function(x, ..., weights=NULL) {
    if(is.null(weights)) weights <- 1
    requireNamespace("locfit", quietly=TRUE)
    do.call.matched(locfit::locfit.raw,
                    append(list(x=x, weights=weights), list(...)))
  }

  varlog <- function(obj, xx) {
    ## variance of log f-hat
    stopifnot(inherits(obj, "locfit"))
    if(!identical(obj$trans, exp))
      stop("internal error: locfit object does not have log link")
    ## the following call should have band="local"
    ## but that produces NaN's
    pred <- predict(obj, newdata=xx, se.fit=TRUE, what="coef")
    se <- pred$se.fit
    return(se^2)
  }

  rhohatCalc <- function(ZX, Zvalues, lambda, denom, ...,
                         weights=NULL, lambdaX,
                         method=c("ratio", "reweight", "transform"),
                         horvitz=FALSE,
                         smoother=c("kernel", "local",
                                    "decreasing", "increasing", "piecewise"),
                         n=512, bw="nrd0", adjust=1, from=NULL, to=NULL,
                         bwref=bw, covname, confidence=0.95, breaks=NULL,
                         positiveCI=(smoother == "local"), markovCI=TRUE,
                         covunits=NULL, modelcall=NULL, callstring=NULL,
                         savestuff=list()) {
    method <- match.arg(method)
    smoother <- match.arg(smoother)
    ## check availability of locfit package
    if(smoother == "local" && !requireNamespace("locfit", quietly=TRUE)) {
      warning(paste("In", paste0(dQuote(callstring), ":"),
                    "package", sQuote("locfit"), "is not available;",
                    "unable to perform local likelihood smoothing;",
                    "using kernel smoothing instead"),
              call.=FALSE)
      smoother <- "kernel"
    }
    ## validate
    stopifnot(is.numeric(ZX))
    stopifnot(is.numeric(Zvalues))
    stopifnot(is.numeric(lambda))
    stopifnot(length(lambda) == length(Zvalues))
    stopifnot(all(is.finite(lambda)))
    check.1.real(denom)
    ##
    if(horvitz) {
      ## data points will be weighted by reciprocal of model intensity
      weights <- (weights %orifnull% 1)/lambdaX
    }
    ## normalising constants
    nX <- if(is.null(weights)) length(ZX) else sum(weights)
    kappahat <- nX/denom
    ## limits
    Zrange <- range(ZX, Zvalues)
    if(is.null(from)) from <- Zrange[1]
    if(is.null(to)) to <- Zrange[2]
    if(from > Zrange[1] || to < Zrange[2])
      stop(paste("In", paste0(dQuote(callstring), ":"),
                 "interval [from, to] =", prange(c(from,to)),
                 "does not contain the range of data values =",
                 prange(Zrange)),
           call.=FALSE)
    ## critical constant for CI's
    crit <- qnorm((1+confidence)/2)
    percentage <- paste(round(100 * confidence), "%%", sep="")
    CIblurb <- paste("pointwise", percentage, "confidence interval")
    ## estimate densities
    switch(smoother,
           kernel = {
             ## ............... kernel smoothing ......................
             ## reference density (normalised) for calculation
             ghat <- density(Zvalues,
                             weights=if(horvitz) NULL else lambda/sum(lambda),
                             bw=bwref, adjust=adjust, n=n,
                             from=from, to=to, ...)
             xxx <- ghat$x
             ghatfun <- interpolate(ghat)
             ## relative density
             switch(method,
                    ratio={
                      ## compute ratio of smoothed densities
                      fhat <- unnormdensity(ZX, weights=weights,
                                            bw=bw, adjust=adjust,
                                            n=n, from=from, to=to, ...)
                      fhatfun <- interpolate(fhat)
                      Ghat.xxx <- denom * ghatfun(xxx)
                      yyy <- fhatfun(xxx)/Ghat.xxx
                      ## compute variance approximation
                      sigma <- fhat$bw
                      weights2 <- if(is.null(weights)) NULL else weights^2
                      fstar <- unnormdensity(ZX, weights=weights2,
                                             bw=bw, adjust=adjust/sqrt(2),
                                             n=n, from=from, to=to, ...)
                      fstarfun <- interpolate(fstar)
                      const <- 1/(2 * sigma * sqrt(pi))
                      vvv <- const * fstarfun(xxx)/Ghat.xxx^2
                    },
                    reweight={
                      ## weight Z values by reciprocal of reference
                      wt <- (weights %orifnull% 1)/(denom * ghatfun(ZX))
                      rhat <- unnormdensity(ZX, weights=wt,
                                            bw=bw, adjust=adjust,
                                            n=n, from=from, to=to, ...)
                      rhatfun <- interpolate(rhat)
                      yyy <- rhatfun(xxx)
                      ## compute variance approximation
                      sigma <- rhat$bw
                      rongstar <- unnormdensity(ZX, weights=wt^2,
                                                bw=bw, adjust=adjust/sqrt(2),
                                                n=n, from=from, to=to, ...)
                      rongstarfun <- interpolate(rongstar)
                      const <- 1/(2 * sigma * sqrt(pi))
                      vvv <- const * rongstarfun(xxx)
                    },
                    transform={
                      ## probability integral transform
                      Gfun <- interpolate(ghat$x, cumsum(ghat$y)/sum(ghat$y))
                      GZX <- Gfun(ZX)
                      ## smooth density on [0,1]
                      qhat <- unnormdensity(GZX, weights=weights,
                                            bw=bw, adjust=adjust,
                                            n=n, from=0, to=1, ...)
                      qhatfun <- interpolate(qhat)
                      ## edge effect correction
                      one <- density(seq(from=0, to=1, length.out=512),
                                     bw=qhat$bw, adjust=1,
                                     n=n, from=0, to=1, ...)
                      onefun <- interpolate(one)
                      ## apply to transformed values
                      Gxxx <- Gfun(xxx)
                      Dxxx <- denom * onefun(Gxxx)
                      yyy <- qhatfun(Gxxx)/Dxxx
                      ## compute variance approximation
                      sigma <- qhat$bw
                      weights2 <- if(is.null(weights)) NULL else weights^2
                      qstar <- unnormdensity(GZX, weights=weights2,
                                             bw=bw, adjust=adjust/sqrt(2),
                                             n=n, from=0, to=1, ...)
                      qstarfun <- interpolate(qstar)
                      const <- 1/(2 * sigma * sqrt(pi))
                      vvv <- const * qstarfun(Gxxx)/Dxxx^2
                    })
             vvvname <- "Variance of estimator"
             vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="")
             sd <- sqrt(vvv)
             if(!positiveCI) {
               hi <- yyy + crit * sd
               lo <- yyy - crit * sd
             } else {
               sdlog <- ifelse(yyy > 0, sd/yyy, 0)
               sss <- exp(crit * sdlog)
               hi <- yyy * sss
               lo <- yyy / sss
               if(markovCI) {
                 ## truncate extremely large confidence intervals
                 ## using Markov's Inequality
                 hi <- pmin(hi, yyy/(1-confidence))
               }
             }
           },
           local = {
             ## .................. local likelihood smoothing ...............
             xlim <- c(from, to)
             xxx <- seq(from, to, length=n)
             ## reference density
             ghat <- LocfitRaw(Zvalues,
                               weights=if(horvitz) NULL else lambda,
                               xlim=xlim, ...)
             ggg <- predict(ghat, xxx)
             ## relative density
             switch(method,
                    ratio={
                      ## compute ratio of smoothed densities
                      fhat <- LocfitRaw(ZX, weights=weights, xlim=xlim, ...)
                      fff <- predict(fhat, xxx)
                      yyy <- kappahat * fff/ggg
                      ## compute approximation to variance of log rho-hat
                      varlogN <- 1/nX
                      vvv <- varlog(fhat, xxx) + varlogN
                    },
                    reweight={
                      ## weight Z values by reciprocal of reference
                      wt <- (weights %orifnull% 1)/(denom * predict(ghat, ZX))
                      sumwt <- sum(wt)
                      rhat <- LocfitRaw(ZX, weights=wt, xlim=xlim, ...)
                      rrr <- predict(rhat, xxx)
                      yyy <- sumwt * rrr
                      ## compute approximation to variance of log rho-hat
                      varsumwt <- mean(yyy/(denom * ggg)) * diff(xlim)
                      varlogsumwt <- varsumwt/sumwt^2
                      vvv <- varlog(rhat, xxx) + varlogsumwt
                    },
                    transform={
                      ## probability integral transform
                      Gfun <- approxfun(xxx, cumsum(ggg)/sum(ggg), rule=2)
                      GZX <- Gfun(ZX)
                      ## smooth density on [0,1], end effect corrected
                      qhat <- LocfitRaw(GZX, weights=weights, xlim=c(0,1), ...)
                      ## apply to transformed values
                      Gxxx <- Gfun(xxx)
                      qqq <- predict(qhat, Gxxx)
                      yyy <- kappahat * qqq
                      ## compute approximation to variance of log rho-hat
                      varlogN <- 1/nX
                      vvv <- varlog(qhat, Gxxx) + varlogN
                    })
             vvvname <- "Variance of log of estimator"
             vvvlabel <- paste("bold(Var)~log(hat(%s)",
                               paren(covname), ")", sep="")
             sdlog <- sqrt(vvv)
             if(positiveCI) {
               sss <- exp(crit * sdlog)
               hi <- yyy * sss
               lo <- yyy / sss
               if(markovCI) {
                 ## truncate extremely large confidence intervals
                 ## using Markov's Inequality
                 hi <- pmin(hi, yyy/(1-confidence))
               }
             } else {
               hi <- yyy * (1 + crit * sdlog)
               lo <- yyy * (1 - crit * sdlog)
             }
           },
           increasing = ,
           decreasing = {
             ## .............. nonparametric maximum likelihood ............
             if(is.null(weights)) weights <- rep(1, length(ZX))
             if(method != "ratio")
               warning(paste("Argument method =", sQuote(method),
                             "is ignored when smoother =", sQuote(smoother)))
             #' observed (sorted)
             oX <- order(ZX)
             ZX <- ZX[oX]
             weights <- weights[oX]
             #' reference CDF
             G <- ewcdf(Zvalues, lambda)
             #' reference denominator ('area') at each observed value
             if(smoother == "decreasing") {
               areas <- denom * G(ZX)
             } else {
               areas <- denom * (1 - G(rev(ZX)))
               weights <- rev(weights)
             }
             #' maximum upper sets algorithm
             rho <- numeric(0)
             darea <- diff(c(0, areas))
             dcount <- weights
             while(length(darea) > 0) {
               u <- cumsum(dcount)/cumsum(darea)
               if(any(bad <- !is.finite(u))) # divide by zero etc
                 u[bad] <- max(u[!bad], 0)
               k <- which.max(u)
               rho <- c(rho, rep(u[k], k))
               darea <- darea[-(1:k)]
               dcount <- dcount[-(1:k)]
             }
             rho <- c(rho, 0)
             if(smoother == "increasing") rho <- rev(rho)
             #' compute as a stepfun
             rhofun <- stepfun(x = ZX, y = rho, right=TRUE, f=1)
             #' evaluate on a grid
             xlim <- c(from, to)
             xxx <- seq(from, to, length=n)
             yyy <- rhofun(xxx)
             #'
             vvv <- hi <- lo <- NULL
             savestuff$rhofun <- rhofun
           },
           piecewise = {
             ## .................. piecewise constant ............
             if(is.null(breaks)) {
               breaks <- pretty(c(from, to))
             } else {
               stopifnot(is.numeric(breaks))
               breaks <- exactCutBreaks(c(from, to), breaks)
             }
             if(method != "ratio") {
               warning(paste("Argument method =", sQuote(method),
                             "is not implemented when smoother = 'piecewise';",
                             "replaced by method = 'ratio'"))
               method <- "ratio"
             }
             ## convert numerical covariate values to factor
             cutZvalues <- cut(Zvalues, breaks=breaks)
             cutZX <- cut(ZX, breaks=breaks)
             ## denominator
             areas <- denom * tapplysum(lambda, list(cutZvalues))/sum(lambda)
             ## numerator
             counts <- if(is.null(weights)) {
               as.numeric(table(cutZX))
             } else {
               tapplysum(weights, list(cutZX))
             }
             ## estimate of rho(z) for each band of z values
             rhovals <- counts/areas
             #' convert to a stepfun
             rhofun <- stepfun(x = breaks, y=c(0, rhovals, 0))
             #' evaluate on a grid
             xlim <- c(from, to)
             xxx <- seq(from, to, length=n)
             yyy <- rhofun(xxx)
             #' variance
             vvvname <- "Variance of estimator"
             vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="")
             varnum <- if(is.null(weights)) counts else
                       tapplysum(weights^2, list(cutZX))
             varvals <- varnum/areas^2
             varfun <- stepfun(x = breaks, y=c(0, varvals, 0))
             vvv <- varfun(xxx)
             sd <- sqrt(vvv)
             if(!positiveCI) {
               hi <- yyy + crit * sd
               lo <- yyy - crit * sd
             } else {
               sdlog <- ifelse(yyy > 0, sd/yyy, 0)
               sss <- exp(crit * sdlog)
               hi <- yyy * sss
               lo <- yyy / sss
               if(markovCI) {
                 ## truncate extremely large confidence intervals
                 ## using Markov's Inequality
                 hi <- pmin(hi, yyy/(1-confidence))
               }
             }
             ## pack up
             savestuff$rhofun <- rhofun
             savestuff$breaks <- breaks
           })
    ## pack into fv object
    df <- data.frame(xxx=xxx, rho=yyy, ave=kappahat)
    names(df)[1] <- covname
    desc <- c(paste("Covariate", covname),
              "Estimated intensity",
              "Average intensity")
    parencov <- paren(covname)
    labl <- c(covname, paste0("hat(%s)", parencov), "bar(%s)")
    if(did.variance <- !is.null(vvv)) {
      df <- cbind(df, data.frame(var=vvv, hi=hi, lo=lo))
      desc <- c(desc,
                vvvname,
                paste("Upper limit of", CIblurb),
                paste("Lower limit of", CIblurb))
      labl <- c(labl,
                vvvlabel,
                paste0("%s[hi]", parencov),
                paste0("%s[lo]", parencov))
    }
    rslt <- fv(df,
               argu=covname,
               ylab=substitute(rho(X), list(X=as.name(covname))),
               valu="rho",
               fmla= as.formula(paste(". ~ ", covname)),
               alim=c(from, to),
               labl=labl,
               desc=desc,
               unitname=covunits,
               fname="rho",
               yexp=substitute(rho(X), list(X=as.name(covname))))
    if(did.variance) {
      fvnames(rslt, ".") <- c("rho", "ave", "hi", "lo")
      fvnames(rslt, ".s") <- c("hi", "lo")
    } else fvnames(rslt, ".") <- c("rho", "ave")
    ## pack up
    class(rslt) <- c("rhohat", class(rslt))
    ## add info
    stuff <- list(modelcall  = modelcall,
                  callstring = callstring,
                  sigma      = switch(smoother, kernel=sigma, local=NULL),
                  covname    = paste(covname, collapse=""),
                  ZX         = ZX,
                  lambda     = lambda,
                  method     = method,
                  smoother   = smoother,
                  confidence = confidence,
                  positiveCI = positiveCI)
    attr(rslt, "stuff") <- append(stuff, savestuff)
    return(rslt)
  }

  rhohatCalc
})

## ........... end of 'rhohatCalc' .................................
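#' Illustrative sketch (not run): the 'smoother' argument selects among the
#' estimators implemented above, e.g. a monotone or piecewise-constant fit.
#' Datasets from spatstat.data, as in the earlier sketches.
if(FALSE) {
  rho.up <- rhohat(bei, bei.extra$grad, smoother="increasing")
  rho.pc <- rhohat(bei, bei.extra$grad, smoother="piecewise")
  plot(rho.pc)
}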
print.rhohat <- function(x, ...) {
  s <- attr(x, "stuff")
  smoother <- s$smoother
  method <- s$method
  splat("Intensity function estimate (class rhohat)",
        "for the covariate", s$covname)
  switch(s$reference,
         Lebesgue=splat("Function values are absolute intensities"),
         baseline=splat("Function values are relative to baseline"),
         model={
           splat("Function values are relative to fitted model")
           print(s$modelcall)
         })
  cat("Type of estimate: ")
  switch(smoother,
         kernel = ,
         local = splat("Smooth function of covariate"),
         increasing = splat("Increasing function of covariate"),
         decreasing = splat("Decreasing function of covariate"),
         piecewise = splat("Piecewise-constant function of covariate"),
         splat("unknown smoother =", sQuote(smoother))
         )
  cat("Estimation method: ")
  switch(smoother,
         piecewise = splat("average intensity in sub-regions"),
         increasing = ,
         decreasing = splat("nonparametric maximum likelihood"),
         kernel = {
           switch(method,
                  ratio = splat("ratio of fixed-bandwidth kernel smoothers"),
                  reweight={
                    splat("fixed-bandwidth kernel smoother of weighted data")
                  },
                  transform={
                    splat("probability integral transform,",
                          "edge-corrected fixed bandwidth kernel smoothing",
                          "on [0,1]")
                  },
                  splat("Unknown method =", sQuote(s$method)))
           if(isTRUE(s$horvitz))
             splat("\twith Horvitz-Thompson weight")
           splat("\tActual smoothing bandwidth sigma = ", signif(s$sigma,5))
         },
         local = {
           switch(method,
                  ratio = splat("ratio of local likelihood smoothers"),
                  reweight={
                    splat("local likelihood smoother of weighted data")
                  },
                  transform={
                    splat("probability integral transform followed by",
                          "local likelihood smoothing on [0,1]")
                  },
                  splat("Unknown method =", sQuote(s$method)))
           if(isTRUE(s$horvitz))
             splat("\twith Horvitz-Thompson weight")
         })
  if(!(smoother %in% c("increasing", "decreasing"))) {
    positiveCI <- s$positiveCI %orifnull% (smoother == "local")
    confidence <- s$confidence %orifnull% 0.95
    splat("Pointwise", paste0(100 * confidence, "%"),
          "confidence bands for rho(x)\n\t based on asymptotic variance of",
          if(positiveCI) "log(rhohat(x))" else "rhohat(x)")
  }
  splat("Call:", s$callstring)
  cat("\n")
  NextMethod("print")
}

plot.rhohat <- function(x, ..., do.rug=TRUE) {
  xname <- short.deparse(substitute(x))
  force(x)
  s <- attr(x, "stuff")
  covname <- s$covname
  asked.rug <- !missing(do.rug) && identical(do.rug, TRUE)
  snam <- intersect(c("hi", "lo"), names(x))
  if(length(snam) == 0) snam <- NULL
  out <- do.call(plot.fv,
                 resolve.defaults(list(x=quote(x)),
                                  list(...),
                                  list(main=xname, shade=snam)))
  if(identical(list(...)$limitsonly, TRUE))
    return(out)
  if(do.rug) {
    rugx <- ZX <- s$ZX
    # check whether it's the default plot
    argh <- list(...)
    isfo <- unlist(lapply(argh, inherits, what="formula"))
    if(any(isfo)) {
      # a plot formula was given; inspect RHS
      fmla <- argh[[min(which(isfo))]]
      rhs <- rhs.of.formula(fmla)
      vars <- variablesinformula(rhs)
      vars <- vars[vars %in% c(colnames(x), ".x", ".y")]
      if(length(vars) == 1 && vars %in% c(covname, ".x")) {
        # expression in terms of covariate
        rhstr <- as.character(rhs)[2]
        dat <- list(ZX)
        names(dat) <- vars[1]
        rugx <- as.numeric(eval(parse(text=rhstr), dat))
      } else {
        if(asked.rug) warning("Unable to add rug plot")
        rugx <- NULL
      }
    }
    if(!is.null(rugx)) {
      # restrict to x limits, if given
      if(!is.null(xlim <- list(...)$xlim))
        rugx <- rugx[rugx >= xlim[1] & rugx <= xlim[2]]
      # finally plot the rug
      if(length(rugx) > 0)
        rug(rugx)
    }
  }
  invisible(NULL)
}

predict.rhohat <- local({

  predict.rhohat <- function(object, ..., relative=FALSE,
                             what=c("rho", "lo", "hi", "se")) {
    trap.extra.arguments(..., .Context="in predict.rhohat")
    what <- match.arg(what)
    #' extract info
    s <- attr(object, "stuff")
    reference <- s$reference
    #' check availability
    if((what %in% c("lo", "hi", "se")) && !("hi" %in% names(object)))
      stop("Standard error and confidence bands are not available in this object",
           call.=FALSE)
    #' convert to (linearly interpolated) function
    x <- with(object, .x)
    y <- if(what == "se") sqrt(object[["var"]]) else object[[what]]
    fun <- approxfun(x, y, rule=2)
    #' extract image(s) of covariate
    Z <- s$Zimage
    #' apply fun to Z
    Y <- if(is.im(Z)) evalfun(Z, fun) else solapply(Z, evalfun, f=fun)
    if(reference != "Lebesgue" && !relative) {
      #' adjust to reference baseline
      Lam <- s$lambdaimage # could be an image or a list of images
      #' multiply Y * Lam (dispatch on 'Math' is not yet working)
      netted <- is.linim(Y) || (is.solist(Y) && all(sapply(Y, is.linim)))
      netted <- netted && requireNamespace("spatstat.linnet")
      if(!netted) {
        Y <- imagelistOp(Lam, Y, "*")
      } else {
        if(is.solist(Y)) Y <- as.linimlist(Y)
        Y <- spatstat.linnet::LinimListOp(Lam, Y, "*")
      }
    }
    return(Y)
  }

  evalfun <- function(X, f) {
    force(f)
    force(X)
    if(is.linim(X) && requireNamespace("spatstat.linnet"))
      return(spatstat.linnet::eval.linim(f(X)))
    if(is.im(X))
      return(eval.im(f(X)))
    return(NULL)
  }

  predict.rhohat
})

as.function.rhohat <- function(x, ..., value=".y", extrapolate=TRUE) {
  NextMethod("as.function")
}

simulate.rhohat <- function(object, nsim=1, ..., drop=TRUE) {
  trap.extra.arguments(..., .Context="in simulate.rhohat")
  lambda <- predict(object)
  if(is.linim(lambda) ||
     (is.solist(lambda) && all(sapply(lambda, is.linim)))) {
    if(!requireNamespace("spatstat.linnet")) {
      warning(paste("Cannot generate simulations on a network;",
                    "this requires the package 'spatstat.linnet'"),
              call.=FALSE)
      return(NULL)
    }
    result <- spatstat.linnet::rpoislpp(lambda, nsim=nsim, drop=drop)
  } else {
    result <- rpoispp(lambda, nsim=nsim, drop=drop)
  }
  return(result)
}
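#' Illustrative sketch (not run): a rhohat object can be converted back to
#' an intensity image and used to generate synthetic patterns, using the
#' predict and simulate methods defined above.
if(FALSE) {
  rho <- rhohat(bei, bei.extra$grad)
  lam <- predict(rho)     # pixel image of the fitted intensity rho(Z(u))
  Xsim <- simulate(rho)   # Poisson pattern with this intensity
}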
spatstat.core/R/edgeTrans.R0000644000176200001440000001045514144333462015315 0ustar liggesusers#
# edgeTrans.R
#
# $Revision: 1.16 $ $Date: 2019/01/18 02:26:41 $
#
# Translation edge correction weights
#
# edge.Trans(X)        compute translation correction weights
#                      for each pair of points from point pattern X
#
# edge.Trans(X, Y, W)  compute translation correction weights
#                      for all pairs of points X[i] and Y[j]
#                      (i.e. one point from X and one from Y)
#                      in window W
#
# edge.Trans(X, Y, W, paired=TRUE)
#                      compute translation correction weights
#                      for each corresponding pair X[i], Y[i]
#
# To estimate the K-function see the idiom in "Kest.R"
#
#######################################################################

edge.Trans <- function(X, Y=X, W=Window(X),
                       exact=FALSE, paired=FALSE,
                       ...,
                       trim=spatstat.options("maxedgewt"),
                       dx=NULL, dy=NULL,
                       give.rmax=FALSE, gW=NULL) {
  given.dxdy <- !is.null(dx) && !is.null(dy)
  if(!given.dxdy) {
    ## dx, dy will be computed from X, Y
    X <- as.ppp(X, W)
    W <- X$window
    Y <- if(!missing(Y)) as.ppp(Y, W) else X
    nX <- X$n
    nY <- Y$n
    if(paired) {
      if(nX != nY)
        stop("X and Y should have equal length when paired=TRUE")
      dx <- Y$x - X$x
      dy <- Y$y - X$y
    } else {
      dx <- outer(X$x, Y$x, "-")
      dy <- outer(X$y, Y$y, "-")
    }
  } else {
    ## dx, dy given
    if(paired) {
      ## dx, dy are vectors
      check.nvector(dx)
      check.nvector(dy)
      stopifnot(length(dx) == length(dy))
    } else {
      ## dx, dy are matrices
      check.nmatrix(dx)
      check.nmatrix(dy)
      stopifnot(all(dim(dx) == dim(dy)))
      nX <- nrow(dx)
      nY <- ncol(dx)
    }
    stopifnot(is.owin(W))
  }
  ## For irregular polygons, exact evaluation is very slow;
  ## so use pixel approximation, unless exact=TRUE
  if(W$type == "polygonal" && !exact)
    W <- as.mask(W)
  ## compute
  if(!paired) {
    dx <- as.vector(dx)
    dy <- as.vector(dy)
  }
  switch(W$type,
         rectangle={
           ## Fast code for this case
           wide <- diff(W$xrange)
           high <- diff(W$yrange)
           weight <- wide * high / ((wide - abs(dx)) * (high - abs(dy)))
         },
         polygonal={
           ## This code is SLOW
           n <- length(dx)
           weight <- numeric(n)
           if(n > 0) {
             for(i in seq_len(n)) {
               Wshift <- shift(W, c(dx[i], dy[i]))
               weight[i] <- overlap.owin(W, Wshift)
             }
             weight <- area(W)/weight
           }
         },
         mask={
           ## compute set covariance of window
           if(is.null(gW)) gW <- setcov(W)
           ## evaluate set covariance at these vectors
           gvalues <- lookup.im(gW, dx, dy, naok=TRUE, strict=FALSE)
           weight <- area(W)/gvalues
         }
         )
  ## clip high values
  if(length(weight) > 0)
    weight <- pmin.int(weight, trim)
  if(!paired)
    weight <- matrix(weight, nrow=nX, ncol=nY)
  if(give.rmax)
    attr(weight, "rmax") <- rmax.Trans(W, gW)
  return(weight)
}

## maximum radius for translation correction
## = radius of largest circle centred at 0 contained in W + ^W

rmax.Trans <- function(W, g=setcov(W)) {
  ## calculate maximum permissible 'r' value
  ## for validity of translation correction
  W <- as.owin(W)
  if(is.rectangle(W))
    return(shortside(W))
  ## find support of set covariance
  if(is.null(g)) g <- setcov(W)
  eps <- 2 * max(1, max(g)) * .Machine$double.eps
  gsupport <- solutionset(g > eps)
  gboundary <- bdry.mask(gsupport)
  xy <- rasterxy.mask(gboundary, drop=TRUE)
  rmax <- with(xy, sqrt(min(x^2 + y^2)))
  return(rmax)
}

## maximum radius for rigid motion correction
## = radius of smallest circle centred at 0 containing W + ^W

rmax.Rigid <- function(X, g=setcov(as.owin(X))) {
  stopifnot(is.ppp(X) || is.owin(X))
  if(is.ppp(X))
    return(max(pairdist(X[chull(X)])))
  W <- X
  if(is.rectangle(W))
    return(diameter(W))
  if(is.null(g)) g <- setcov(W)
  eps <- 2 * max(1, max(g)) * .Machine$double.eps
  gsupport <- solutionset(g > eps)
  gboundary <- bdry.mask(gsupport)
  xy <- rasterxy.mask(gboundary, drop=TRUE)
  rmax <- with(xy, sqrt(max(x^2 + y^2)))
  return(rmax)
}
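# Illustrative sketch (not run): translation correction weights for all
# pairs of points in a pattern ('cells' is from spatstat.data).
if(FALSE) {
  w <- edge.Trans(cells)   # n x n matrix of pairwise weights
  range(w)                 # weights are >= 1, clipped at 'trim'
}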
spatstat.core/R/rhohat.slrm.R0000644000176200001440000000443514144333463015644 0ustar liggesusers#'
#' rhohat.slrm.R
#'
#' $Revision: 1.2 $ $Date: 2021/06/28 03:47:38 $
#'

rhohat.slrm <- function(object, covariate, ...,
                        weights=NULL,
                        method=c("ratio", "reweight", "transform"),
                        horvitz=FALSE,
                        smoother=c("kernel", "local",
                                   "decreasing", "increasing", "piecewise"),
                        subset=NULL,
                        n=512, bw="nrd0", adjust=1, from=NULL, to=NULL,
                        bwref=bw, covname, confidence=0.95, positiveCI,
                        breaks=NULL) {
  callstring <- short.deparse(sys.call())
  smoother <- match.arg(smoother)
  method <- match.arg(method)
  if(missing(positiveCI))
    positiveCI <- (smoother == "local")
  if(missing(covname))
    covname <- sensiblevarname(short.deparse(substitute(covariate)), "X")
  if(is.null(adjust)) adjust <- 1
  if("baseline" %in% names(list(...)))
    warning("Argument 'baseline' ignored: not available for rhohat.slrm")
  ## validate model
  model <- object
  reference <- "model"
  modelcall <- model$call
  if(!is.null(splitby <- object$CallInfo$splitby))
    stop("Sorry, rhohat.slrm is not yet implemented for split pixels",
         call.=FALSE)
  if(is.character(covariate) && length(covariate) == 1) {
    covname <- covariate
    switch(covname,
           x={ covariate <- function(x,y) { x } },
           y={ covariate <- function(x,y) { y } },
           stop("Unrecognised covariate name")
           )
    covunits <- unitname(response(model))
  } else {
    covunits <- NULL
  }
  W <- Window(response(model))
  if(!is.null(subset)) W <- W[subset, drop=FALSE]
  areaW <- area(W)
  rhohatEngine(model, covariate, reference, areaW, ...,
               evalCovarArgs=list(lambdatype="intensity"),
               weights=weights,
               method=method,
               horvitz=horvitz,
               smoother=smoother,
               n=n, bw=bw, adjust=adjust, from=from, to=to,
               bwref=bwref, covname=covname, covunits=covunits,
               confidence=confidence, positiveCI=positiveCI,
               breaks=breaks,
               modelcall=modelcall, callstring=callstring)
}
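#' Illustrative sketch (not run): rhohat for a fitted spatial logistic
#' regression model. The slrm call and use of the coordinate "y" as the
#' covariate name are assumptions based on the interface above.
if(FALSE) {
  fit <- slrm(redwood ~ x)
  plot(rhohat(fit, "y"))   # rho relative to the fitted slrm
}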
spatstat.core/R/Hest.R0000644000176200001440000001042214144333461014275 0ustar liggesusers#
# Hest.R
#
# Contact distribution for a random set
#

Hest <- local({

  Hest <- function(X, r=NULL, breaks=NULL, ...,
                   W,
                   correction=c("km", "rs", "han"),
                   conditional=TRUE) {
    if(missing(W)) W <- NULL
    HestEngine(X, r=r, breaks=breaks, ..., W=W,
               correction=correction, conditional=conditional)
  }

  HestEngine <- function(X, r=NULL, breaks=NULL, ...,
                         W,
                         correction=c("km", "rs", "han"),
                         conditional=TRUE,
                         checkspacing=TRUE, testme=FALSE) {
    rorbgiven <- !is.null(r) || !is.null(breaks)
    if(is.ppp(X) || is.psp(X)) {
      XX <- X
      W0 <- Window(X)
    } else if(is.owin(X)) {
      XX <- X
      W0 <- Frame(X)
    } else if(is.im(X)) {
      if(!is.logical(ZeroValue(X)))
        stop("When X is an image, its pixel values should be logical values")
      XX <- solutionset(X)
      W0 <- Window(X)
    } else stop("X should be an object of class ppp, psp, owin or im")
    ##
    if(given.W <- !missing(W) && !is.null(W)) {
      stopifnot(is.owin(W))
      if(!is.subset.owin(W, W0))
        stop("W is not a subset of the observation window of X")
    } else {
      W <- W0
    }
    ## handle corrections
    if(is.null(correction))
      correction <- c("rs", "km", "cs")
    correction <- pickoption("correction", correction,
                             c(none="none",
                               raw="none",
                               border="rs",
                               rs="rs",
                               KM="km",
                               km="km",
                               Kaplan="km",
                               han="han",
                               Hanisch="han",
                               best="km"),
                             multi=TRUE)
    corxtable <- c("km", "rs", "han", "none")
    corx <- as.list(corxtable %in% correction)
    names(corx) <- corxtable
    ## compute distance map
    D <- distmap(XX, ...)
    pixeps <- with(D, min(xstep, ystep))
    if(!given.W && !is.im(X)) {
      B <- attr(D, "bdry")
    } else {
      B <- distmap(W, invert=TRUE, ...)
      har <- harmonise(D=D, B=B)
      D <- har$D[W, drop=FALSE]
      B <- har$B[W, drop=FALSE]
    }
    ## histogram breakpoints
    dmax <- max(D)
    breaks <- handle.r.b.args(r, breaks, W, NULL, rmaxdefault=dmax)
    rval <- breaks$r
    if(testme || (rorbgiven && checkspacing))
      check.finespacing(rval,
                        rname="r",
                        eps=pixeps/4,
                        W,
                        rmaxdefault=dmax,
                        context="in Hest(X,r)",
                        action="fatal")
    ## extract distances and censoring distances
    dist <- as.vector(as.matrix(D))
    bdry <- as.vector(as.matrix(B))
    ok <- !is.na(dist) & !is.na(bdry)
    dist <- dist[ok]
    bdry <- bdry[ok]
    ## delete zero distances
    if(is.owin(X) || is.im(X)) {
      pos <- (dist > 0)
      areafraction <- 1 - mean(pos)
      dist <- dist[pos]
      bdry <- bdry[pos]
    }
    ## censoring indicators
    d <- (dist <= bdry)
    ## observed distances
    o <- pmin.int(dist, bdry)
    ## calculate estimates
    Z <- censtimeCDFest(o, bdry, d, breaks,
                        KM=corx$km,
                        RS=corx$rs,
                        HAN=corx$han,
                        RAW=corx$none,
                        han.denom=if(corx$han) eroded.areas(W, rval) else NULL,
                        tt=dist)
    ## conditional on d > 0 ?
    if(is.owin(X) || is.im(X)) {
      if(conditional) {
        if(corx$km) Z$km <- condition(Z$km)
        if(corx$rs) Z$rs <- condition(Z$rs)
        if(corx$han) Z$han <- condition(Z$han)
        if(corx$none) Z$raw <- condition(Z$raw)
      } else {
        if(corx$km) Z$km <- reconstitute(Z$km, areafraction)
        if(corx$rs) Z$rs <- reconstitute(Z$rs, areafraction)
        if(corx$han) Z$han <- reconstitute(Z$han, areafraction)
        if(corx$none) Z$raw <- reconstitute(Z$raw, areafraction)
      }
    }
    ## relabel
    Z <- rebadge.fv(Z, substitute(H(r), NULL), "H")
    unitname(Z) <- unitname(X)
    attr(Z, "conserve") <- list(checkspacing=FALSE)
    return(Z)
  }

  condition <- function(x) {
    (x - x[1])/(1 - x[1])
  }
  reconstitute <- function(x, p) {
    p + (1 - p) * x
  }

  Hest
})
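# Illustrative sketch (not run): contact distribution of a random set.
# 'heather' is from spatstat.data; $coarse is a binary mask (owin).
if(FALSE) {
  H <- Hest(heather$coarse)
  plot(H)
}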
spatstat.core/R/vcov.kppm.R0000644000176200001440000001217714144333464015331 0ustar liggesusers#
# vcov.kppm
#
# vcov method for kppm objects
#
# Original code: Abdollah Jalilian
#
# $Revision: 1.11 $ $Date: 2018/02/15 03:28:11 $
#

vcov.kppm <- function(object, ...,
                      what=c("vcov", "corr", "fisher", "internals"),
                      fast = NULL,
                      rmax = NULL,
                      eps.rmax = 0.01,
                      verbose = TRUE) {
  what <- match.arg(what)
  verifyclass(object, "kppm")
  fast.given <- !is.null(fast)
  #' secret argument (eg for testing)
  splitup <- resolve.1.default(list(splitup=FALSE), list(...))
  #'
  if(is.null(object$improve)) {
    ## Normal composite likelihood (poisson) case
    ## extract composite likelihood results
    po <- object$po
    ## ensure it was fitted with quadscheme
    if(is.null(getglmfit(po))) {
      warning("Re-fitting model with forcefit=TRUE")
      po <- update(po, forcefit=TRUE)
    }
    ## extract quadrature scheme information
    Q <- quad.ppm(po)
    U <- union.quad(Q)
    nU <- npoints(U)
    wt <- w.quad(Q)
    ## compute fitted intensity values
    lambda <- fitted(po, type="lambda")
    ## extract covariate values
    Z <- model.matrix(po)
    ## evaluate integrand
    ff <- Z * lambda * wt
    ## extract pcf
    g <- pcfmodel(object)
    ## resolve options for algorithm
    maxmat <- spatstat.options("maxmatrix")
    if(!fast.given) {
      fast <- (nU^2 > maxmat)
    } else stopifnot(is.logical(fast))
    ## attempt to compute large matrix: pair correlation function minus 1
    if(!fast) {
      gminus1 <- there.is.no.try(
        matrix(g(c(pairdist(U))) - 1, nU, nU)
      )
    } else {
      if(is.null(rmax)) {
        diamwin <- diameter(as.owin(U))
        fnc <- get("fnc", envir = environment(improve.kppm))
        rmax <- if(fnc(diamwin, eps.rmax, g) >= 0) diamwin else
                uniroot(fnc, lower = 0, upper = diamwin,
                        eps=eps.rmax, g=g)$root
      }
      cp <- there.is.no.try(
        crosspairs(U, U, rmax, what="ijd")
      )
      gminus1 <- if(is.null(cp)) NULL else
                 sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(nU, nU))
    }
    ## compute quadratic form
    if(!splitup && !is.null(gminus1)) {
      E <- t(ff) %*% gminus1 %*% ff
    } else {
      ## split calculation of (gminus1 %*% ff) into blocks
      nrowperblock <- max(1, floor(maxmat/nU))
      nblocks <- ceiling(nU/nrowperblock)
      g1ff <- NULL
      if(verbose) {
        splat("Splitting large matrix calculation into",
              nblocks, "blocks")
        pstate <- list()
      }
      if(!fast) {
        for(k in seq_len(nblocks)) {
          if(verbose) pstate <- progressreport(k, nblocks, state=pstate)
          istart <- nrowperblock * (k-1) + 1
          iend <- min(nrowperblock * k, nU)
          ii <- istart:iend
          gm1 <- matrix(g(c(crossdist(U[ii], U))) - 1, iend-istart+1, nU)
          g1ff <- rbind(g1ff, gm1 %*% ff)
        }
      } else {
        for(k in seq_len(nblocks)) {
          if(verbose) pstate <- progressreport(k, nblocks, state=pstate)
          istart <- nrowperblock * (k-1) + 1
          iend <- min(nrowperblock * k, nU)
          ii <- istart:iend
          cp <- crosspairs(U[ii], U, rmax, what="ijd")
          gm1 <- sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1,
                              dims=c(iend-istart+1, nU))
          g1ff <- rbind(g1ff, as.matrix(gm1 %*% ff))
        }
      }
      E <- t(ff) %*% g1ff
    }
    ## asymptotic covariance matrix in the Poisson case
    J <- t(Z) %*% ff
    J.inv <- try(solve(J))
    ## could be singular
    if(inherits(J.inv, "try-error")) {
      if(what == "internals") {
        return(list(ff=ff, J=J, E=E, J.inv=NULL))
      } else {
        return(NULL)
      }
    }
    ## asymptotic covariance matrix in the clustered case
    vc <- J.inv + J.inv %*% E %*% J.inv
  } else {
    ## Case of quasi-likelihood (or other things from improve.kppm)
    run <- is.null(object$vcov) ||
           (!is.null(fast) && (fast != object$improve$fast.vcov))
    if(run) {
      ## Calculate vcov if it hasn't already been done,
      ## or if option 'fast' differs from 'fast.vcov'
      args <- object$improve
      internal <- what == "internals"
      if(!is.null(fast))
        args$fast.vcov <- fast
      object <- with(args,
                     improve.kppm(object, type = type,
                                  rmax = rmax, dimyx = dimyx,
                                  fast = fast, vcov = TRUE,
                                  fast.vcov = fast.vcov,
                                  maxIter = 0,
                                  save.internals = internal))
    }
    vc <- object$vcov
  }
  ## Convert from Matrix to ordinary matrix:
  vc <- as.matrix(vc)
  switch(what,
         vcov={ return(vc) },
         corr={
           sd <- sqrt(diag(vc))
           co <- vc/outer(sd, sd, "*")
           return(co)
         },
         fisher={
           fish <- try(solve(vc))
           if(inherits(fish, "try-error")) fish <- NULL
           return(fish)
         },
         internals={
           return(list(ff=ff, J=J, E=E, J.inv=J.inv, vc=vc))
         })
  stop(paste("Unrecognised option: what=", what))
}
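# Illustrative sketch (not run): variance-covariance matrix of the trend
# coefficients of a fitted cluster model ('redwood' from spatstat.data).
if(FALSE) {
  fit <- kppm(redwood ~ x, "Thomas")
  vcov(fit)                # covariance matrix
  vcov(fit, what="corr")   # correlation matrix
}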
spatstat.core/R/nnorient.R0000644000176200001440000001024014144333463015226 0ustar liggesusers##
## nnorient.R
##
## nearest neighbour pair orientation distribution
##
## Function \vartheta(phi) defined in
## Illian et al (2008) equ (4.5.3) page 253
##
## $Revision: 1.4 $ $Date: 2018/10/02 01:21:40 $

nnorient <- function(X, ..., cumulative=FALSE, correction, k = 1,
                     unit=c("degree", "radian"),
                     domain=NULL, ratio=FALSE) {
  stopifnot(is.ppp(X))
  check.1.integer(k)
  stopifnot(k >= 1)
  W <- Window(X)
  if(!is.null(domain))
    stopifnot(is.subset.owin(domain, W))
  unit <- match.arg(unit)
  switch(unit,
         degree = {
           FullCircle <- 360
           Convert <- 180/pi
         },
         radian = {
           FullCircle <- 2 * pi
           Convert <- 1
         })
  ## choose correction(s)
  correction.given <- !missing(correction) && !is.null(correction)
  if(!correction.given)
    correction <- c("bord.modif", "none")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             bord.modif="bord.modif",
                             good="good",
                             best="best"),
                           multi=TRUE)
  correction[correction %in% c("good", "best")] <- "bord.modif"
  ## process point pattern
  Xcoord <- coords(X)
  Ycoord <- Xcoord[nnwhich(X, k=k), ]
  if(!is.null(domain)) {
    inD <- inside.owin(Xcoord$x, Xcoord$y, domain)
    Xcoord <- Xcoord[inD,]
    Ycoord <- Ycoord[inD,]
  }
  dYX <- Ycoord - Xcoord
  ANGLE <- with(dYX, atan2(y, x) * Convert) %% FullCircle
  nangles <- length(ANGLE)
  ## initialise output object
  Nphi <- 512
  breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1)
  phi <- breaks$r
  Odf <- data.frame(phi  = phi,
                    theo = (if(cumulative) phi else 1)/FullCircle)
  desc <- c("angle argument phi",
            "theoretical isotropic %s")
  NOletter <- if(cumulative) "Theta" else "vartheta"
  NOsymbol <- as.name(NOletter)
  NNO <- ratfv(Odf, NULL, denom=nangles,
               argu="phi",
               ylab=substitute(fn(phi), list(fn=NOsymbol)),
               valu="theo",
               fmla = . ~ phi,
               alim = c(0, FullCircle),
               c("phi", "{%s[%s]^{pois}}(phi)"),
               desc,
               fname=NOletter,
               yexp=substitute(fn(phi), list(fn=NOsymbol)))
  ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^
  if(any(correction == "none")) {
    ## uncorrected! For demonstration purposes only!
    if(cumulative) {
      wh <- whist(ANGLE, breaks$val)  # no weights
      num.un <- cumsum(wh)
    } else {
      kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit)
      num.un <- kd$y * nangles
    }
    den.un <- nangles
    ## uncorrected estimate
    NNO <- bind.ratfv(NNO,
                      data.frame(un=num.un), den.un,
                      "{hat(%s)[%s]^{un}}(phi)",
                      "uncorrected estimate of %s",
                      "un",
                      ratio=ratio)
  }
  if("bord.modif" %in% correction) {
    ## border type correction
    bX <- bdist.points(X)
    nndX <- nndist(X, k=k)
    if(!is.null(domain)) {
      bX <- bX[inD]
      nndX <- nndX[inD]
    }
    ok <- (nndX < bX)
    nok <- sum(ok)
    rr <- seq(0, max(bX), length=256)
    if(nok == 0) {
      num.bm <- numeric(Nphi)  # i.e. rep(0, Nphi)
    } else {
      Ar <- eroded.areas(W, rr)
      Arf <- approxfun(rr, Ar, rule=2)
      AI <- Arf(bX)
      edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0)
      if(cumulative) {
        wh <- whist(ANGLE, breaks$val, edgewt)
        num.bm <- cumsum(wh)/mean(edgewt)
      } else {
        w <- edgewt/sum(edgewt)
        kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit)
        num.bm <- kd$y * nok
      }
    }
    den.bm <- nok
    NNO <- bind.ratfv(NNO,
                      data.frame(bordm=num.bm), den.bm,
                      "{hat(%s)[%s]^{bordm}}(phi)",
                      "modified border-corrected estimate of %s",
                      "bordm",
                      ratio=ratio)
  }
  unitname(NNO) <- switch(unit,
                          degree = c("degree", "degrees"),
                          radian = c("radian", "radians"))
  return(NNO)
}
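## Illustrative sketch (not run): distribution of the orientations of
## nearest-neighbour pairs, plotted as a rose diagram.
if(FALSE) {
  v <- nnorient(redwood)
  rose(v)
}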
spatstat.core/R/update.ppm.R0000644000176200001440000003141114144333464015453 0ustar liggesusers#
# update.ppm.R
#
# $Revision: 1.61 $ $Date: 2017/10/04 03:51:04 $
#

update.ppm <- local({

  ## update point pattern dataset using either data or formula
  newpattern <- function(oldpattern, lhs, callframe, envir) {
    eval(eval(substitute(substitute(l, list("."=Q)),
                         list(l=lhs, Q=oldpattern)),
              envir=as.list(envir), enclos=callframe),
         envir=as.list(envir), enclos=callframe)
  }

  update.ppm <- function(object, ..., fixdummy=TRUE, use.internal=NULL,
                         envir=environment(terms(object))) {
    verifyclass(object, "ppm")
    new.callstring <- short.deparse(sys.call())
    aargh <- list(...)
    if(inherits(object, "ippm")) {
      call <- object$dispatched$call
      callframe <- object$dispatched$callframe
    } else {
      call <- getCall(object)
      if(!is.call(call))
        stop(paste("Internal error - getCall(object) is not of class",
                   sQuote("call")))
      callframe <- object$callframe
    }
    callfun <- as.character(call[[1]])
    newstyle <- (callfun == "ppm.formula")
    oldstyle <- !newstyle
    ## Special cases
    ## (1) no new information given
    if(length(aargh) == 0 && !identical(use.internal, TRUE)) {
      result <- eval(call, as.list(envir), enclos=callframe)
      result$callframe <- callframe
      return(result)
    }
    ## (2) model can be updated using existing covariate data frame
    if(!identical(use.internal, FALSE) &&
       ## single argument which is a formula
       (length(aargh) == 1) &&
       inherits(fmla <- aargh[[1]], "formula") &&
       is.null(lhs.of.formula(fmla)) &&
       ## not a ppm.formula call
       oldstyle &&
       ## fitted by mpl using glm/gam
       with(object, method == "mpl" &&
                    !is.null(fitter) &&
                    fitter %in% c("gam", "glm"))) {
      ## This is a dangerous hack!
      glmdata <- object$internal$glmdata
      ## check whether data for new variables are available
      ## (this doesn't work with things like 'pi')
      vars.available <- c(colnames(glmdata), names(object$covfunargs))
      if(all(variablesinformula(fmla) %in% c(".", vars.available))) {
        ## we can update using internal data
        FIT <- object$internal$glmfit
        orig.env <- environment(FIT$terms)
        ## update formulae using "." rules
        trend <- newformula(object$trend, fmla, callframe, envir)
        fmla <- newformula(formula(FIT), fmla, callframe, envir)
        ## expand polynom() in formula
        if(spatstat.options("expand.polynom")) {
          fmla <- expand.polynom(fmla)
          trend <- expand.polynom(trend)
        }
        ## update GLM/GAM fit
        upd.glm.call <- update(FIT, fmla, evaluate=FALSE)
        FIT <- eval(upd.glm.call, envir=orig.env)
        environment(FIT$terms) <- orig.env
        object$internal$glmfit <- FIT
        ## update entries of object
        object$trend <- trend
        object$terms <- terms(fmla)
        object$coef <- co <- FIT$coef
        object$callstring <- new.callstring
        object$internal$fmla <- fmla
        ##
        if(is.finite(object$maxlogpl)) {
          ## Update maxlogpl provided it is finite
          ## (If the likelihood is infinite, this is due to the interaction;
          ## if we update the trend, the likelihood will remain infinite.)
          W <- glmdata$.mpl.W
          SUBSET <- glmdata$.mpl.SUBSET
          Z <- is.data(object$Q)
          object$maxlogpl <- -(deviance(FIT)/2 +
                               sum(log(W[Z & SUBSET])) +
                               sum(Z & SUBSET))
        }
        ## update the model call
        upd.call <- call
        upd.call$trend <- trend
        object$call <- upd.call
        ## update fitted interaction (depends on coefficients, if not Poisson)
        if(!is.null(inter <- object$interaction) && !is.poisson(inter))
          object$fitin <- fii(inter, co,
                              object$internal$Vnames,
                              object$internal$IsOffset)
        ##
        if(is.stationary(object) && !is.marked(object)) {
          ## uniform Poisson
          if(eval(call$rename.intercept) %orifnull% TRUE) {
            names(object$coef) <- "log(lambda)"
          }
        }
        return(object)
      }
    }
    ## (3) Need to use internal data
    if(oldstyle) {
      ## decide whether to use internal data
      undecided <- is.null(use.internal) || !is.logical(use.internal)
      force.int <- !undecided && use.internal
      force.ext <- !undecided && !use.internal
      if(!force.int) {
        ## check for validity of format
        badformat <- damaged.ppm(object)
      }
      if(undecided) {
        use.internal <- badformat
        if(badformat)
          message("object format corrupted; repairing it")
      } else if(force.ext && badformat)
        warning("object format corrupted; try update(object, use.internal=TRUE)")
      if(use.internal) {
        ## reset the main arguments in the call using the internal data
        call$Q <- quad.ppm(object)
        namobj <- names(call)
        if("trend" %in% namobj)
          call$trend <- newformula(call$trend, object$trend, callframe, envir)
        if("interaction" %in% namobj) call$interaction <- object$interaction
        if("covariates" %in% namobj) call$covariates <- object$covariates
      }
    }
    ## General case.
    X.is.new <- FALSE
    ## First split named and unnamed arguments
    nama <- names(aargh)
    named <- if(is.null(nama)) rep.int(FALSE, length(aargh)) else nzchar(nama)
    namedargs <- aargh[named]
    unnamedargs <- aargh[!named]
    nama <- names(namedargs)
    ## Find the argument 'Q' by name or implicitly by class
    ## (including detection of conflicts)
    argQ <- NULL
    if(n <- sp.foundclasses(c("ppp", "quad"), unnamedargs, "Q", nama)) {
      argQ <- unnamedargs[[n]]
      unnamedargs <- unnamedargs[-n]
    }
    if("Q" %in% nama) {
      argQ <- namedargs$Q
      nama <- setdiff(nama, "Q")
      namedargs <- namedargs[nama]
    }
    ## Deal with argument 'Q' which has several possible forms
    if(!is.null(argQ)) {
      X.is.new <- TRUE
      if(inherits(argQ, "formula")) {
        ## Q = X ~ trend
        if(newstyle) {
          ## update the formula
          call$Q <- newformula(call$Q, argQ, callframe, envir)
        } else {
          ## split into Q = X and trend = ~trend
          if(!is.null(lhs <- lhs.of.formula(argQ)))
            call$Q <- newpattern(call$Q, lhs, callframe, envir)
          call$trend <- newformula(call$trend,
                                   rhs.of.formula(eval(argQ)),
                                   callframe, envir)
        }
      } else {
        ## Q = X
        if(newstyle) {
          ## convert old call to old style
          fo <- as.formula(call$Q)
          Yexpr <- lhs.of.formula(fo)
          trend <- rhs.of.formula(fo)
          newcall <- call("ppm", Q=Yexpr, trend=trend)
          if(length(call) > 2) {
            whichQ <- which(names(call) == "Q")
            morecall <- call[-c(1, whichQ)]
            if((mc <- length(morecall)) > 0) {
              newcall[3 + 1:mc] <- morecall
              names(newcall)[3 + 1:mc] <- names(call)[-c(1, whichQ)]
            }
          }
          call <- newcall
          newstyle <- FALSE
          oldstyle <- TRUE
        }
        ## Now update the dataset
        call$Q <- argQ
      }
    }
    ## Find any formula arguments
    ## (including detection of conflicts)
    argfmla <- NULL
    if(n <- sp.foundclass("formula", unnamedargs, "trend", nama)) {
      argfmla <- unnamedargs[[n]]
      unnamedargs <- unnamedargs[-n]
    } else if(n <- sp.foundclass("character", unnamedargs, "trend", nama)) {
      ## string that might be interpreted as a formula
      strg <- unnamedargs[[n]]
      if(!is.na(charmatch("~", strg))) {
        argfmla <- as.formula(strg)
        unnamedargs <- unnamedargs[-n]
      }
    }
    if("trend" %in% nama) {
      argfmla <- namedargs$trend
      nama <- setdiff(nama, "trend")
      namedargs <- namedargs[nama]
    }
    ## Handle new formula
    if(!is.null(argfmla)) {
      lhs <- lhs.of.formula(argfmla)
      if(newstyle) {
        ## ppm.formula: update the formula
        if(is.null(lhs)) {
          argfmla <- as.formula(paste(".", deparse(argfmla)))
        } else X.is.new <- TRUE
        call$Q <- newformula(call$Q, argfmla, callframe, envir)
      } else {
        ## ppm.ppp: update the trend and possibly the data
        if(is.null(lhs)) {
          ## assign new trend
          call$trend <- newformula(call$trend, argfmla, callframe, envir)
        } else {
          ## split into Q = X and trend = ~trend
          X.is.new <- TRUE
          call$Q <- newpattern(call$Q, lhs, callframe, envir)
          call$trend <- newformula(call$trend,
                                   rhs.of.formula(argfmla),
                                   callframe, envir)
        }
      }
    }
    if(length(namedargs) > 0) {
      ## any other named arguments that were also present in the original call
      ## override their original values.
      existing <- !is.na(match(nama, names(call)))
      for(a in nama[existing]) call[[a]] <- aargh[[a]]
      ## add any named arguments not present in the original call
      if(any(!existing)) {
        call <- c(as.list(call), namedargs[!existing])
        call <- as.call(call)
      }
    }
    if(length(unnamedargs) > 0) {
      ## some further objects identified by their class
      if(n <- sp.foundclass("interact", unnamedargs, "interaction", nama)) {
        call$interaction <- unnamedargs[[n]]
        unnamedargs <- unnamedargs[-n]
      }
      if(n <- sp.foundclasses(c("data.frame", "im"), unnamedargs,
                              "covariates", nama)) {
        call$covariates <- unnamedargs[[n]]
        unnamedargs <- unnamedargs[-n]
      }
    }
    ## *************************************************************
    ## ****** Special action when Q is a point pattern *************
    ## *************************************************************
    if(X.is.new && fixdummy && oldstyle &&
       is.ppp(X <- eval(call$Q, as.list(envir), enclos=callframe)) &&
       identical(Window(X), Window(data.ppm(object)))) {
      ## Instead of allowing default.dummy(X) to occur,
      ## explicitly create a quadrature scheme from X,
      ## using the same dummy points and weight parameters
      ## as were used in the fitted model
      Qold <- quad.ppm(object)
      if(is.marked(Qold)) {
        dpar <- Qold$param$dummy
        wpar <- Qold$param$weight
        Qnew <- do.call(quadscheme, append(list(X), append(dpar, wpar)))
      } else {
        Dum <- Qold$dummy
        wpar <- Qold$param$weight
        Qnew <- do.call(quadscheme, append(list(X, Dum), wpar))
      }
      ## replace X by new Q
      call$Q <- Qnew
    }
    ## finally call ppm
    call[[1]] <- as.name('ppm')
    return(eval(call, as.list(envir), enclos=callframe))
  }

  update.ppm
})

sp.foundclass <- function(cname, inlist, formalname, argsgiven) {
  ok <- unlist(lapply(inlist, inherits, what=cname))
  nok <- sum(ok)
  if(nok > 1)
    stop(paste("I am confused: there are two unnamed arguments",
               "of class", sQuote(cname)))
  if(nok == 0) return(0)
  absent <- !(formalname %in% argsgiven)
  if(!absent)
    stop(paste("I am confused: there is an unnamed argument",
               "of class", sQuote(cname),
               "which conflicts with the named argument",
               sQuote(formalname)))
  theposition <- seq_along(ok)[ok]
  return(theposition)
}

sp.foundclasses <- function(cnames, inlist, formalname, argsgiven) {
  ncn <- length(cnames)
  pozzie <- logical(ncn)
  for(i in seq_len(ncn))
    pozzie[i] <- sp.foundclass(cnames[i], inlist, formalname, argsgiven)
  found <- (pozzie > 0)
  nfound <- sum(found)
  if(nfound == 0)
    return(0)
  else if(nfound == 1)
    return(pozzie[found])
  else
    stop(paste("I am confused: there are", nfound,
               "unnamed arguments of different classes (",
               paste(sQuote(cnames(pozzie[found])), collapse=", "),
               ") which could be interpreted as", sQuote(formalname)))
}
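# Illustrative sketch (not run) of the update method for ppm defined above:
# refit a model with a new trend formula, reusing the original data.
if(FALSE) {
  fit <- ppm(cells ~ 1)
  fit2 <- update(fit, ~ x)                # new trend, same data
  fit3 <- update(fit, use.internal=TRUE)  # refit from internal data
}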
damaged.ppm <- function(object) {
  ## guess whether the object format has been damaged
  ## e.g. by dump/restore
  gf <- getglmfit(object)
  badfit <- !is.null(gf) && !inherits(gf$terms, "terms")
  if(badfit)
    return(TRUE)
  ## escape clause for fake models
  if(identical(object$fake, TRUE))
    return(FALSE)
  ## otherwise it was made by ppm
  Qcall <- object$call$Q
  cf <- object$callframe
  if(is.null(cf)) {
    ## Old format of ppm objects
    if(is.name(Qcall) && !exists(paste(Qcall)))
      return(TRUE)
    Q <- eval(Qcall)
  } else {
    ## New format of ppm objects
    if(is.name(Qcall) && !exists(paste(Qcall), cf))
      return(TRUE)
    Q <- eval(Qcall, cf)
  }
  badQ <- is.null(Q) || !(inherits(Q, c("ppp", "quad", "formula")))
  return(badQ)
}
spatstat.core/R/Jest.R0000644000176200001440000000475414144333461014310 0ustar liggesusers# Jest.S
#
# Usual invocation to compute J function
# if F and G are not required
#
# $Revision: 4.25 $ $Date: 2019/10/31 02:58:29 $
#

Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) {
  X <- as.ppp(X)
  W <- Window(X)
  brks <- handle.r.b.args(r, breaks, window=W, pixeps=eps,
                          rmaxdefault=rmax.rule("J", W, intensity(X)))
  checkspacing <- !isFALSE(list(...)$checkspacing)
  #' compute F and G
  FF <- Fest(X, eps, breaks=brks, correction=correction,
             checkspacing=checkspacing)
  G <- Gest(X, breaks=brks, correction=correction)
  # initialise fv object
  rvals <- FF$r
  rmax <- max(rvals)
  Z <- fv(data.frame(r=rvals, theo=1),
          "r", substitute(J(r), NULL), "theo", . ~ r,
          c(0, rmax),
          c("r", "%s[pois](r)"),
          c("distance argument r", "theoretical Poisson %s"),
          fname="J")
  # compute J function estimates;
  # this has to be done manually because of the mismatch between names
  Fnames <- names(FF)
  Gnames <- names(G)
  bothnames <- intersect(Fnames, Gnames)
  if("raw" %in% bothnames) {
    Jun <- ratiotweak(1-G$raw, 1-FF$raw)
    Z <- bind.fv(Z, data.frame(un=Jun),
                 "hat(%s)[un](r)",
                 "uncorrected estimate of %s", "un")
    attr(Z, "alim") <- range(rvals[FF$raw <= 0.9])
  }
  if("rs" %in% bothnames) {
    Jrs <- ratiotweak(1-G$rs, 1-FF$rs)
    Z <- bind.fv(Z, data.frame(rs=Jrs),
                 "hat(%s)[rs](r)",
                 "border corrected estimate of %s", "rs")
    attr(Z, "alim") <- range(rvals[FF$rs <= 0.9])
  }
  if("han" %in% Gnames && "cs" %in% Fnames) {
    Jhan <- ratiotweak(1-G$han, 1-FF$cs)
    Z <- bind.fv(Z, data.frame(han=Jhan),
                 "hat(%s)[han](r)",
                 "Hanisch-style estimate of %s", "han")
    attr(Z, "alim") <- range(rvals[FF$cs <= 0.9])
  }
  if("km" %in% bothnames) {
    Jkm <- ratiotweak(1-G$km, 1-FF$km)
    Z <- bind.fv(Z, data.frame(km=Jkm),
                 "hat(%s)[km](r)",
                 "Kaplan-Meier estimate of %s", "km")
    attr(Z, "alim") <- range(rvals[FF$km <= 0.9])
  }
  if("hazard" %in% bothnames) {
    Jhaz <- G$hazard - FF$hazard
    Z <- bind.fv(Z, data.frame(hazard=Jhaz),
                 "hazard(r)",
                 "Kaplan-Meier estimate of derivative of log(%s)")
  }
  # set default plotting values and order
  nama <- names(Z)
  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
  # add more info
  attr(Z, "F") <- FF
  attr(Z, "G") <- G
  attr(Z, "conserve") <- attr(FF, "conserve")
  unitname(Z) <- unitname(X)
  return(Z)
}
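# Illustrative sketch (not run): J function of a point pattern.
# J(r) = 1 for a Poisson process; J < 1 suggests clustering, J > 1 regularity.
if(FALSE) {
  J <- Jest(cells)
  plot(J)
}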
Window(X)
  n <- npoints(X)
  method <- match.arg(method)
  # alternative hypothesis
  alternative <- match.arg(alternative)
  if(alternative == "clustered") alternative <- "less"
  if(alternative == "regular") alternative <- "greater"
  altblurb <- switch(alternative,
                     two.sided="two-sided",
                     less="clustered (A < 1)",
                     greater="regular (A > 1)")
  ## compute observed value
  statistic <- hopskel(X)
  ## p-value
  switch(method,
         asymptotic = {
           ## F-distribution
           nn <- 2 * n
           p.value <- switch(alternative,
                             less = pf(statistic, nn, nn, lower.tail=TRUE),
                             greater = pf(statistic, nn, nn, lower.tail=FALSE),
                             two.sided = 2 * pf(statistic, nn, nn,
                                                lower.tail=(statistic < 1)))
           pvblurb <- "using F distribution"
         },
         MonteCarlo = {
           ## Monte Carlo p-value
           sims <- numeric(nsim)
           for(i in 1:nsim) {
             Xsim <- runifpoint(n, win=W)
             sims[i] <- hopskel(Xsim)
           }
           p.upper <- (1 + sum(sims >= statistic))/(1 + nsim)
           p.lower <- (1 + sum(sims <= statistic))/(1 + nsim)
           p.value <- switch(alternative,
                             less=p.lower,
                             greater=p.upper,
                             two.sided=2*min(p.lower, p.upper))
           pvblurb <- paste("Monte Carlo test based on", nsim,
                            "simulations of CSR with fixed n")
         })
  statistic <- as.numeric(statistic)
  names(statistic) <- "A"
  out <- list(statistic=statistic,
              p.value=p.value,
              alternative=altblurb,
              method=c("Hopkins-Skellam test of CSR", pvblurb),
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
spatstat.core/R/Jmulti.R0000644000176200001440000001274414144333461014647 0ustar liggesusers# Jmulti.S
#
#    Usual invocations to compute multitype J function(s)
#    if F and G are not required
#
#    $Revision: 4.45 $	$Date: 2020/10/30 03:59:35 $
#
#
"Jcross" <- function(X, i, j, eps=NULL, r=NULL, breaks=NULL, ...,
                     correction=NULL) {
  #  multitype J function J_{ij}(r)
  #
  #  X:       point pattern (an object of class 'ppp')
  #  i, j:    types for which J_{i,j}(r) is calculated
  #  eps:     raster grid mesh size for distance transform
  #           (unless specified by X$window)
  #  r:       (optional) values of argument r
  #  breaks:  (optional) breakpoints for argument r
  #
  X <- as.ppp(X)
  if(!is.marked(X))
    stop(paste("point pattern has no", sQuote("marks")))
  stopifnot(is.multitype(X))
  checkspacing <- !isFALSE(list(...)$checkspacing)
  #
  marx <- marks(X, dfok=FALSE)
  if(missing(i)) i <- levels(marx)[1]
  if(missing(j)) j <- levels(marx)[2]
  #
  I <- (marx == i)
  if(sum(I) == 0) stop(paste("No points have mark = ", i))
  #
  if(i == j){
    result <- Jest(X[I], eps=eps, r=r, breaks=breaks,
                   correction=correction, checkspacing=checkspacing)
  } else {
    J <- (marx == j)
    result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=TRUE,
                     correction=correction, checkspacing=checkspacing)
  }
  conserve <- attr(result, "conserve")
  result <- rebadge.as.crossfun(result, "J", NULL, i, j)
  attr(result, "conserve") <- conserve
  return(result)
}

"Jdot" <- function(X, i, eps=NULL, r=NULL, breaks=NULL, ...,
                   correction=NULL) {
  #  multitype J function J_{i\dot}(r)
  #
  #  X:       point pattern (an object of class 'ppp')
  #  i:       mark i for which we calculate J_{i\cdot}(r)
  #  eps:     raster grid mesh size for distance transform
  #           (unless specified by X$window)
  #  r:       (optional) values of argument r
  #  breaks:  (optional) breakpoints for argument r
  #
  X <- as.ppp(X)
  if(!is.marked(X))
    stop(paste("point pattern has no", sQuote("marks")))
  stopifnot(is.multitype(X))
  checkspacing <- !isFALSE(list(...)$checkspacing)
  #
  marx <- marks(X, dfok=FALSE)
  if(missing(i)) i <- levels(marx)[1]
  #
  I <- (marx == i)
  if(sum(I) == 0) stop(paste("No points have mark = ", i))
  J <- rep.int(TRUE, X$n)
  #
  result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=FALSE,
                   correction=correction,
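                   ## Illustrative examples (not from the original source),
                   ## assuming the 'amacrine' dataset from spatstat.data:
                   ##     Jcross(amacrine, "on", "off")  # J from 'on' to 'off'
                   ##     Jdot(amacrine, "on")           # J from 'on' to all
                   ## As for Jest, J(r) < 1 suggests clustering and
                   ## J(r) > 1 suggests regularity between the chosen types.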
checkspacing=checkspacing) conserve <- attr(result, "conserve") result <- rebadge.as.dotfun(result, "J", NULL, i) attr(result, "conserve") <- conserve return(result) } "Jmulti" <- function(X, I, J, eps=NULL, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=NULL) { # # multitype J function (generic engine) # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. # # eps: raster grid mesh size for distance transform # (unless specified by X$window) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # # X <- as.ppp(X) W<- X$window I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") XJ <- X[J] lambdaJ <- intensity(XJ) rmaxdefault <- rmax.rule("J", W, lambdaJ) brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)$val FJ <- Fest(XJ, eps, breaks=brks, correction=correction, ...) GIJ <- Gmulti(X, I, J, breaks=brks, disjoint=disjoint, correction=correction, ...) rvals <- FJ$r Fnames <- names(FJ) Gnames <- names(GIJ) bothnames <- Fnames[Fnames %in% Gnames] # initialise fv object alim <- attr(FJ, "alim") fname <- c("J", "list(I,J)") Z <- fv(data.frame(r=rvals, theo=1), "r", quote(J[I,J](r)), "theo", . ~ r, alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(J[list(I,J)](r))) # add pieces manually ratio <- function(a, b) { result <- a/b result[ b == 0 ] <- NA result } if("raw" %in% bothnames) { Jun <- ratio(1-GIJ$raw, 1-FJ$raw) Z <- bind.fv(Z, data.frame(un=Jun), makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- ratio(1-GIJ$rs, 1-FJ$rs) Z <- bind.fv(Z, data.frame(rs=Jrs), makefvlabel(NULL, "hat", fname, "rs"), "border corrected estimate of %s", "rs") } if("han" %in% Gnames && "cs" %in% Fnames) { Jhan <- ratio(1-GIJ$han, 1-FJ$cs) Z <- bind.fv(Z, data.frame(han=Jhan), makefvlabel(NULL, "hat", fname, "han"), "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- ratio(1-GIJ$km, 1-FJ$km) Z <- bind.fv(Z, data.frame(km=Jkm), makefvlabel(NULL, "hat", fname, "km"), "Kaplan-Meier estimate of %s", "km") if("hazard" %in% names(GIJ) && "hazard" %in% names(FJ)) { Jhaz <- GIJ$hazard - FJ$hazard Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)", "Kaplan-Meier estimate of derivative of log(%s)") } } # set default plotting values and order nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) # add other info attr(Z, "G") <- GIJ attr(Z, "F") <- FJ attr(Z, "conserve") <- attr(FJ, "conserve") unitname(Z) <- unitname(X) return(Z) } spatstat.core/R/rmhsnoop.R0000644000176200001440000005223714144333463015253 0ustar liggesusers# # rmhsnoop.R # # visual debug mechanism for rmh # # $Revision: 1.33 $ $Date: 2020/01/07 09:14:59 $ # # When rmh is called in visual debug mode (snooping = TRUE), # it calls e <- rmhSnoopEnv(...) to create an R environment 'e' # containing variables that will represent the current state # of the M-H algorithm with initial state X and model reach R. # # The environment 'e' is passed to the C routine xmethas. # This makes it possible for data to be exchanged between # the C and R code. # # When xmethas reaches the debugger's stopping time, # the current state of the simulation and the proposal # are copied from C into the R environment 'e'. 
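#
# A minimal sketch of the environment idiom (illustrative only, not
# package code): the C side writes state variables into 'e' and then
# evaluates a stored callback expression in that same environment.
#
#      e <- new.env()
#      assign("irep", 0L, envir=e)     # C writes the current state
#      expr <- quote(irep + 1L)        # stored callback expression
#      eval(expr, envir=e)             # C triggers the R callback
#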
#
#   Then to execute the visual display, the C code calls
#   'eval' to execute the R function rmhsnoop().
#
#   The function rmhsnoop uses the 'simplepanel' class
#   to generate a plot showing the state of the simulation
#   and the proposal, and then waits for point-and-click input using
#   locator().
#
#   When rmhsnoop() exits, it returns an integer giving the
#   (user-specified) next stopping time. This is read back into
#   the C code. Then xmethas resumes simulations.
#
#   I said it was simple! %^]

rmhSnoopEnv <- function(Xinit, Wclip, R) {
  stopifnot(is.ppp(Xinit))
  # Create an environment that will be accessible to R and C code
  e <- new.env()
  # initial state (point pattern)
  X <- Xinit
  assign("Wsim",    as.owin(X),    envir=e)
  assign("xcoords", coords(X)[,1], envir=e)
  assign("ycoords", coords(X)[,2], envir=e)
  if(is.multitype(X)) {
    mcodes <- as.integer(marks(X)) - 1L
    mlevels <- levels(marks(X))
    assign("mcodes",  mcodes,  envir=e)
    assign("mlevels", mlevels, envir=e)
  } else {
    assign("mcodes",  NULL, envir=e)
    assign("mlevels", NULL, envir=e)
  }
  # clipping window
  assign("Wclip", Wclip, envir=e)
  # reach of model (could be infinite)
  assign("R", R, envir=e)
  # current iteration number
  assign("irep", 0L, envir=e)
  # next iteration to be inspected
  assign("inxt", 1L, envir=e)
  # next transition to be inspected
  assign("tnxt", 1L, envir=e)
  # proposal type
  assign("proptype", NULL, envir=e)
  # outcome of proposal
  assign("itype", NULL, envir=e)
  # proposal location
  assign("proplocn", NULL, envir=e)
  # proposal mark
  assign("propmark", NULL, envir=e)
  # index of proposal point in existing pattern
  assign("propindx", NULL, envir=e)
  # Hastings ratio
  assign("numerator",   NULL, envir=e)
  assign("denominator", NULL, envir=e)
  # Expression actually evaluated to execute visual debug
  # Expression is evaluated in the environment 'e'
  snoopexpr <-
    expression({
      rslt <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R,
                       xcoords=xcoords,
                       ycoords=ycoords,
                       mlevels=mlevels,
                       mcodes=mcodes,
                       irep=irep, itype=itype,
                       proptype=proptype, proplocn=proplocn,
                       propmark=propmark, propindx=propindx,
                       numerator=numerator,
                       denominator=denominator)
      inxt <- rslt$inxt
      tnxt <- rslt$tnxt
      itype <- if(rslt$accepted) proptype else 0
      storage.mode(tnxt) <- storage.mode(inxt) <-
        storage.mode(itype) <- "integer"
    })
  assign("snoopexpr", snoopexpr, envir=e)
  # callback expression
  assign("callbackexpr", quote(eval(snoopexpr)), envir=e)
  return(e)
}

# visual debug display using base graphics

rmhsnoop <- local({

  rmhsnoop <- function(..., Wsim, Wclip, R,
                       xcoords, ycoords, mlevels=NULL, mcodes,
                       irep, itype,
                       proptype, proplocn, propmark, propindx,
                       numerator, denominator,
                       panel.only=FALSE) {
    trap.extra.arguments(..., .Context="In rmhsnoop")
    X <- ppp(xcoords, ycoords, window=Wsim)
    if(ismarked <- (length(mlevels) > 0))
      marks(X) <- factor(mlevels[mcodes+1L], levels=mlevels)
    Wclip.orig <- Wclip
    # determine plot arguments
    if(is.mask(Wclip)) {
      parg.Wclip <- list(invert=TRUE, col="grey")
    } else {
      Wclip <- edges(Wclip)
      parg.Wclip <- list(lty=3, lwd=2, col="grey")
    }
    parg.birth <- list(cols="green", lwd=3)
    parg.death <- list(cols="red", lwd=3)
    parg.birthcircle <- list(col="green", lty=3)
    parg.deathcircle <- list(col="red", lty=3)
    # assemble a layered object representing the state and the proposal
    if(is.null(proptype)) {
      # initial state
      L <- layered(Wsim, Wclip, X)
      layerplotargs(L)$Wclip <- parg.Wclip
      accepted <- TRUE
    } else {
      accepted <- (itype == proptype)
      # add proposal info
      switch(decode.proptype(proptype),
             Reject= {
               propname <- "rejected"
               L <- layered(Wsim=Wsim, Wclip=Wclip, X=X)
               layerplotargs(L)$Wclip <-
parg.Wclip }, Birth = { propname <- "birth proposal" U <- ppp(proplocn[1L], proplocn[2L], window=Wsim) if(ismarked) marks(U) <- factor(mlevels[propmark+1L], levels=mlevels) D <- if(is.finite(R) && R > 0) { edges(disc(R, proplocn))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, PrevState=X, Reach=D, NewPoint=U) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$NewPoint <- parg.birth }, Death = { propname <- "death proposal" # convert from C to R indexing propindx <- propindx + 1 XminI <- X[-propindx] XI <- X[propindx] D <- if(is.finite(R) && R > 0) { edges(disc(R, c(XI$x, XI$y)))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, RetainedPoints=XminI, Reach=D, Deletion=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$Reach <- parg.deathcircle layerplotargs(L)$Deletion <- parg.death }, Shift = { propname <- "shift proposal" # convert from C to R indexing propindx <- propindx + 1L # make objects XminI <- X[-propindx] XI <- X[propindx] U <- ppp(proplocn[1L], proplocn[2L], window=Wsim) if(ismarked) marks(U) <- factor(mlevels[propmark+1L], levels=mlevels) if(is.finite(R) && R > 0) { DU <- edges(disc(R, proplocn))[Wsim] DXI <- edges(disc(R, c(XI$x, XI$y)))[Wsim] } else { DU <- DXI <- NULL } # make layers L <- layered(Wsim=Wsim, Wclip=Wclip, OtherPoints=XminI, ReachAfter=DU, AfterShift=U, ReachBefore=DXI, BeforeShift=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$ReachAfter <- parg.birthcircle layerplotargs(L)$AfterShift <- parg.birth layerplotargs(L)$ReachBefore <- parg.deathcircle layerplotargs(L)$BeforeShift <- parg.death }, stop("Unrecognised proposal type") ) } header <- c(paste("Iteration", irep), propname, paste("Hastings ratio =", signif(numerator, 4), "/", signif(denominator, 4), "=", signif(numerator/denominator, 4))) info <- list(irep=irep, Wsim=Wsim, Wclip=Wclip.orig, X=X, proptype=proptype, proplocn=proplocn, propindx=propindx, propmark=propmark, mlevels=mlevels, accepted=accepted, numerator=numerator, denominator=denominator) inspectProposal(L, info, title=header, panel.only=panel.only) } decode.proptype <- function(n) { if(n < 0 || n > 3) stop(paste("Unrecognised proposal type:", n)) switch(n+1, "Reject", "Birth", "Death", "Shift") } encode.proptype <- function(s) { switch(s, Reject=0, Birth=1, Death=2, Shift=3) } inspectProposal <- function(X, info, ..., title, panel.only=FALSE) { if(missing(title)) title <- short.deparse(substitute(X)) if(!inherits(X, "layered")) X <- layered(X) lnames <- names(X) if(sum(nzchar(lnames)) != length(X)) lnames <- paste("Layer", seq_len(length(X))) # Find window and bounding box (validates X) W <- as.owin(X) BX <- as.rectangle(W) # Initialise environment for state variables etc # This environment is accessible to the panel button functions en <- new.env() assign("X", X, envir=en) assign("W", W, envir=en) assign("BX", BX, envir=en) assign("zoomfactor", 1L, envir=en) midX <- unlist(centroid.owin(BX)) assign("midX", midX, envir=en) assign("zoomcentre", midX, envir=en) assign("irep", info$irep, envir=en) assign("inxt", info$irep+1, envir=en) assign("tnxt", -1, envir=en) assign("accepted", info$accepted, envir=en) assign("proplocn", info$proplocn, envir=en) assign("info", info, envir=en) # Build interactive panel # Start with data panel P <- simplepanel(title, BX, list(Data=BX), list(Data=dataclickfun), list(Data=dataredrawfun), snoopexit, en) # Add pan buttons margin <- max(sidelengths(BX))/4 panelwidth <- sidelengths(BX)[1L]/2 P <- grow.simplepanel(P, "top", margin, navfuns["Up"], aspect=1) P <- grow.simplepanel(P, 
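                          ## (illustrative note) the panel is built
                          ## incrementally: each call to grow.simplepanel()
                          ## glues a strip of buttons onto one side of the
                          ## existing panel, starting from the data rectangle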
"bottom", margin, navfuns["Down"], aspect=1) P <- grow.simplepanel(P, "left", margin, navfuns["Left"], aspect=1) P <- grow.simplepanel(P, "right", margin, navfuns["Right"], aspect=1) # Zoom/Pan buttons at right P <- grow.simplepanel(P, "right", panelwidth, zoomfuns) # Accept/reject buttons at top P <- grow.simplepanel(P, "top", margin, accept.clicks, accept.redraws) # Dump/print buttons at bottom P <- grow.simplepanel(P, "bottom", margin, dumpfuns) # Jump controls at left maxchars <- max(4, nchar(names(jump.clicks))) P <- grow.simplepanel(P, "left", panelwidth * maxchars/6, jump.clicks) ## exit for debug/test code if(panel.only) return(P) ## go rslt <- run.simplepanel(P, popup=FALSE) clear.simplepanel(P) rm(en) return(rslt) } # button control functions zoomfuns <- rev(list( "Zoom In"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z * 2, envir=env) return(TRUE) }, "Zoom Out"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z / 2, envir=env) return(TRUE) }, "At Proposal"=function(env, xy) { proplocn <- get("proplocn", envir=env) assign("zoomcentre", proplocn, envir=env) return(TRUE) }, Reset=function(env, xy) { assign("zoomfactor", 1L, envir=env) midX <- get("midX", envir=env) assign("zoomcentre", midX, envir=env) return(TRUE) })) navfuns <- list( Left = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1L] stepsize <- (width/4)/zoom ce <- ce - c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Right = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1L] stepsize <- (width/4)/zoom ce <- ce + c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Up = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce + c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }, Down = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce - c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }) accept.clicks <- rev(list( Accept=function(env, xy) { assign("accepted", TRUE, envir=env) return(TRUE) }, Reject=function(env, xy) { assign("accepted", FALSE, envir=env) return(TRUE) })) accept.redraws <- rev(list( Accept=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE, col="green") } else { plot(button, add=TRUE) } text(centroid.owin(button), labels=name) }, Reject=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE) } else { plot(button, add=TRUE, col="pink") } text(centroid.owin(button), labels=name) })) jump.clicks <- rev(list( "Next Iteration"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1, envir=env) return(FALSE) }, "Skip 10"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+10, envir=env) return(FALSE) }, "Skip 100"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100, envir=env) return(FALSE) }, "Skip 1000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1000, envir=env) return(FALSE) }, "Skip 10,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", 
irep+10000, envir=env) return(FALSE) }, "Skip 100,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100000, envir=env) return(FALSE) }, "Next Birth"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Birth"), envir=env) return(FALSE) }, "Next Death"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Death"), envir=env) return(FALSE) }, "Next Shift"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Shift"), envir=env) return(FALSE) }, "Exit Debugger"=function(env, xy) { assign("inxt", -1L, envir=env) return(FALSE) })) dataclickfun <- function(env, xy) { # function for handling clicks in the data window z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) midX <- get("midX", envir=env) ce <- ce + (unlist(xy) - midX)/z assign("zoomcentre", ce, envir=env) return(TRUE) } dataredrawfun <- function(button, name, env) { # redraw data window X <- get("X", envir=env) BX <- get("BX", envir=env) W <- get("W", envir=env) midX <- get("midX", envir=env) z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) scaleX <- shift(affine(shift(X, -ce), diag(c(z,z))), unlist(midX)) scaleW <- shift(affine(shift(W, -ce), diag(c(z,z))), unlist(midX)) scaleX <- scaleX[, BX] scaleW <- intersect.owin(scaleW, BX, fatal=FALSE) # redraw data in 'BX' if(!is.null(scaleW)) { if(z == 1 && is.rectangle(scaleW)) { plot(scaleW, add=TRUE, lwd=2) } else { plot(BX, add=TRUE, lty=3, border="red") if(!identical(BX, scaleW)) plot(scaleW, add=TRUE, invert=TRUE) } } if(!is.null(scaleX)) plot(scaleX, add=TRUE) invisible(NULL) } # functions to dump the current state, etc dumpfuns <- list( "Dump to file"=function(env, xy) { irep <- get("irep", envir=env) X <- get("X", envir=env) xname <- paste("dump", irep, sep="") assign(xname, X) fname <- paste(xname, ".rda", sep="") eval(substitute(save(x, file=y, compress=TRUE), list(x=xname, y=fname))) splat("Saved to", sQuote(fname)) return(TRUE) }, "Print Info"=function(env, xy) { info <- get("info", envir=env) will.accept <- get("accepted", envir=env) cat("\n\n------------------- \n") with(info, { splat("Iteration", irep) splat("Simulation window:") print(Wsim) splat("Clipping window:") print(Wclip) splat("Current state:") print(X) propname <- decode.proptype(proptype) splat("Proposal type:", propname) switch(propname, Reject = { }, Birth = { splat("Birth of new point at location", pastepoint(proplocn, propmark, mlevels)) }, Death = { Xi <- X[propindx] splat("Death of data point", propindx, "located at", pastepoint(Xi)) }, Shift = { Xi <- X[propindx] splat("Shift data point", propindx, "from current location", pastepoint(Xi), "to new location", pastepoint(proplocn, propmark, mlevels)) }) splat("Hastings ratio = ", numerator, "/", denominator, "=", numerator/denominator) splat("Fate of proposal:", if(will.accept) "Accepted" else "Rejected") return(TRUE) }) }) pastepoint <- function(X, markcode, marklevels) { if(is.ppp(X)) { xy <- coords(X) m <- if(is.marked(X)) dQuote(marks(X)) else NULL } else { xy <- X m <- if(length(marklevels)) dQuote(marklevels[markcode+1L]) else NULL } xy <- signif(as.numeric(xy), 6) paren(paste(c(xy, m), collapse=", ")) } # function to determine return value snoopexit <- function(env) { ans <- eval(quote(list(inxt=inxt, tnxt=tnxt, accepted=accepted)), envir=env) return(ans) } rmhsnoop }) # testit <- function() { # rmhsnoop(Wsim=owin(), Wclip=square(0.7), R=0.1, # xcoords=runif(40), # ycoords=runif(40), # mlevels=NULL, 
mcodes=NULL, # irep=3, itype=1, # proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, # numerator=42, denominator=24) # } spatstat.core/R/densityAdaptiveKernel.R0000644000176200001440000000506414144333462017677 0ustar liggesusers#' #' densityAdaptiveKernel.R #' #' $Revision: 1.4 $ $Date: 2019/03/07 02:58:08 $ #' #' #' Adaptive kernel smoothing via 3D FFT #' densityAdaptiveKernel <- function(X, ...) { UseMethod("densityAdaptiveKernel") } densityAdaptiveKernel.ppp <- function(X, bw, ..., weights=NULL, at=c("pixels", "points"), edge=TRUE, ngroups) { stopifnot(is.ppp(X)) at <- match.arg(at) nX <- npoints(X) if(nX == 0) switch(at, points = return(numeric(nX)), pixels = return(as.im(0, W=Window(X), ...))) if(missing(ngroups) || is.null(ngroups)) { ngroups <- max(1L, floor(sqrt(npoints(X)))) } else if(any(is.infinite(ngroups))) { ngroups <- nX } else { check.1.integer(ngroups) ngroups <- min(nX, ngroups) } if(weighted <- !is.null(weights)) { check.nvector(weights, nX, oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, nX) } else weights <- rep(1,nX) ## determine bandwidth for each data point if(missing(bw) || is.null(bw)) { bw <- do.call.matched(bw.abram, resolve.defaults(list(X=quote(X), at="points"), list(...)), extrargs=names(args(as.mask))) } else if(is.numeric(bw)) { check.nvector(bw, nX, oneok=TRUE) if(length(bw) == 1) bw <- rep(bw, nX) } else if(is.im(bw)) { bw <- safelookup(bw, X, warn=FALSE) if(anyNA(bw)) stop("Some data points lie outside the domain of image 'bw'", call.=FALSE) } else if(inherits(bw, "funxy")) { bw <- bw(X) if(anyNA(bw)) stop("Some data points lie outside the domain of function 'bw'", call.=FALSE) } else stop("Argument 'bw' should be a numeric vector or a pixel image") #' divide bandwidths into groups p <- seq(0,1,length=ngroups+1) qbands <- quantile(bw, p) groupid <- findInterval(bw,qbands,all.inside=TRUE) #' map to middle of group pmid <- (p[-1] + p[-length(p)])/2 qmid <- quantile(bw, pmid) marks(X) <- if(weighted) weights else NULL group <- factor(groupid, levels=1:ngroups) Y <- split(X, group) Z <- mapply(density.ppp, x=Y, sigma=as.list(qmid), weights=lapply(Y, marks), MoreArgs=list(edge=edge, at=at, ...), SIMPLIFY=FALSE) ZZ <- switch(at, pixels = im.apply(Z, "sum"), points = unsplit(Z, group)) return(ZZ) } spatstat.core/R/harmonic.R0000644000176200001440000000327614144333462015204 0ustar liggesusers# # # harmonic.R # # $Revision: 1.2 $ $Date: 2004/01/07 08:57:39 $ # # harmonic() # Analogue of polynom() for harmonic functions only # # ------------------------------------------------------------------- # harmonic <- function(x,y,n) { if(missing(n)) stop("the order n must be specified") n <- as.integer(n) if(is.na(n) || n <= 0) stop("n must be a positive integer") if(n > 3) stop("Sorry, harmonic() is not implemented for degree > 3") namex <- deparse(substitute(x)) namey <- deparse(substitute(y)) if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") if(!is.name(substitute(y))) namey <- paste("(", namey, ")", sep="") switch(n, { result <- cbind(x, y) names <- c(namex, namey) }, { result <- cbind(x, y, x*y, x^2-y^2) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep="")) }, { result <- cbind(x, y, x * y, x^2-y^2, x^3 - 3 * x * y^2, y^3 - 3 * x^2 * y) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep=""), paste("(", namex, "^3-3", namex, ".", namey, "^2)", sep=""), paste("(", namey, "^3-3", namex, "^2.", namey, ")", 
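                 ## (illustrative note) every column built here is a
                 ## harmonic polynomial, i.e. its Laplacian vanishes:
                 ## for u = x^2 - y^2, u_xx + u_yy = 2 - 2 = 0, and for
                 ## u = x^3 - 3*x*y^2, u_xx + u_yy = 6*x - 6*x = 0.
                 ## Non-harmonic terms such as x^2 or x^2*y are omitted.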
sep="") ) } ) dimnames(result) <- list(NULL, names) return(result) } spatstat.core/R/sharpen.R0000644000176200001440000000406214144333464015040 0ustar liggesusers# # sharpen.R # # $Revision: 1.6 $ $Date: 2013/08/29 03:52:17 $ # sharpen <- function(X, ...) { UseMethod("sharpen") } sharpen.ppp <- function(X, sigma=NULL, ..., varcov=NULL, edgecorrect=FALSE) { stopifnot(is.ppp(X)) Yx <- Smooth(X %mark% X$x, at="points", sigma=sigma, varcov=varcov, edge=TRUE) Yy <- Smooth(X %mark% X$y, at="points", sigma=sigma, varcov=varcov, edge=TRUE) # trap NaN etc nbad <- sum(!(is.finite(Yx) & is.finite(Yy))) if(nbad > 0) stop(paste(nbad, ngettext(nbad, "point is", "points are"), "undefined due to numerical problems;", "smoothing parameter is probably too small")) # W <- as.owin(X) if(edgecorrect) { # convolve x and y coordinate functions with kernel xim <- as.im(function(x,y){x}, W) yim <- as.im(function(x,y){y}, W) xblur <- blur(xim, sigma=sigma, varcov=varcov, normalise=TRUE, ...) yblur <- blur(yim, sigma=sigma, varcov=varcov, normalise=TRUE, ...) # evaluate at data locations xx <- safelookup(xblur, X, warn=FALSE) yy <- safelookup(yblur, X, warn=FALSE) # estimated vector bias of sharpening procedure xbias <- xx - X$x ybias <- yy - X$y # adjust Yx <- Yx - xbias Yy <- Yy - ybias # check this does not place points outside window if(any(uhoh <- !inside.owin(Yx, Yy, W))) { # determine mass of edge effect edgeim <- blur(as.im(W), sigma=sigma, varcov=varcov, normalise=FALSE, ...) edg <- safelookup(edgeim, X[uhoh], warn=FALSE) # contract bias correction Yx[uhoh] <- (1 - edg) * X$x[uhoh] + edg * Yx[uhoh] Yy[uhoh] <- (1 - edg) * X$y[uhoh] + edg * Yy[uhoh] } # check again if(any(nbg <- !inside.owin(Yx, Yy, W))) { # give up Yx[nbg] <- X$x[nbg] Yy[nbg] <- X$y[nbg] } } # make point pattern Y <- ppp(Yx, Yy, marks=marks(X), window=W) # tack on smoothing information attr(Y, "sigma") <- sigma attr(Y, "varcov") <- varcov attr(Y, "edgecorrected") <- edgecorrect return(Y) } spatstat.core/R/measures.R0000644000176200001440000006215414144333462015230 0ustar liggesusers# # measures.R # # signed/vector valued measures with atomic and diffuse components # # $Revision: 1.96 $ $Date: 2020/11/30 09:40:48 $ # msr <- function(qscheme, discrete, density, check=TRUE) { if(!is.quad(qscheme)) stop("qscheme should be a quadrature scheme") nquad <- n.quad(qscheme) U <- union.quad(qscheme) wt <- w.quad(qscheme) Z <- is.data(qscheme) ndata <- sum(Z) # ensure conformable vectors/matrices stopifnot(is.numeric(discrete) || is.logical(discrete)) stopifnot(is.numeric(density)) if(is.vector(discrete) && is.vector(density)) { # handle constants if(length(discrete) == 1) discrete <- rep.int(discrete, ndata) if(length(density) == 1) density <- rep.int(density, nquad) # check lengths if(check) { check.nvector(discrete, ndata, things="data points", naok=TRUE) check.nvector(density, nquad, things="quadrature points", naok=TRUE) } discretepad <- numeric(nquad) discretepad[Z] <- discrete } else { if(length(discrete) == 1 && is.matrix(density)) { # replicate constant 'discrete' component to matrix of correct size discrete <- matrix(discrete, ndata, ncol(density)) } else if(length(density) == 1 && is.matrix(discrete)) { # replicate constant 'density' to matrix of correct size density <- matrix(density, nquad, ncol(discrete)) } else { discrete <- as.matrix(discrete) density <- as.matrix(density) } if(check) { # check numbers of rows check.nmatrix(discrete, ndata, things="data points", naok=TRUE, squarematrix=FALSE) check.nmatrix(density, nquad, 
things="quadrature points", naok=TRUE, squarematrix=FALSE) } nd <- ncol(discrete) nc <- ncol(density) if(nd != nc) { if(nd == 1) { # replicate columns of discrete component discrete <- matrix(rep.int(discrete, nc), ndata, nc) colnames(discrete) <- colnames(density) } else if(nc == 1) { # replicate columns of density component density <- matrix(rep.int(density, nd), nquad, nd) colnames(density) <- colnames(discrete) } else stop(paste("Incompatible numbers of columns in", sQuote("discrete"), paren(nd), "and", sQuote("density"), paren(nc))) } discretepad <- matrix(0, nquad, max(nd, nc)) discretepad[Z, ] <- discrete colnames(discretepad) <- colnames(density) } ## ## Discretised measure (value of measure for each quadrature tile) ## val <- discretepad + wt * density if(is.matrix(density)) colnames(val) <- colnames(density) ## ## if(check && !all(ok <- complete.cases(val))) { warning("Some infinite, NA or NaN increments were removed", call.=FALSE) val <- ok * val discretepad <- ok * discretepad density <- ok * density } ## finished out <- list(loc = U, val = val, atoms = Z, discrete = discretepad, density = density, wt = wt) class(out) <- "msr" return(out) } # Translation table for usage of measures # # e.g. res <- residuals(fit, ...) # # OLD NEW # res[ ] res$val[ ] with(res, "increment") # attr(res, "atoms") res$atoms with(res, "is.atom") # attr(res, "discrete") res$discrete with(res, "discrete") # attr(res, "continuous") res$density with(res, "density") # w.quad(quad.ppm(fit)) res$wt with(res, "qweights") # union.quad(quad.ppm(fit)) res$loc with(res, "qlocations") # ................................................. with.msr <- function(data, expr, ...) { stopifnot(inherits(data, "msr")) stuff <- list(increment = data$val, is.atom = data$atoms, discrete = data$discrete, density = data$density, continuous = data$density * data$wt, qweights = data$wt, qlocations = data$loc, atoms = data$loc[data$atoms], atommass = marksubset(data$discrete, data$atoms)) y <- eval(substitute(expr), envir=stuff, enclos=parent.frame()) if(is.character(y) && length(y) == 1 && y %in% names(stuff)) y <- stuff[[y]] return(y) } print.msr <- function(x, ...) { xloc <- x$loc n <- npoints(xloc) d <- ncol(as.matrix(x$val)) splat(paste0(if(d == 1) "Scalar" else paste0(d, "-dimensional vector"), "-valued measure")) if(d > 1 && !is.null(cn <- colnames(x$val)) && waxlyrical("space")) splat("vector components:", commasep(sQuote(cn))) if(is.marked(xloc)) { splat("\tDefined on 2-dimensional space x marks") if(is.multitype(xloc)) exhibitStringList("\tPossible marks: ", levels(marks(xloc))) } if(waxlyrical("gory")) { splat("Approximated by", n, "quadrature points") print(as.owin(xloc)) splat(sum(x$atoms), "atoms") } if(waxlyrical("extras")) { splat("Total mass:") if(d == 1) { splat("discrete =", signif(sum(with(x, "discrete")), 5), " continuous =", signif(sum(with(x, "continuous")), 5), " total =", signif(sum(with(x, "increment")), 5)) } else { if(is.null(cn)) cn <- paste("component", 1:d) for(j in 1:d) { splat(paste0(cn[j], ":\t"), "discrete =", signif(sum(with(x, "discrete")[,j]), 5), " continuous =", signif(sum(with(x, "continuous")[,j]), 5), " total =", signif(sum(with(x, "increment")[,j]), 5)) } } } return(invisible(NULL)) } summary.msr <- function(object, ...) { print(object) } is.multitype.msr <- function(X, ...) { is.multitype(X$loc, ...) } is.marked.msr <- function(X, ...) { is.marked(X$loc, ...) } split.msr <- function(x, f, drop=FALSE, ...) 
{ xloc <- x$loc ## determine split using rules for split.ppp locsplit <- if(missing(f)) split(xloc, drop=drop) else split(xloc, f, drop=drop) ## extract grouping factor g <- attr(locsplit, "fgroup") ## split contributions to measure atomsplit <- split(x$atoms, g, drop=drop) # hyuk wtsplit <- split(x$wt, g, drop=drop) if(ncol(x) == 1) { ## scalar measure valsplit <- split(x$val, g, drop=drop) discsplit <- split(x$discrete, g, drop=drop) denssplit <- split(x$density, g, drop=drop) } else { ## vector measure valsplit <- lapply(split(as.data.frame(x$val), g, drop=drop), as.matrix) discsplit <- lapply(split(as.data.frame(x$discrete), g, drop=drop), as.matrix) denssplit <- lapply(split(as.data.frame(x$density), g, drop=drop), as.matrix) } ## form the component measures result <- mapply(list, loc=locsplit, val=valsplit, atoms=atomsplit, discrete=discsplit, density=denssplit, wt=wtsplit, SIMPLIFY=FALSE) names(result) <- names(locsplit) result <- lapply(result, "class<-", value="msr") if(drop && any(isnul <- (sapply(locsplit, npoints) == 0))) result[isnul] <- NULL result <- as.solist(result) return(result) } integral.msr <- function(f, domain=NULL, ...) { stopifnot(inherits(f, "msr")) if(is.tess(domain)) { result <- sapply(tiles(domain), integral.msr, f = f) if(length(dim(result)) > 1) result <- t(result) return(result) } if(!is.null(domain)) f <- f[domain] y <- with(f, "increment") z <- if(is.matrix(y)) apply(y, 2, sum) else sum(y) return(z) } update.msr <- function(object, ...) { #' reconcile internal data if(!is.null(smo <- attr(object, "smoothdensity"))) { sigma <- attr(smo, "sigma") object <- augment.msr(object, ..., sigma=sigma, recompute=TRUE) } return(object) } plot.msr <- function(x, ..., add=FALSE, how=c("image", "contour", "imagecontour"), main=NULL, do.plot=TRUE, multiplot=TRUE, massthresh=0, equal.markscale=FALSE, equal.ribbon=FALSE) { if(is.null(main)) main <- short.deparse(substitute(x)) how <- match.arg(how) if(!multiplot) { ## compress everything to a single panel x$loc <- unmark(x$loc) if(is.matrix(x$val)) x$val <- rowSums(x$val) if(is.matrix(x$discrete)) x$discrete <- rowSums(x$discrete) if(is.matrix(x$density)) x$density <- rowSums(x$density) if(!is.null(smo <- attr(x, "smoothdensity")) && inherits(smo, "solist")) attr(x, "smoothdensity") <- im.apply(smo, sum, check=FALSE) ## WAS: attr(x, "smoothdensity") <- Reduce("+", smo) } d <- dim(x)[2] k <- if(is.multitype(x)) length(levels(marks(x$loc))) else 1 ## multiple plot panels may be generated if(k == 1 && d == 1) { ## single plot y <- solist(x) } else if(k > 1 && d == 1) { ## multitype y <- split(x) } else if(k == 1 && d > 1) { ## vector-valued y <- unstack(x) } else if(k > 1 && d > 1) { ## both multitype and vector-valued y <- split(x) typenames <- names(y) vecnames <- colnames(x$val) y <- unstack(y) names(y) <- as.vector(t(outer(typenames, vecnames, paste, sep="."))) } #' ensure image of density is present y <- solapply(y, augment.msr) #' ready to plot if(length(y) > 1) { ## plot as an array of panels userarg <- list(...) rowcol <- list(nrows=k, ncols=d) if(any(c("nrows", "ncols") %in% names(userarg))) rowcol <- list() #' determine common scales if required scaleinfo <- list() if(equal.markscale) { W <- Window(x) #' extract vectors of atomic masses from each panel marx <- lapply(y, with, "atommass") #' make a separate scale calculation for each panel scales <- sapply(marx, mark.scale.default, w=W, ...) 
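      ## (illustrative note) the smallest per-panel scale is used for all
      ## panels, so that symbol sizes remain directly comparable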
scaleinfo$markscale <- min(scales) scaleinfo$markrange <- range(unlist(marx)) } if(equal.ribbon) { images <- lapply(y, attr, which="smoothdensity") scaleinfo$zlim <- range(sapply(images, range)) } ## go result <- do.call(plot.solist, resolve.defaults(list(quote(y)), userarg, rowcol, scaleinfo, list(how=how, main=main, equal.scales=TRUE, halign=TRUE, valign=TRUE, claim.title.space=TRUE))) return(invisible(result)) } ## scalar measure x <- y[[1]] ## get atoms xatomic <- (x$loc %mark% x$discrete)[x$atoms] if(length(massthresh) && all(is.finite(massthresh))) { ## ignore atoms with absolute mass <= massthresh check.1.real(massthresh) xatomic <- xatomic[abs(marks(xatomic)) > massthresh] } xtra.im <- graphicsPars("image") xtra.pp <- setdiff(graphicsPars("ppp"), c("box", "col")) xtra.pp <- union(xtra.pp, c("markrange", "marklevels")) xtra.ow <- graphicsPars("owin") smo <- attr(x, "smoothdensity") ## do.image <- how %in% c("image", "imagecontour") do.contour <- how %in% c("contour", "imagecontour") ## allocate space for plot and legend using do.plot=FALSE mechanism pdata <- do.call.matched(plot.ppp, resolve.defaults(list(x=quote(xatomic), do.plot=FALSE, main=main), list(...), list(show.all=TRUE)), extrargs=xtra.pp) result <- pdata bb <- attr(pdata, "bbox") if(do.image) { idata <- do.call.matched(plot.im, resolve.defaults(list(x=quote(smo), main=main, do.plot=FALSE), list(...)), extrargs=xtra.im) result <- idata bb <- boundingbox(bb, attr(idata, "bbox")) } ## attr(result, "bbox") <- bb ## if(do.plot) { if(!add) { blankmain <- prepareTitle(main)$blank ## initialise plot do.call.matched(plot.owin, resolve.defaults(list(x=quote(bb), type="n", main=blankmain), list(...)), extrargs=xtra.ow) } ## display density if(do.image) do.call.matched(plot.im, resolve.defaults(list(x=quote(smo), add=TRUE), list(...), list(main=main, show.all=TRUE)), extrargs=xtra.im) if(do.contour) do.call.matched(contour.im, resolve.defaults(list(x=quote(smo), add=TRUE), list(...), list(main=main, axes=FALSE, show.all=!do.image)), extrargs=c("zlim", "labels", "labcex", ## DO NOT ALLOW 'col' "drawlabels", "method", "vfont", "lty", "lwd", "claim.title.space")) ## display atoms do.call.matched(plot.ppp, resolve.defaults(list(x=quote(xatomic), add=TRUE, main=""), list(...), list(show.all=TRUE)), extrargs=xtra.pp) } return(invisible(result)) } "[.msr" <- function(x, i, j, ...) { valu <- as.matrix(x$val) disc <- as.matrix(x$discrete) dens <- as.matrix(x$density) wt <- x$wt atoms <- x$atoms # if(!missing(j)) { valu <- valu[, j] disc <- disc[, j] dens <- dens[, j] } loc <- x$loc if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i, clip=TRUE] loc <- unmark(loci) id <- marks(loci) # extract valu <- valu[id, , drop=FALSE] disc <- disc[id, , drop=FALSE] dens <- dens[id, , drop=FALSE] wt <- wt[id] atoms <- atoms[id] } out <- list(loc=loc, val=valu, atoms=atoms, discrete=disc, density=dens, wt=wt) class(out) <- "msr" return(out) } dim.msr <- function(x) { dim(as.matrix(x$val)) } dimnames.msr <- function(x) { list(NULL, colnames(x$val)) } # smooth.msr <- function(X, ...) { # .Deprecated("Smooth.msr", package="spatstat", # msg="smooth.msr is deprecated: use the generic Smooth with a capital S") # Smooth(X, ...) # } Smooth.msr <- function(X, ..., drop=TRUE) { verifyclass(X, "msr") loc <- X$loc val <- X$val result <- density(loc, weights=val, ...) 
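  ## (illustrative note) kernel smoothing of a measure: each quadrature
  ## point contributes its increment 'val' as a weight in density.ppp,
  ## so the result is an image estimate of the density of the measure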
if(!drop && is.im(result))
    result <- solist(result)
  return(result)
}

as.owin.msr <- function(W, ..., fatal=TRUE) {
  as.owin(W$loc, ..., fatal=fatal)
}

domain.msr <- Window.msr <- function(X, ...) { as.owin(X) }

shift.msr <- function(X, ...) {
  X$loc <- Xloc <- shift(X$loc, ...)
  if(!is.null(smo <- attr(X, "smoothdensity")))
    attr(X, "smoothdensity") <- shift(smo, getlastshift(Xloc))
  putlastshift(X, getlastshift(Xloc))
}

as.layered.msr <- local({

  as.layered.msr <- function(X) {
    nc <- ncol(X)
    if(nc == 0) return(layered())
    if(nc == 1) return(layered(X))
    Y <- lapply(seq_len(nc), pickcol, x=X)
    names(Y) <- colnames(X)
    return(layered(LayerList=Y))
  }

  pickcol <- function(j,x) x[,j]

  as.layered.msr
})

unitname.msr <- function(x) unitname(x$loc)

"unitname<-.msr" <- function(x, value) {
  unitname(x$loc) <- value
  return(x)
}

scalardilate.msr <- function(X, f, ...) {
  X$loc <- scalardilate(X$loc, f, ...)
  X$density <- X$density/f^2
  X$wt <- X$wt * f^2
  return(X)
}

rotate.msr <- function(X, angle=pi/2, ..., centre=NULL) {
  X$loc <- rotate(X$loc, angle=angle, ..., centre=centre)
  return(X)
}

flipxy.msr <- function(X) {
  X$loc <- flipxy(X$loc)
  return(X)
}

rescale.msr <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
  Y <- scalardilate(X, 1/s)
  unitname(Y) <- rescale(unitname(X), s, unitname)
  return(Y)
}

affine.msr <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) {
  X$loc <- affine(X$loc, mat=mat, vec=vec, ...)
  detmat <- abs(det(mat))
  X$density <- X$density/detmat
  X$wt <- X$wt * detmat
  return(X)
}

Ops.msr <- function(e1,e2=NULL){
  vn <- c("val", "discrete", "density")
  if(nargs() == 1L) {
    #' unary operator
    if(!is.element(.Generic, c("+", "-")))
      stop(paste("Unary operation", sQuote(paste0(.Generic, "A")),
                 "is undefined for a measure A."), call.=FALSE)
    e1 <- unclass(e1)
    e1[vn] <- lapply(e1[vn], .Generic)
    if(!is.null(sm <- attr(e1, "smoothdensity")))
      attr(e1, "smoothdensity") <- do.call(.Generic, list(sm))
    class(e1) <- "msr"
    return(e1)
  } else {
    #' binary operator
    m1 <- inherits(e1, "msr")
    m2 <- inherits(e2, "msr")
    if(m1 && m2) {
      if(!is.element(.Generic, c("+", "-")))
        stop(paste("Operation", sQuote(paste0("A", .Generic, "B")),
                   "is undefined for measures A, B"), call.=FALSE)
      k1 <- dim(e1)[2]
      k2 <- dim(e2)[2]
      if(k1 != k2)
        stop(paste("Operation", sQuote(paste0("A", .Generic, "B")),
                   "is undefined because A, B have incompatible dimensions:",
                   "A is", ngettext(k1, "scalar", paste0(k1, "-vector")),
                   ", B is", ngettext(k2, "scalar", paste0(k2, "-vector"))),
             call.=FALSE)
      if(!identical(e1$loc, e2$loc)) {
        haha <- harmonise(e1, e2)
        e1 <- haha[[1L]]
        e2 <- haha[[2L]]
      }
      e1 <- unclass(e1)
      e2 <- unclass(e2)
      e1[vn] <- mapply(.Generic, e1[vn], e2[vn], SIMPLIFY=FALSE)
      class(e1) <- "msr"
      #' handle smoothed densities
      sm1 <- attr(e1, "smoothdensity")
      sm2 <- attr(e2, "smoothdensity")
      sm <- if(is.null(sm1) || is.null(sm2)) { NULL
      } else if(is.im(sm1) && is.im(sm2)) {
        do.call(.Generic, list(sm1, sm2))
      } else if(is.im(sm1) && is.solist(sm2)) {
        mapply(.Generic, e2=sm2, MoreArgs=list(e1=sm1), SIMPLIFY=FALSE)
      } else if(is.solist(sm1) && is.im(sm2)) {
        mapply(.Generic, e1=sm1, MoreArgs=list(e2=sm2), SIMPLIFY=FALSE)
      } else if(is.solist(sm1) && is.solist(sm2)) {
        mapply(.Generic, e1=sm1, e2=sm2, SIMPLIFY=FALSE)
      } else NULL
      attr(e1, "smoothdensity") <- sm
      return(e1)
    } else if(m1 && is.numeric(e2)) {
      if(!is.element(.Generic, c("/", "*")))
        stop(paste("Operation", sQuote(paste0("A", .Generic, "z")),
                   "is undefined for a measure A and numeric z."),
             call.=FALSE)
      e1 <- unclass(e1)
      e1[vn] <-
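      ## Illustrative sketch of measure arithmetic (not from the original
      ## source), for measures res1, res2 of the same dimension:
      ##     -res1           # negation; unary + and - only
      ##     res1 + res2     # sum of two measures
      ##     2 * res1        # scaling by a number; res1/2 also works
      ## Products of two measures, and division of a number by a measure,
      ## are deliberately left undefined.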
lapply(e1[vn], .Generic, e2=e2) class(e1) <- "msr" #' handle smoothed density sm1 <- attr(e1, "smoothdensity") sm <- if(is.null(sm1)) NULL else if(is.im(sm1)) do.call(.Generic, list(e1=sm1, e2=e2)) else if(is.solist(sm1)) solapply(sm1, .Generic, e2=e2) else NULL attr(e1, "smoothdensity") <- sm return(e1) } else if(m2 && is.numeric(e1)) { if(.Generic != "*") stop(paste("Operation", sQuote(paste0("z", .Generic, "A")), "is undefined for a measure A and numeric z."), call.=FALSE) e2 <- unclass(e2) e2[vn] <- lapply(e2[vn], .Generic, e1=e1) class(e2) <- "msr" #' handle smoothed density sm2 <- attr(e2, "smoothdensity") sm <- if(is.null(sm2)) NULL else if(is.im(sm2)) do.call(.Generic, list(e1=e1, e2=sm2)) else if(is.solist(sm2)) solapply(sm2, .Generic, e1=e1) else NULL attr(e2, "smoothdensity") <- sm return(e2) } stop(paste("Operation", sQuote(paste0("e1", .Generic, "e2")), "is undefined for this kind of data"), call.=FALSE) } } measurePositive <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- pmax(0, x$discrete) y$density <- pmax(0, x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } measureNegative <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- -pmin(0, x$discrete) y$density <- -pmin(0, x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } measureVariation <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- abs(x$discrete) y$density <- abs(x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } totalVariation <- function(x) integral(measureVariation(x)) measureDiscrete <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$density[] <- 0 y$val <- y$discrete y <- update(y) return(y) } measureContinuous <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete[] <- 0 y$val <- y$wt * y$density y <- update(y) return(y) } harmonise.msr <- local({ harmonise.msr <- function(...) { argz <- list(...) 
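    ## (illustrative note) strategy: rebuild each measure on the union of
    ## all quadrature points, carrying each density to its nearest
    ## neighbour, so that pointwise arithmetic in Ops.msr is well defined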
n <- length(argz) if(n == 0) return(argz) ismeasure <- sapply(argz, inherits, what="msr") if(!any(ismeasure)) stop("No measures supplied") if(!all(ismeasure)) stop("All arguments should be measures (objects of class msr)") if(n < 2) return(argz) result <- vector(mode="list", length=n) ## extract entries loclist <- lapply(argz, getElement, name="loc") atomlist <- lapply(argz, getElement, name="atoms") masslist <- lapply(argz, getElement, name="discrete") denslist <- lapply(argz, getElement, name="density") ## check for compatible dimensions of measure values dimen <- unique(sapply(argz, ncol)) if(length(dimen) > 1) stop("Measures have different dimensions:", commasep(sort(dimen))) ## check for marked points ismarked <- sapply(loclist, is.marked) if(any(ismarked) && !all(ismarked)) stop("Some, but not all, quadrature schemes are marked") ismarked <- all(ismarked) ## union of all quadrature points in all measures Uloc <- do.call(superimpose, append(unname(loclist), list(check=FALSE))) Uloc <- unique(Uloc) nU <- npoints(Uloc) ## match each quadrature set to the union ## and find nearest data point to each point in the union if(!ismarked) { matchlist <- lapply(loclist, nncross, Y=Uloc, what="which") nearlist <- lapply(loclist, ssorcnn, xx=Uloc, what="which") } else { stop("Not yet implemented for marked quadrature schemes") } ## nearest neighbour interpolation of density values of each argument ## onto the common quadrature set Udenslist <- mapply(extract, x=denslist, i=nearlist, SIMPLIFY=FALSE) ## initialise other bits noatoms <- logical(nU) zeromass <- if(dimen == 1) numeric(nU) else matrix(0, nU, dimen) Uatomlist <- rep(list(noatoms), n) Umasslist <- rep(list(zeromass), n) ## assign atoms in each argument Uatomlist <- mapply(subsetgets, x=Uatomlist, i=matchlist, value=atomlist, SIMPLIFY=FALSE) Umasslist <- mapply(subsetgets, x=Umasslist, i=matchlist, value=masslist, SIMPLIFY=FALSE) ## union of atoms isatom <- Reduce("|", Uatomlist) ## masses at atoms Umasslist <- lapply(Umasslist, extract, i=isatom) ## make common quadrature scheme UQ <- quadscheme(Uloc[isatom], Uloc[!isatom]) ## reorder density data correspondingly neworder <- c(which(isatom), which(!isatom)) Udenslist <- lapply(Udenslist, extract, i=neworder) ## make new measures result <- mapply(msr, MoreArgs=list(qscheme=UQ), discrete=Umasslist, density=Udenslist, SIMPLIFY=FALSE) names(result) <- names(argz) class(result) <- unique(c("solist", class(result))) return(result) } ssorcnn <- function(xx, yy, what) nncross(xx, yy, what=what) extract <- function(x, i) { if(is.matrix(x)) x[i, , drop=FALSE] else x[i] } subsetgets <- function(x, i, value) { if(is.matrix(x)) { x[i, ] <- value } else { x[i] <- value } return(x) } harmonise.msr }) unstack.msr <- function(x, ...) { trap.extra.arguments(...) 
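  ## (illustrative note) a k-dimensional vector-valued measure is split
  ## into a list of k scalar measures, one per column of x$val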
d <- dim(x) if(is.null(d)) return(solist(x)) smo <- attr(x, "smoothdensity") if(!inherits(smo, "imlist")) smo <- NULL nc <- d[2] y <- vector(mode="list", length=nc) for(j in seq_len(nc)) { xj <- x[,j,drop=FALSE] if(!is.null(smo)) attr(xj, "smoothdensity") <- smo[[j]] y[[j]] <- xj } names(y) <- colnames(x) return(as.solist(y)) } spatstat.core/R/ppqq.R0000644000176200001440000000737214144333463014367 0ustar liggesusers## ## ppqq.R ## ## P-P and Q-Q versions of fv objects ## PPversion <- local({ PPversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## set up inverse theoretical function f_0: 'theo' |-> 'r' xname <- fvnames(f, ".x") df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## evaluate f_0^{-1}(theo) for evenly-spaced grid of 'theo' values ra <- range(theo.table) theo.seq <- seq(from=ra[1], to=ra[2], length.out=nrow(df)) x.vals <- invfun(theo.seq) ## convert f to a function and evaluate at these 'r' values ynames <- setdiff(fvnames(f, columns), theo) ff <- as.function(f, value=ynames) y.vals <- lapply(ynames, evalselected, x=x.vals, f=ff) ## build data frame all.vals <- append(list(theo=theo.seq), y.vals) names(all.vals) <- c(theo, ynames) DF <- as.data.frame(all.vals) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.theo <- match(theo, cnames) i.yval <- match(ynames, cnames) ii <- c(i.theo, i.yval) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] result <- fv(DF, argu = theo, ylab = atr$ylab, valu = best, fmla = . ~ .x, alim = ra, labl = atr$labl[ii], desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = atr$yexp) fvnames(result, ".") <- c(ynames, theo) return(result) } evalselected <- function(what, f, x){ f(x, what=what) } PPversion }) QQversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## extract relevant columns of data xname <- fvnames(f, ".x") ynames <- fvnames(f, columns) df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] y.table <- df[,ynames, drop=FALSE] ## set up inverse theoretical function f_0: 'theo' |-> 'r' invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## apply f_0^{-1} to tabulated function values z.table <- as.data.frame(lapply(y.table, invfun)) ## build data frame DF <- cbind(df[,xname,drop=FALSE], z.table) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.x <- match(xname, cnames) i.y <- match(ynames, cnames) ii <- c(i.x, i.y) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] if(versionstring.spatstat() < package_version("1.38-2")) { fvl <- fvlabels(f, expand=TRUE) theo.string <- fvl[colnames(f) == theo] } else { theo.string <- fvlabels(f, expand=TRUE)[[theo]] } ## remove '(r)' from outer function theo.string <- sub(paren(xname), "", theo.string, fixed=TRUE) theo.expr <- parse(text=theo.string) theo.lang <- theo.expr[[1]] ylab <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$ylab, THEO=theo.lang)) yexp <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$yexp, THEO=theo.lang)) oldlabl <- atr$labl labl.iy <- sprintf("{{%s}^{-1}}(%s)", theo.string, oldlabl[i.y]) labl.ii <- c(oldlabl[i.x], labl.iy) result <- fv(DF, argu = atr$argu, ylab = ylab, valu = best, fmla = . 
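               ## (illustrative note) QQversion applies the inverse of the
               ## theoretical function to each estimate, so that a model
               ## with a perfect fit plots along the diagonal, in the
               ## manner of a Q-Q plot; PPversion above is the P-P analogue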
~ .x,
               alim = atr$alim,
               labl = labl.ii,
               desc = atr$desc[ii],
               unitname = NULL,
               fname = atr$fname,
               yexp = yexp)
  fvnames(result, ".") <- ynames
  unitname(result) <- unitname(f)
  return(result)
}
spatstat.core/R/badgey.R0000644000176200001440000001616514144333461014637 0ustar liggesusers#
#
#    badgey.S
#
#    $Revision: 1.17 $	$Date: 2018/03/15 07:37:41 $
#
#    Hybrid Geyer process
#
#    BadGey()   create an instance of the process
#               [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#

BadGey <- local({

  # ........... auxiliary functions ..............

  delBG <- function(i, r, sat) {
    if(length(r) == length(sat)) {
      r   <- r[-i]
      sat <- sat[-i]
    } else if(length(sat) == 1) {
      r <- r[-i]
    } else stop("Mismatch in dimensions of arguments r and sat")
    nr <- length(r)
    if(nr == 0) return(Poisson())
    if(nr == 1) return(Geyer(r, sat))
    return(BadGey(r, sat))
  }

  # ..............  template ....................

  BlankBG <-
    list(
      name     = "hybrid Geyer process",
      creator  = "BadGey",
      family   = "pairsat.family", # will be evaluated later
      pot      = function(d, par) {
        r <- par$r
        nr <- length(r)
        out <- array(FALSE, dim=c(dim(d), nr))
        for(i in 1:nr) out[,,i] <- (d <= r[i])
        out
      },
      par      = list(r = NULL, sat=NULL), # to fill in later
      parnames = c("interaction radii", "saturation parameters"),
      hasInf   = FALSE,
      init     = function(self) {
        r <- self$par$r
        sat <- self$par$sat
        if(!is.numeric(r) || !all(r > 0))
          stop("interaction radii r must be positive numbers")
        if(length(r) > 1 && !all(diff(r) > 0))
          stop("interaction radii r must be strictly increasing")
        if(!is.numeric(sat) || any(sat < 0))
          stop("saturation parameters must be nonnegative numbers")
        if(length(sat) != length(r) && length(sat) != 1)
          stop("vectors r and sat must have equal length")
      },
      update = NULL,  # default OK
      print = NULL,   # default OK
      interpret = function(coeffs, self) {
        r <- self$par$r
        npiece <- length(r)
        # extract coefficients
        gammas <- exp(as.numeric(coeffs))
        # name them
        gn <- gammas
        names(gn) <- paste("[0,", r, ")", sep="")
        #
        return(list(param=list(gammas=gammas),
                    inames="interaction parameters gamma_i",
                    printable=dround(gn)))
      },
      valid = function(coeffs, self) {
        # interaction parameters gamma must be
        #   non-NA
        #   finite, if sat > 0
        #   less than 1, if sat = Inf
        gamma <- (self$interpret)(coeffs, self)$param$gammas
        sat <- self$par$sat
        if(anyNA(gamma))
          return(FALSE)
        return(all((is.finite(gamma) | sat == 0)
                   & (gamma <= 1 | sat != Inf)))
      },
      project = function(coeffs, self){
        loggammas <- as.numeric(coeffs)
        sat <- self$par$sat
        r   <- self$par$r
        good <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0)
        if(all(good)) return(NULL)
        if(!any(good)) return(Poisson())
        bad <- !good
        if(spatstat.options("project.fast") || sum(bad) == 1) {
          # remove smallest threshold with an unidentifiable parameter
          firstbad <- min(which(bad))
          return(delBG(firstbad, r, sat))
        } else {
          # consider all candidate submodels
          subs <- lapply(which(bad), delBG, r=r, sat=sat)
          return(subs)
        }
      },
      irange = function(self, coeffs=NA, epsilon=0, ...)
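      ## Illustrative example of fitting a hybrid Geyer model (not from
      ## the original source; assumes the 'cells' dataset from
      ## spatstat.data):
      ##     ppm(cells ~ 1, BadGey(r=c(0.05, 0.1), sat=2))
      ## gives one saturation term per radius in r, with the single
      ## value sat=2 recycled across both terms.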
{ r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL, # to be added later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE) { # fast evaluator for BadGey interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for BadGey") r <- potpars$r sat <- potpars$sat # ensure r and sat have equal length if(length(r) != length(sat)) { if(length(r) == 1) r <- rep.int(r, length(sat)) else if(length(sat) == 1) sat <- rep.int(sat, length(r)) else stop("lengths of r and sat do not match") } # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } nterms <- length(r) answer <- matrix(, nrow=nU, ncol=nterms) for(k in 1:nterms) { # first determine saturated pair counts counts <- strausscounts(U, X, r[k], EqualPairs) satcounts <- pmin.int(sat[k], counts) # trapdoor used by suffstat() if(halfway) answer[,k] <- satcounts else if(sat[k] == Inf) answer[,k] <- 2 * satcounts else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r[k], sat[k], Xcounts, EqualPairs) answer[,k] <- satcounts + change } } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) } ) class(BlankBG) <- "interact" BadGey <- function(r, sat) { instantiate.interact(BlankBG, list(r=r, sat=sat)) } BadGey <- intermaker(BadGey, BlankBG) BadGey }) spatstat.core/R/hasenvelope.R0000644000176200001440000000120514144333462015703 0ustar liggesusers#' #' hasenvelope.R #' #' A simple class of objects which contain additional envelope data #' #' $Revision: 1.1 $ $Date: 2015/10/05 06:20:31 $ hasenvelope <- function(X, E=NULL) { if(inherits(E, "envelope")) { attr(X, "envelope") <- E class(X) <- c("hasenvelope", class(X)) } return(X) } print.hasenvelope <- function(x, ...) 
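## Illustrative sketch of the intended use (not from the original source;
## E denotes a previously computed 'envelope' object):
##     K <- hasenvelope(Kest(cells), E)  # attach stored simulations to K
##     envelope(K)                       # re-uses E instead of re-simulating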
{ NextMethod("print") splat("[Object contains simulation envelope data]") return(invisible(NULL)) } envelope.hasenvelope <- function(Y, ..., Yname=NULL) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) E <- attr(Y, "envelope") return(envelope(E, ..., Yname=Yname)) } spatstat.core/R/Gest.R0000644000176200001440000001041314144333461014274 0ustar liggesusers# # Gest.S # # Compute estimates of nearest neighbour distance distribution function G # # $Revision: 4.32 $ $Date: 2020/08/25 06:13:10 $ # ################################################################################ # "Gest" <- "nearest.neighbour" <- function(X, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) ## W <- X$window npts <- npoints(X) lambda <- npts/area(W) ## determine r values rmaxdefault <- rmax.rule("G", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max zeroes <- numeric(length(rvals)) ## choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "han") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", cs="han", ChiuStoyan="han", best="km"), multi=TRUE) ## compute nearest neighbour distances nnd <- nndist(X$x, X$y) ## distance to boundary bdry <- bdist.points(X) ## restrict to subset ? if(!is.null(domain)) { ok <- inside.owin(X, w=domain) nnd <- nnd[ok] bdry <- bdry[ok] } ## observations o <- pmin.int(nnd,bdry) ## censoring indicators d <- (nnd <= bdry) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(G(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="G") if("none" %in% correction) { ## UNCORRECTED e.d.f. 
of nearest neighbour distances: use with care if(npts <= 1) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("han" %in% correction) { if(npts <= 1) G <- zeroes else { ## uncensored distances x <- nnd[d] ## weights a <- eroded.areas(W, rvals, subset=domain) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } ## add to fv object Z <- bind.fv(Z, data.frame(han=G), "hat(%s)[han](r)", "Hanisch estimate of %s", "han") ## modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and border correction (Reduced Sample) estimates want.rs <- "rs" %in% correction want.km <- "km" %in% correction if(npts == 0) { result <- list(rs=zeroes, km=zeroes, hazard=zeroes, theohaz=zeroes) } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) if(want.km) result$theohaz <- 2 * pi * lambda * rvals } wanted <- c(want.rs, rep(want.km, 3L)) wantednames <- c("rs", "km", "hazard", "theohaz")[wanted] result <- as.data.frame(result[wantednames]) ## add to fv object Z <- bind.fv(Z, result, c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)")[wanted], c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard function h(r)")[wanted], if(want.km) "km" else "rs") ## modify recommended plot range attr(Z, "alim") <- with(Z, range(.x[.y <= 0.9])) } nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) unitname(Z) <- unitname(X) return(Z) } spatstat.core/R/resid4plot.R0000644000176200001440000006041514144333463015474 0ustar liggesusers# # # Residual plots: # resid4plot four panels with matching coordinates # resid1plot one or more unrelated individual plots # resid1panel one panel of resid1plot # # $Revision: 1.39 $ $Date: 2020/12/19 05:25:06 $ # # resid4plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions fun(...) } do.lines <- function(x, y, defaulty=1, ...) { force(x) force(y) dont.complain.about(x,y) do.call(lines, resolve.defaults(list(quote(x), quote(y)), list(...), list(lty=defaulty))) } resid4plot <- function(RES, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL, xlab="x coordinate", ylab="y coordinate", rlab, col.neg=NULL, col.smooth=NULL, ...) 
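## resid4plot() draws the four diagnostic panels with matching
## coordinates: residuals/marks (top left), smoothed residual field
## (bottom right), and lurking variable plots for the x and y
## coordinates (bottom left and top right). End users normally reach
## it via diagnose.ppm(); a minimal sketch (not run, assuming a
## fitted model):
##   fit <- ppm(cells ~ x + y)
##   diagnose.ppm(fit)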
{ plot.neg <- match.arg(plot.neg) if(missing(rlab)) rlab <- NULL rlablines <- if(is.null(rlab)) 1 else sum(nzchar(rlab)) clip <- RES$clip Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type typename <- RES$typename Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] # set up 2 x 2 plot with space wide <- diff(W$xrange) high <- diff(W$yrange) space <- spacing * max(wide,high) width <- wide + space + wide height <- high + space + high outerspace <- outer * space outerRspace <- (outer - 1 + rlablines) * space plot(c(0, width) + c(-outerRspace, outerspace), c(0, height) + c(-outerspace, outerRspace), type="n", asp=1.0, axes=FALSE, xlab="", ylab="") # determine colour map for background nullvalue <- if(type == "eem") 1 else 0 if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(Yrange, Zrange, nullvalue), na.rm=TRUE) } else check.range(srange) backcols <- beachcolours(srange, nullvalue, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols # ------ plot residuals/marks (in top left panel) ------------ Xlowleft <- c(W$xrange[1],W$yrange[1]) vec <- c(0, high) + c(0, space) - Xlowleft # shift the original window Ws <- shift(W, vec) # shift the residuals Ys <- shift(Yclip,vec) # determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") # pre-plot the window(s) if(!redundant) { YsWin <- Ys$window if(!clip) do.clean(plot, YsWin, add=TRUE, ...) else do.clean(ploterodewin, Ws, YsWin, add=TRUE, ...) } ## adjust position of legend associated with eroded window sep <- if(clip) Wclip$yrange[1] - W$yrange[1] else NULL ## decide whether mark scale should be shown showscale <- (type != "raw") switch(plot.neg, discrete={ neg <- (Ys$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize","meansize","markscale") %in% names(list(...)))) { plot(Ys[neg], add=TRUE, legend=FALSE, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) plot(Ys[neg], add=TRUE, legend=FALSE, maxsize=hackmax, ...) } ## plot positive masses at atoms plot(Ys[!neg], add=TRUE, leg.side="left", leg.args=list(sep=sep), show.all=TRUE, main="", ...) }, contour = { Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) Contour(Yds, add=TRUE, ...) dont.complain.about(Yms) do.call(plot, resolve.defaults(list(x=quote(Yms), add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) }, imagecontour=, image={ Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) if(redundant) do.clean(ploterodeimage, Ws, Yds, rangeZ=srange, colsZ=col.neg, ...) else if(type != "eem") do.clean(image, Yds, add=TRUE, ribbon=FALSE, col=col.neg, zlim=srange, ...) if(plot.neg == "imagecontour") Contour(Yds, add=TRUE, ...) ## plot positive masses at atoms dont.complain.about(Yms) do.call(plot, resolve.defaults(list(x=quote(Yms), add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) } ) # --------- plot smoothed surface (in bottom right panel) ------------ vec <- c(wide, 0) + c(space, 0) - Xlowleft Zs <- shift.im(Z, vec) switch(plot.smooth, image={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) }, contour={ Contour(Zs, add=TRUE, ...) 
}, persp={ warning("persp not available in 4-panel plot") }, imagecontour={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) Contour(Zs, add=TRUE, ...) } ) lines(Zs$xrange[c(1,2,2,1,1)], Zs$yrange[c(1,1,2,2,1)]) # -------------- lurking variable plots ----------------------- # --------- lurking variable plot for x coordinate ------------------ # (cumulative or marginal) # in bottom left panel if(!is.null(RES$xmargin)) { a <- RES$xmargin observedV <- a$xZ observedX <- a$x theoreticalV <- a$ExZ theoreticalX <- a$x theoreticalSD <- theoreticalHI <- theoreticalLO <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$xcumul)) { a <- RES$xcumul observedX <- a$empirical$covariate observedV <- a$empirical$value theoreticalX <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pX <- pretty(theoreticalX) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { high * (y - rr[1])/diff(rr) } xscale <- function(x) { x - W$xrange[1] } if(!is.null(theoreticalHI)) { ## shaded confidence bands xp <- xscale( c(theoreticalX, rev(theoreticalX)) ) yp <- yscale( c(theoreticalHI, rev(theoreticalLO)) ) dont.complain.about(xp, yp) do.call.matched(polygon, resolve.defaults( list(x=quote(xp), y=quote(yp)), list(...), list(col="grey", border=NA))) } do.clean(do.lines, xscale(observedX), yscale(observedV), 1, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV + 2 * theoreticalSD), 3, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV - 2 * theoreticalSD), 3, ...) 
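## the two curves just drawn (lty=3 by default) are pointwise
## approximate 2-standard-deviation limits for the cumulative
## residual under the fitted model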
} axis(side=1, pos=0, at=xscale(pX), labels=pX) text(xscale(mean(theoreticalX)), - outerspace, xlab) axis(side=2, pos=0, at=yscale(pV), labels=pV) text(-outerRspace, yscale(mean(pV)), rlab, srt=90) # --------- lurking variable plot for y coordinate ------------------ # (cumulative or marginal) # in top right panel if(!is.null(RES$ymargin)) { a <- RES$ymargin observedV <- a$yZ observedY <- a$y theoreticalV <- a$EyZ theoreticalY <- a$y theoreticalSD <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$ycumul)) { a <- RES$ycumul observedV <- a$empirical$value observedY <- a$empirical$covariate theoreticalY <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pY <- pretty(theoreticalY) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { y - W$yrange[1] + high + space} xscale <- function(x) { wide + space + wide * (rr[2] - x)/diff(rr) } if(!is.null(theoreticalHI)) do.call.matched(polygon, resolve.defaults( list(x=xscale(c(theoreticalHI, rev(theoreticalLO))), y=yscale(c(theoreticalY, rev(theoreticalY)))), list(...), list(col="grey", border=NA))) do.clean(do.lines, xscale(observedV), yscale(observedY), 1, ...) do.clean(do.lines, xscale(theoreticalV), yscale(theoreticalY), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalV+2*theoreticalSD), yscale(theoreticalY), 3, ...) do.clean(do.lines, xscale(theoreticalV-2*theoreticalSD), yscale(theoreticalY), 3, ...) } axis(side=4, pos=width, at=yscale(pY), labels=pY) text(width + outerspace, yscale(mean(theoreticalY)), ylab, srt=90) axis(side=3, pos=height, at=xscale(pV), labels=pV) text(xscale(mean(pV)), height + outerRspace, rlab) # if(!is.null(main)) title(main=main) invisible(NULL) } resid4plot }) # # # Residual plot: single panel(s) # # resid1plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions fun(...) } resid1plot <- function(RES, opt, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), srange=NULL, monochrome=FALSE, main=NULL, add=FALSE, show.all=!add, do.plot=TRUE, col.neg=NULL, col.smooth=NULL, ...) 
{ if(!any(unlist(opt[c("all", "marks", "smooth", "xmargin", "ymargin", "xcumul", "ycumul")]))) return(invisible(NULL)) if(!add && do.plot) { ## determine size of plot area by calling again with do.plot=FALSE cl <- match.call() cl$do.plot <- FALSE b <- eval(cl, parent.frame()) bb <- as.owin(b, fatal=FALSE) if(is.owin(bb)) { ## initialise plot area plot(bb, type="n", main="") force(show.all) add <- TRUE } } ## extract info clip <- RES$clip Y <- RES$Y Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] ## determine colour map if(opt$all || opt$marks || opt$smooth) { nullvalue <- if(type == "eem") 1 else 0 if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(Yrange, Zrange, nullvalue), na.rm=TRUE) } else check.range(srange) backcols <- beachcolours(srange, nullvalue, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols } ## determine main heading if(is.null(main)) { prefix <- if(opt$marks) NULL else if(opt$smooth) "Smoothed" else if(opt$xcumul) "Lurking variable plot for x coordinate\n" else if(opt$ycumul) "Lurking variable plot for y coordinate\n" else if(opt$xmargin) "Lurking variable plot for x coordinate\n" else if(opt$ymargin) "Lurking variable plot for y coordinate\n" else NULL main <- paste(prefix, RES$typename) } ## ------------- residuals --------------------------------- if(opt$marks) { ## determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") ## pre-plot the window(s) if(redundant && !add) { z <- do.clean(plot, as.rectangle(W), box=FALSE, main="", do.plot=do.plot, ...) } else { if(!clip) z <- do.clean(plot, W, main="", add=add, show.all=show.all, do.plot=do.plot, ...) else z <- do.clean(ploterodewin, W, Wclip, main="", add=add, show.all=show.all, do.plot=do.plot, ...) } bb <- as.owin(z) switch(plot.neg, discrete={ neg <- (Y$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize", "markscale") %in% names(list(...)))) { z <- plot(Y[neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) z <- plot(Y[neg], add=TRUE, maxsize=hackmax, show.all=show.all, do.plot=do.plot, ...) } ## plot positive masses at atoms zp <- plot(Y[!neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- boundingbox(bb, z, zp) }, contour = { z <- Contour(Ydens, add=TRUE, do.plot=do.plot, ...) bb <- boundingbox(bb, z) }, imagecontour=, image={ if(redundant) { z <- do.clean(ploterodeimage, W, Ydens, rangeZ=srange, colsZ=col.neg, add=add, show.all=show.all, main="", do.plot=do.plot, ...) } else if(type != "eem") { z <- do.clean(image, Ydens, col=col.neg, zlim=srange, ribbon=FALSE, add=TRUE, show.all=show.all, do.plot=do.plot, main="", ...) } bb <- boundingbox(bb, z) if(plot.neg == "imagecontour") { z <- Contour(Ydens, add=TRUE, show.all=show.all, do.plot=do.plot, ...) 
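## accumulate the bounding box of everything plotted so far, so that
## the final panel extent can be reported to the caller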
bb <- boundingbox(bb, z) } ## decide whether mark scale should be shown showscale <- (type != "raw") ## plot positive masses at atoms dont.complain.about(Ymass) z <- do.call(plot, resolve.defaults(list(x=quote(Ymass), add=TRUE), list(...), list(use.marks=showscale, do.plot=do.plot))) bb <- boundingbox(bb, z) } ) if(do.plot && show.all) title(main=main) } # ------------- smooth ------------------------------------- if(opt$smooth) { if(!clip) { switch(plot.smooth, image={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, contour={ z <- Contour(Z, main="", axes=FALSE, xlab="", ylab="", do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main="", axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) bb <- as.owin(z) } ) if(do.plot && show.all) title(main=main) } else { switch(plot.smooth, image={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) bb <- boundingbox(as.rectangle(W), z) }, contour={ do.clean(plot, W, main=main, do.plot=do.plot, add=add, show.all=show.all, ...) z <- Contour(Z, add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main=main, axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) bb <- as.owin(z) } ) } } # ------------ cumulative x ----------------------------------------- if(opt$xcumul) { a <- RES$xcumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "x coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ cumulative y ----------------------------------------- if(opt$ycumul) { a <- RES$ycumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "y coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } ## ------------ x margin ----------------------------------------- if(opt$xmargin) { a <- RES$xmargin do.clean(resid1panel, a$x, a$xZ, a$x, a$ExZ, NULL, "x coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ y margin ----------------------------------------- if(opt$ymargin) { a <- RES$ymargin do.clean(resid1panel, a$y, a$yZ, a$y, a$EyZ, NULL, "y coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } attr(bb, "bbox") <- bb return(invisible(bb)) } resid1plot }) resid1panel <- local({ do.lines <- function(x, y, defaulty=1, ...) 
{ do.call(lines, resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } resid1panel <- function(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab, ylab, ..., do.plot=TRUE) { if(!do.plot) return(NULL) ## work out plot range rX <- range(observedX, theoreticalX) rV <- range(c(0, observedV, theoreticalV)) if(!is.null(theoreticalSD)) rV <- range(c(rV, theoreticalV + 2*theoreticalSD, theoreticalV - 2*theoreticalSD)) ## argument handling ## start plot plot(rX, rV, type="n", xlab=xlab, ylab=ylab, ...) do.lines(observedX, observedV, 1, ...) do.lines(theoreticalX, theoreticalV, 2, ...) if(!is.null(theoreticalSD)) { do.lines(theoreticalX, theoreticalV + 2 * theoreticalSD, 3, ...) do.lines(theoreticalX, theoreticalV - 2 * theoreticalSD, 3, ...) } } resid1panel }) # # ploterodewin <- function(W1, W2, col.edge=grey(0.75), col.inside=rgb(1,0,0), do.plot=TRUE, ...) { ## internal use only ## W2 is assumed to be an erosion of W1 switch(W1$type, rectangle={ z <- plot(W1, ..., do.plot=do.plot) plot(W2, add=TRUE, lty=2, do.plot=do.plot) }, polygonal={ z <- plot(W1, ..., do.plot=do.plot) plot(W2, add=TRUE, lty=2, do.plot=do.plot) }, mask={ Z <- as.im(W1) x <- as.vector(rasterx.mask(W1)) y <- as.vector(rastery.mask(W1)) ok <- inside.owin(x, y, W2) Z$v[ok] <- 2 z <- plot(Z, ..., col=c(col.edge, col.inside), add=TRUE, ribbon=FALSE, do.plot=do.plot, show.all=TRUE) } ) return(z) } ploterodeimage <- function(W, Z, ..., Wcol=grey(0.75), rangeZ, colsZ, do.plot=TRUE) { # Internal use only # Image Z is assumed to live on a subset of mask W # colsZ are the colours for the values in the range 'rangeZ' if(!is.mask(W)) { if(do.plot) plot(W, add=TRUE) W <- as.mask(W) } # Extend the colour map to include an extra colour for pixels in W # (1) Add the desired colour of W to the colour map pseudocols <- c(Wcol, colsZ) # (2) Breakpoints bks <- seq(from=rangeZ[1], to=rangeZ[2], length=length(colsZ)+1) dZ <- diff(bks)[1] pseudobreaks <- c(rangeZ[1] - dZ, bks) # (3) Determine a fake value for pixels in W Wvalue <- rangeZ[1] - dZ/2 # Create composite image on W grid # (with W-pixels initialised to Wvalue) X <- as.im(Wvalue, W) # Look up Z-values of W-pixels xx <- as.vector(rasterx.mask(W)) yy <- as.vector(rastery.mask(W)) Zvalues <- lookup.im(Z, xx, yy, naok = TRUE, strict=FALSE) # Overwrite pixels in Z inZ <- !is.na(Zvalues) X$v[inZ] <- Zvalues[inZ] z <- image(X, ..., add=TRUE, ribbon=FALSE, col=pseudocols, breaks=pseudobreaks, do.plot=do.plot) out <- list(X, pseudocols, pseudobreaks) attr(out, "bbox") <- as.owin(z) return(out) } spatstat.core/R/lurkmppm.R0000644000176200001440000001512114144333462015243 0ustar liggesusers#' lurkmppm.R #' Lurking variable plot for mppm #' $Revision: 1.8 $ $Date: 2019/02/10 08:33:42 $ lurking.mppm <- local({ zerofun <- function(x) rep(0, length(x)) threshfun <- function(threshold, value) { force(threshold) force(value) function(x) { value * (x >= threshold) } } approxcumfun <- function(x, y) { stopifnot(length(x) == length(y)) n <- length(x) if(n == 0) return(zerofun) if(n == 1) return(threshfun(x, y)) return(approxfun(x=x, y=y, yleft=0, yright=y[n], rule=2)) } as.function.lurk <- function(x, ..., what=c("empirical", "theoretical")) { what <- match.arg(what) switch(what, empirical = { with(x$empirical, approxcumfun(covariate, value)) }, theoretical = { with(x$theoretical, approxcumfun(covariate, mean)) }) } acceptable <- function(x) { is.im(x) || is.numeric(x) || is.expression(x) } approxcumul <- function(yin, xin, xout) { if(length(yin) > 1) { z <- approx(x=xin, y=yin, 
xout=xout, rule=2)$y } else { z <- yin * (xout >= xin) } return(z) } interpolateworking <- function(object, xx) { #' extract working data (variance terms) #' and interpolate them at the specified covariate values xx w <- attr(object, "working") if(is.null(w)) return(NULL) w <- as.data.frame(w) covariate <- object$theoretical$covariate y <- apply(w, 2, approxcumul, xin=covariate, xout=xx) return(as.data.frame(y)) } multilurk <- function(object, covariate, type="eem", ..., separate=FALSE, plot.it=TRUE, covname, oldstyle=FALSE, nx=512, main="") { cl <- match.call() stopifnot(is.mppm(object)) if(missing(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else "covariate" } Fisher <- vcov(object, what="fisher") Vcov <- solve(Fisher) if(acceptable(covariate)) { cov.is.list <- FALSE } else { cov.is.list <- is.list(covariate) && length(covariate) == object$npat && all(sapply(covariate, acceptable)) if(!cov.is.list) stop(paste("Argument 'covariate' should be", "a pixel image, a numeric vector, an expression", "or a list of such arguments", "with one entry for each row of original data"), call.=FALSE) } #' pseudo fitted model for each row of data futs <- subfits(object) #' make lurking variable plot object for each row if(cov.is.list) { #' list of covariate arguments, one for each row of data lurks <- mapply(lurking.ppm, object=futs, covariate=covariate, MoreArgs=list(type=type, plot.it=FALSE, ..., internal=list(saveworking=TRUE, Fisher=Fisher), nx=nx, oldstyle=oldstyle, covname=covname), SIMPLIFY=FALSE) } else { #' One covariate argument to rule them all #' First determine range of covariate values covrange <- range(sapply(futs, lurking, covariate=covariate, type=type, internal=list(getrange=TRUE)), na.rm=TRUE) #' Now compute lurking variable plots lurks <- anylapply(futs, lurking, covariate=covariate, type=type, plot.it=FALSE, ..., internal=list(saveworking=TRUE, Fisher=Fisher, covrange=covrange), nx=nx, oldstyle=oldstyle, covname=covname) } if(separate) { #' separate lurking variable plots for each row if(plot.it) { do.call(plot, resolve.defaults(list(x=quote(lurks)), list(...), list(main=main, mar.panel=c(5,4,2,3)))) return(invisible(lurks)) } else { return(lurks) } } #' auxiliary info infos <- lapply(lurks, attr, which="info") #' range of covariate values covrange <- range(unlist(lapply(infos, getElement, name="covrange")), na.rm=TRUE) xx <- seq(covrange[1], covrange[2], length=nx) #' empirical part efuns <- lapply(lurks, as.function.lurk, what="empirical") vlist <- lapply(efuns, do.call, list(xx)) sumv <- Reduce("+", vlist) empirical <- data.frame(covariate=xx, value=sumv) #' similar for theoretical curves tfuns <- lapply(lurks, as.function.lurk, what="theoretical") vlist <- lapply(tfuns, do.call, list(xx)) sumv <- Reduce("+", vlist) theoretical <- data.frame(covariate=xx, mean=sumv) #' variance calculation if available wlist <- lapply(lurks, interpolateworking, xx=xx) if(!any(sapply(wlist, is.null))) { w <- Reduce("+", wlist) varI <- w$varI if(oldstyle) { theoretical$sd <- sqrt(varI) } else { Bnames <- setdiff(colnames(w), c("varI", "varII")) B <- as.matrix(w[, Bnames, drop=FALSE]) if(ncol(B) != nrow(Vcov)) { warning("Internal variance data are incomplete; reverting to oldstyle=TRUE") oldstyle <- TRUE theoretical$sd <- sqrt(varI) } else { varII <- quadform(B, Vcov) varR <- varI - varII ra <- range(varR, finite=TRUE) if(ra[1] < 0) { warning(paste("Negative values of residual variance!", "Range =", prange(signif(ra, 4))), 
call.=FALSE) varR <- pmax(0, varR) } theoretical$sd <- sqrt(varR) } } } ## form result result <- list(empirical=empirical, theoretical=theoretical) class(result) <- "lurk" ## copy info e.g. type of residual info <- infos[[1]] info$covrange <- covrange attr(result, "info") <- info if(plot.it) plot(result, ..., main=main) return(invisible(result)) } multilurk }) spatstat.core/R/ppm.R0000644000176200001440000002106414144333463014174 0ustar liggesusers# # $Revision: 1.58 $ $Date: 2017/10/04 04:10:33 $ # # ppm() # Fit a point process model to a two-dimensional point pattern # # ppm <- function(Q, ...) { UseMethod("ppm") } ppm.formula <- function(Q, interaction=NULL, ..., data=NULL, subset) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ## trap a common error to give a more informative message if(is.sob(data) || is.function(data)) stop(paste("The argument", sQuote("data"), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) ########### INTERPRET FORMULA ############################## if(!inherits(Q, "formula")) stop(paste("Argument 'Q' should be a formula")) formula <- Q ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2]] trend <- formula[c(1,3)] ## FIT ####################################### thecall <- if(missing(subset)) { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction) } else { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction, subset=substitute(subset)) } ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv) result$call <- cl result$callstring <- callstring result$callframe <- parent.frame() return(result) } ppm.quad <- ppm.ppp <- ppm.default <- function(Q, trend = ~1, interaction = Poisson(), ..., covariates = data, data = NULL, covfunargs = list(), subset, clipwin, correction="border", rbord = reach(interaction), use.gam=FALSE, method = "mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL ) { Qname <- short.deparse(substitute(Q)) subsetexpr <- if(!missing(subset)) substitute(subset) else NULL clipwin <- if(!missing(clipwin)) clipwin else NULL datalistname <- if(missing(covariates)) "data" else "covariates" if(!(method %in% c("mpl", "ho", "logi", "VBlogi"))) stop(paste("Unrecognised fitting method", sQuote(method))) if(!missing(emend) && !missing(project) && emend != project) stop("Conflicting options: emend != project") if(!is.null(prior.mean) | !is.null(prior.var)){ if(missing(method)) method <- "VBlogi" if(method!="VBlogi") stop("Prior specification only works with method ", sQuote("VBlogi")) } if(method=="VBlogi"){ VB <- TRUE method <- "logi" } else{ VB <- FALSE } if(is.sob(covariates) || is.function(covariates)) stop(paste("The argument", sQuote(datalistname), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) if(inherits(Q, "logiquad")){ if(missing(method)) method <- "logi" if(method != "logi") stop(paste("Only method =", sQuote("logi"), "makes sense when Q is of type", sQuote("logiquad"))) } cl <- match.call() if(is.null(callstring)) callstring <- paste(short.deparse(sys.call()), 
collapse="") if(is.ppp(Q) && is.marked(Q) && !is.multitype(Q)) stop(paste("ppm is not yet implemented for marked point patterns,", "other than multitype patterns.")) if(!(is.ppp(Q) || is.quad(Q) || checkfields(Q, c("data", "dummy")))) { stop("Argument Q must be a point pattern or a quadrature scheme") } X <- if(is.ppp(Q)) Q else Q$data ## Validate interaction if(is.null(interaction)) { interaction <- Poisson() } else if(inherits(interaction, "intermaker")) { ## e.g. 'interaction=Hardcore': invoke it without arguments interaction <- (f <- interaction)() dont.complain.about(f) } else if(!is.interact(interaction)) stop("Argument 'interaction' must be an object of class 'interact'") ## Ensure interaction is fully defined if(!is.null(ss <- interaction$selfstart)) { # invoke selfstart mechanism to fix all parameters interaction <- ss(X, interaction) } if(inherits(trend, "formula")) { ## handle "." in formula, representing all variables in 'data' if("." %in% variablesinformula(trend)) { if(is.null(covariates)) stop("Cannot expand '.' since 'data' is not present", call.=FALSE) rhs <- paste(names(covariates), collapse=" + ") allmaineffects <- as.formula(paste("~", rhs)) environment(allmaineffects) <- environment(trend) trend <- update(allmaineffects, trend) } ## expand polynom() in formula if(spatstat.options("expand.polynom")) trend <- expand.polynom(trend) } # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # validate rbord if(correction == "border") { # rbord for border correction rbord.given <- !missing(rbord) && !is.null(rbord) if(is.null(rbord)) rbord <- reach(interaction) infin <- is.infinite(rbord) too.large <- infin || (eroded.areas(as.owin(X), rbord) == 0) if(too.large) { whinge <- paste(if(rbord.given) "rbord" else "the reach of this interaction", if(infin) "is infinite or unknown;" else "is too large for this window;", "please specify", if(rbord.given) "a smaller value of", "rbord, or use a different edge correction") stop(whinge) } } else { # rbord must be numeric to satisfy mpl.engine if(is.null(rbord)) rbord <- 0 } if(method == "logi") { fitLOGI <- logi.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, gcontrol=gcontrol, callstring=callstring, prior.mean=prior.mean, prior.var=prior.var, VB=VB, ...) fitLOGI$Qname <- Qname fitLOGI$call <- cl fitLOGI$callstring <- callstring fitLOGI$callframe <- parent.frame() if(emend && !valid.ppm(fitLOGI)) fitLOGI <- emend.ppm(fitLOGI) return(fitLOGI) } # fit by maximum pseudolikelihood fitMPL <- mpl.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, eps = eps, gcontrol=gcontrol, callstring=callstring, ...) 
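## mpl.engine returns the maximum pseudolikelihood fit; for
## method="ho" it is refined below by ho.engine using the
## Huang-Ogata one-step approximation to maximum likelihood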
fitMPL$Qname <- Qname if(!is.ppm(fitMPL)) { # internal use only - returns some other data return(fitMPL) } fitMPL$call <- cl fitMPL$callstring <- callstring fitMPL$callframe <- parent.frame() if(emend && !valid.ppm(fitMPL)) fitMPL <- emend.ppm(fitMPL) if(method == "mpl" || is.poisson.ppm(fitMPL)) return(fitMPL) fitHO <- ho.engine(fitMPL, nsim=nsim, nrmh=nrmh, start=start, control=control, verb=verb) if(is.null(fitHO)) return(fitMPL) if(emend && !valid.ppm(fitHO)) fitHO <- emend.ppm(fitHO) return(fitHO) } spatstat.core/R/rshift.R0000644000176200001440000001315614144333463014702 0ustar liggesusers# # rshift.R # # random shift with optional toroidal boundary # # $Revision: 1.19 $ $Date: 2020/04/29 13:20:21 $ # # rshift <- function(X, ...) { UseMethod("rshift") } rshift.splitppp <- function(X, ..., which=seq_along(X), nsim=1, drop=TRUE) { verifyclass(X, "splitppp") check.1.integer(nsim) if("group" %in% names(list(...))) stop(paste("argument", sQuote("group"), "not implemented for splitppp objects")) if(is.null(which)) { iwhich <- which <- seq_along(X) } else { id <- seq_along(X) names(id) <- names(X) iwhich <- id[which] if(length(iwhich) == 0) stop(paste("Argument", sQuote("which"), "did not match any marks")) } # validate arguments and determine common clipping window arglist <- handle.rshift.args(X[[1]]$window, ..., edgedefault="torus") if(!is.null(clip <- arglist$clip)) { # clip the patterns that are not to be shifted if(length(iwhich) < length(X)) X[-iwhich] <- lapply(X[-iwhich], "[.ppp", i=clip) } Xvariable <- X[iwhich] resultlist <- vector(mode="list", length=nsim) for(isim in seq_len(nsim)) { Xsim <- X ## perform shift on selected patterns ## (setting group = NULL ensures each pattern is not split further) shiftXsub <- do.call(lapply, append(list(Xvariable, rshift.ppp, group=NULL), arglist)) ## put back Xsim[iwhich] <- shiftXsub resultlist[[isim]] <- Xsim } return(simulationresult(resultlist, nsim, drop)) } rshift.ppp <- function(X, ..., which=NULL, group, nsim=1, drop=TRUE) { verifyclass(X, "ppp") check.1.integer(nsim) # validate arguments and determine common clipping window arglist <- handle.rshift.args(X$window, ..., edgedefault="torus") # default grouping # (NULL is not the default) # (NULL means all points shifted in parallel) if(missing(group)) group <- if(is.multitype(X)) marks(X) else NULL # if no grouping, use of `which' is undefined if(is.null(group) && !is.null(which)) stop(paste("Cannot apply argument", sQuote("which"), "; no grouping defined")) resultlist <- vector(mode="list", length=nsim) # if grouping, use split if(!is.null(group)) { Y <- split(X, group) splitshifts <- do.call(rshift.splitppp, append(list(Y, which=which, nsim=nsim, drop=FALSE), arglist)) for(isim in seq_len(nsim)) { Xsim <- X split(Xsim, group) <- splitshifts[[isim]] resultlist[[isim]] <- Xsim } return(simulationresult(resultlist, nsim, drop)) } # ungrouped point pattern # shift all points in parallel # recover arguments radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip W <- rescue.rectangle(Window(X)) if(edge == "torus") { if(!is.rectangle(W)) stop("edge = 'torus' is only meaningful for rectangular windows") xr <- W$xrange yr <- W$yrange Wide <- diff(xr) High <- diff(yr) } ## .......... simulation loop .................. 
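## Each simulation draws one random translation vector and applies it
## to all points in parallel; for edge="torus" the shifted coordinates
## are wrapped back into the rectangle modulo its width and height.
## A minimal sketch of typical use (not run):
##   Y <- rshift(cells, radius=0.1, edge="torus")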
resultlist <- vector(mode="list", length=nsim) for(isim in seq_len(nsim)) { #' generate random translation vector if(!is.null(radius)) { jump <- runifdisc(1, radius=radius) } else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } #' translate points of X x <- X$x + jump$x y <- X$y + jump$y #' wrap points if(edge == "torus") { x <- xr[1] + (x - xr[1]) %% Wide y <- yr[1] + (y - yr[1]) %% High } #' save as point pattern Xsim <- X Xsim$x <- x Xsim$y <- y #' clip to window if(!is.null(clip)) Xsim <- Xsim[clip] #' save result resultlist[[isim]] <- Xsim } ## ................ end loop .................. return(simulationresult(resultlist, nsim, drop)) } handle.rshift.args <- function(W, ..., radius=NULL, width=NULL, height=NULL, edge=NULL, clip=NULL, edgedefault) { verifyclass(W, "owin") W <- rescue.rectangle(W) if(length(aargh <- list(...)) > 0) stop(paste("Unrecognised arguments:", paste(names(aargh), collapse=","))) if(!is.null(radius)) { # radial generator if(!(is.null(width) && is.null(height))) stop(paste(sQuote("radius"), "is incompatible with", sQuote("width"), "and", sQuote("height"))) } else { # rectangular generator if(is.null(width) != is.null(height)) stop("Must specify both width and height, if one is specified") if(is.null(width)) width <- diff(W$xrange) if(is.null(height)) height <- diff(W$yrange) } if(is.null(edge)) edge <- edgedefault else if(!(edge %in% c("torus", "erode", "none"))) stop(paste("Unrecognised option edge=", sQuote(edge))) # determine whether clipping window is needed if(is.null(clip)) clip <- switch(edge, torus= NULL, none= W, erode={ if(!is.null(radius)) erosion.owin(W, radius) else if(W$type == "rectangle") trim.rectangle(W, width, height) else erosion.owin(W, max(width, height)) }) return(list(radius=radius, width=width, height=height, edge=edge, clip=clip)) } # rtoro <- function(X, which=NULL, radius=NULL, width=NULL, height=NULL) { # .Deprecated("rshift", package="spatstat") # rshift(X, which=which, radius=radius, width=width, height=height) # } spatstat.core/R/fryplot.R0000644000176200001440000000454014144333462015076 0ustar liggesusers# # fryplot.R # # $Revision: 1.17 $ $Date: 2020/12/19 05:25:06 $ # fryplot <- function(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) b <- as.rectangle(X) halfspan <- with(b, c(diff(xrange), diff(yrange))) if(!is.null(width)) { halfwidth <- ensure2vector(width)/2 halfspan <- pmin.int(halfspan, halfwidth) } bb <- owin(c(-1,1) * halfspan[1L], c(-1,1) * halfspan[2L]) Y <- frypoints(X, from=from, to=to, dmax=diameter(bb))[bb] dont.complain.about(Y) do.call(plot.ppp, resolve.defaults(list(x=quote(Y)), list(...), list(main=paste("Fry plot of", Xname)))) if(axes) { lines(c(0,0), c(-1,1) * halfspan[2L]) lines(c(-1,1) * halfspan[1L], c(0,0)) } return(invisible(NULL)) } frypoints <- function(X, from=NULL, to=NULL, dmax=Inf) { X <- as.ppp(X) b <- as.rectangle(X) bb <- owin(c(-1,1) * diff(b$xrange), c(-1,1) * diff(b$yrange)) n <- X$n xx <- X$x yy <- X$y ## determine (dx, dy) for all relevant pairs if(is.null(from) && is.null(to)) { if(is.infinite(dmax)) { dx <- outer(xx, xx, "-") dy <- outer(yy, yy, "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] } else { cl <- closepairs(X, dmax) DX <- cl$dx DY <- cl$dy I <- cl$j ## sic: I is the index of the 'TO' element } } else { seqn <- seq_len(n) from <- if(is.null(from)) seqn else seqn[from] to <- if(is.null(to)) seqn
else seqn[to] if(is.infinite(dmax)) { dx <- outer(xx[to], xx[from], "-") dy <- outer(yy[to], yy[from], "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE notsame <- notsame[to, from, drop=FALSE] DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] } else { cl <- crosspairs(X[from], X[to], dmax) ok <- with(cl, from[i] != to[j]) DX <- cl$dx[ok] DY <- cl$dy[ok] I <- cl$j[ok] } } ## form into point pattern Fry <- ppp(DX, DY, window=bb, check=FALSE) if(is.marked(X)) { marx <- as.data.frame(marks(X)) marxto <- if(is.null(to)) marx else marx[to, ,drop=FALSE] marks(Fry) <- marxto[I, ] } return(Fry) } spatstat.core/R/rmh.ppm.R0000644000176200001440000001230614144333463014760 0ustar liggesusers# # simulation of FITTED model # # $Revision: 1.37 $ $Date: 2020/09/10 06:14:36 $ # # rmh.ppm <- function(model, start = NULL, control = default.rmhcontrol(model, w=w), ..., w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, new.coef=NULL) { verifyclass(model, "ppm") argh <- list(...) if(is.null(control)) { control <- default.rmhcontrol(model, w=w) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) # convert fitted model object to list of parameters for rmh.default X <- rmhmodel(model, w=w, verbose=verbose, project=project, control=control, new.coef=new.coef) # set initial state if(is.null(start)) { datapattern <- data.ppm(model) start <- rmhstart(n.start=datapattern$n) } # call rmh.default # passing only arguments unrecognised by rmhcontrol known <- names(argh) %in% names(formals(rmhcontrol.default)) fargs <- argh[!known] Y <- do.call(rmh.default, append(list(model=X, start=start, control=control, nsim=nsim, drop=drop, saveinfo=saveinfo, verbose=verbose), fargs)) return(Y) } simulate.ppm <- function(object, nsim=1, ..., singlerun=FALSE, start = NULL, control = default.rmhcontrol(object, w=w), w = window, window = NULL, project=TRUE, new.coef=NULL, verbose=FALSE, progress=(nsim > 1), drop=FALSE) { verifyclass(object, "ppm") argh <- list(...) if(nsim == 0) return(list()) starttime = proc.time() # set up control parameters if(missing(control) || is.null(control)) { rcontr <- default.rmhcontrol(object, w=w) } else { rcontr <- rmhcontrol(control) } if(singlerun) { # allow nsave, nburn to determine nrep nsave <- resolve.1.default("nsave", list(...), as.list(rcontr), .MatchNull=FALSE) nburn <- resolve.1.default("nburn", list(...), as.list(rcontr), list(nburn=nsave), .MatchNull=FALSE) if(!is.null(nsave)) { nrep <- nburn + (nsim-1) * sum(nsave) rcontr <- update(rcontr, nrep=nrep, nsave=nsave, nburn=nburn) } } # other overrides if(length(list(...)) > 0) rcontr <- update(rcontr, ...) 
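## at this point rcontr holds the complete Metropolis-Hastings
## control parameters. A hedged usage sketch (not run, assuming a
## fitted Gibbs model):
##   fit <- ppm(cells ~ 1, Strauss(0.07))
##   Xsims <- simulate(fit, nsim=2)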
# Set up model parameters for rmh rmodel <- rmhmodel(object, w=w, verbose=FALSE, project=TRUE, control=rcontr, new.coef=new.coef) if(is.null(start)) { datapattern <- data.ppm(object) start <- rmhstart(n.start=datapattern$n) } rstart <- rmhstart(start) ######### if(singlerun && nsim > 1) { # ////////////////////////////////////////////////// # execute one long run and save every k-th iteration if(is.null(rcontr$nsave)) { # determine spacing between subsamples if(!is.null(rcontr$nburn)) { nsave <- max(1, with(rcontr, floor((nrep - nburn)/(nsim-1)))) } else { # assume nburn = 2 * nsave nsave <- max(1, with(rcontr, floor(nrep/(nsim+1)))) nburn <- 2 * nsave } rcontr <- update(rcontr, nsave=nsave, nburn=nburn) } # check nrep is enough nrepmin <- with(rcontr, nburn + (nsim-1) * sum(nsave)) if(rcontr$nrep < nrepmin) rcontr <- update(rcontr, nrep=nrepmin) # OK, run it if(progress) { cat(paste("Generating", nsim, "simulated patterns in a single run ... ")) flush.console() } Y <- rmh(rmodel, rstart, rcontr, verbose=verbose) if(progress) cat("Done.\n") # extract sampled states out <- attr(Y, "saved") nout <- length(out) if(nout == nsim+1L && identical(names(out)[1], "Iteration_0")) { ## expected behaviour: first entry is initial state out <- out[-1L] } else if(nout != nsim) { stop(paste("Internal error: wrong number of simulations generated:", nout, "!=", nsim)) } } else { # ////////////////////////////////////////////////// # execute 'nsim' independent runs out <- list() # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=verbose) # go if(nsim > 0) { if(progress) { cat(paste("Generating", nsim, "simulated", ngettext(nsim, "pattern", "patterns"), "...")) flush.console() } # call rmh # passing only arguments unrecognised by rmhcontrol known <- names(argh) %in% names(formals(rmhcontrol.default)) fargs <- argh[!known] rmhargs <- append(list(InfoList=rmhinfolist, verbose=verbose), fargs) if(progress) pstate <- list() for(i in 1:nsim) { out[[i]] <- do.call(rmhEngine, rmhargs) if(progress) pstate <- progressreport(i, nsim, state=pstate) } } } out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) return(out) } spatstat.core/R/Kscaled.R0000644000176200001440000001355714144333461014754 0ustar liggesusers# # Kscaled.R Estimation of K function for locally-scaled process # # $Revision: 1.17 $ $Date: 2019/12/08 04:29:28 $ # "Lscaled" <- function(...) { K <- Kscaled(...) 
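## transform to the approximately variance-stabilised L-scale,
## L(r) = sqrt(K(r)/pi); pmax.int() guards against small negative
## estimates of K before the square root is taken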
L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, quote(L[scaled](r)), c("L","scaled")) attr(L, "labl") <- attr(K, "labl") return(L) } "Kscaled"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") # rfixed <- !missing(r) || !missing(breaks) ## determine basic parameters W <- X$window npts <- X$n areaW <- area(W) halfdiameter <- diameter(W)/2 ## match corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### ## DETERMINE WEIGHTS AND VALIDATE ## if(missing(lambda)) { ## No intensity data provided ## Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) } else { ## lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.ppm(lambda)) lambda <- safelookup(predict(lambda, type="trend"), X) else if(!is.numeric(lambda) || !is.null(dim(lambda))) stop(paste(sQuote("lambda"), "should be a vector, a pixel image, a function or a ppm")) check.nvector(lambda, npts) } if(renormalise) { ## renormalise. Here we only need half the power ;-) check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (areaW/sum(1/lambda))^(normpower/2) lambda <- lambda/renorm.factor } ## Calculate range of r values using max lambda sra <- sqrt(range(lambda)) minrescale <- sra[1] maxrescale <- sra[2] ## convert arguments to absolute distances absr <- if(!is.null(r)) r/maxrescale else NULL absrmaxdefault <- min(rmax.rule("K", W), rmax/maxrescale) absbreaks <- if(!is.null(breaks)) scalardilate(breaks, 1/maxrescale) else NULL ## determine absolute distances absbreaks <- handle.r.b.args(absr, absbreaks, W, rmaxdefault=absrmaxdefault) absr <- absbreaks$r ## convert to rescaled distances breaks <- scalardilate(absbreaks, maxrescale) r <- breaks$r rmax <- breaks$max ## recommended range of scaled r values alim <- c(0, min(rmax, maxrescale * absrmaxdefault)) rthresh <- minrescale * halfdiameter ## maximum absolute distance ever needed maxabsdist <- min(rmax/minrescale, halfdiameter) ## this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", quote(K[scaled](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "scaled")) ## identify all relevant close pairs needXI <- any(correction %in% c("translate", "isotropic")) close <- closepairs(X, maxabsdist, what=if(needXI) "all" else "ijd") I <- close$i J <- close$j ## locally-scaled distances sqrtLambda <- sqrt(lambda) lamIJ <- (sqrtLambda[I] + sqrtLambda[J])/2 absDIJ <- close$d DIJ <- absDIJ * lamIJ ## first point of each pair XI <- if(needXI) ppp(close$xi, close$yi, window=W, check=FALSE) else NULL if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! 
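## the naive estimate is the cumulative histogram of the
## locally-scaled pairwise distances divided by the number of
## points, with no compensation for pairs lost at the boundary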
wh <- whist(DIJ, breaks$val) # no weights Kun <- cumsum(wh)/npts K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "border")) { ## border method ## Compute SCALED distances to boundary b <- bdist.points(X) * sqrtLambda bI <- b[I] ## apply reduced sample algorithm to scaled distances RS <- Kount(DIJ, bI, b, breaks) Kb <- RS$numerator/RS$denom.count Kb[r > rthresh] <- NA K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/npts Ktrans[r >= rthresh] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction (using UN-SCALED distances) edgewt <- edge.Ripley(XI, matrix(absDIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) Kiso <- cumsum(wh)/npts Kiso[r >= rthresh] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## default plot will display all edge corrections formula(K) <- . ~ r nama <- rev(colnames(K)) fvnames(K, ".") <- nama[!(nama %in% c("r", "rip", "ls"))] ## unitname(K) <- c("normalised unit", "normalised units") return(K) } spatstat.core/R/transect.R0000644000176200001440000000743714144333464015234 0ustar liggesusers# # transect.R # # Line transects of pixel images # # $Revision: 1.8 $ $Date: 2021/06/22 05:33:50 $ # transect.im <- local({ specify.location <- function(loc, rect) { lname <- short.deparse(substitute(loc)) if(is.numeric(loc) && length(loc) == 2) return(list(x=loc[1], y=loc[2])) if(is.list(loc)) return(xy.coords(loc)) if(!(is.character(loc) && length(loc) == 1)) stop(paste("Unrecognised format for", sQuote(lname)), call.=FALSE) xr <- rect$xrange yr <- rect$yrange switch(loc, bottomleft = list(x=xr[1], y=yr[1]), bottom = list(x=mean(xr), y=yr[1]), bottomright = list(x=xr[2], y=yr[1]), right = list(x=xr[2], y=mean(yr)), topright = list(x=xr[2], y=yr[2]), top = list(x=mean(xr), y=yr[2]), topleft = list(x=xr[1], y=yr[2]), left = list(x=xr[1], y=mean(yr)), centre=, center = list(x=mean(xr), y=mean(yr)), stop(paste("Unrecognised location", sQuote(lname), "=", dQuote(loc)), call.=FALSE) ) } transect.im <- function(X, ..., from="bottomleft", to="topright", nsample=512, click=FALSE, add=FALSE, curve=NULL) { Xname <- short.deparse(substitute(X)) Xname <- sensiblevarname(Xname, "X") stopifnot(is.im(X)) check.1.integer(nsample) if(length(curve)) { ## parametric curve ## validate specification of curve check.named.list(curve, c("f", "tlim"), namopt=c("tname", "tdescrip")) stopifnot(is.function(curve$f)) check.range(curve$tlim) ## extract info tlim <- curve$tlim tname <- curve$tname %orifnull% "t" tdescrip <- curve$tdescrip %orifnull% "curve parameter" tunits <- NULL ## create sample points along curve t <- seq(tlim[1L], tlim[2L], length.out=nsample) xy <- (curve$f)(t) if(is.null(dim(xy))) stop("curve$f() should return a matrix or data frame") if(ncol(xy) != 2L) stop("curve$f() should return a matrix or data frame with 2 columns") hasnames <- all(c("x", "y") %in% colnames(xy)) x <- xy[, if(hasnames) "x" else 1L] y <- xy[, if(hasnames) "y" else 2L] } else { ## straight line transect if(click) { ## interactive if(!add) plot(X) from <- 
spatstatLocator(1) points(from) to <- spatstatLocator(1) points(to) segments(from$x, from$y, to$x, to$y) } else { ## data defining a line segment R <- as.rectangle(X) from <- specify.location(from, R) to <- specify.location(to, R) } ## create sample points along transect if(identical(from,to)) stop(paste(sQuote("from"), "and", sQuote("to"), "must be distinct points"), call.=FALSE) u <- seq(0,1,length.out=nsample) x <- from$x + u * (to$x - from$x) y <- from$y + u * (to$y - from$y) leng <- sqrt( (to$x - from$x)^2 + (to$y - from$y)^2) t <- u * leng tname <- "t" tdescrip <- "distance along transect" tunits <- unitname(X) } ## look up pixel values (may be NA) v <- X[list(x=x, y=y), drop=FALSE] ## package into fv object df <- data.frame(t=t, v=v) colnames(df) <- c(tname, Xname) fv(df, argu = tname, ylab = substitute(Xname(tname), list(Xname=as.name(Xname), tname=as.name(tname))), valu=Xname, labl = c(tname, paste0("%s", paren(tname))), desc = c(tdescrip, "pixel value of %s"), unitname = tunits, fname = Xname) } transect.im }) spatstat.core/R/objsurf.R0000644000176200001440000001346414144333463015057 0ustar liggesusers# # objsurf.R # # surface of the objective function for an M-estimator # # $Revision: 1.32 $ $Date: 2021/10/31 08:52:52 $ # objsurf <- function(x, ...) { UseMethod("objsurf") } objsurf.kppm <- objsurf.dppm <- function(x, ..., ngrid=32, xlim=NULL, ylim=NULL, ratio=1.5, verbose=TRUE) { Fit <- x$Fit switch(Fit$method, mincon = { result <- objsurf(Fit$mcfit, ..., ngrid=ngrid, xlim=xlim, ylim=ylim, ratio=ratio, verbose=verbose) }, palm = , clik2 = { optpar <- x$par objfun <- Fit$objfun objargs <- Fit$objargs result <- objsurfEngine(objfun, optpar, objargs, ..., objname = "log composite likelihood", ngrid=ngrid, xlim=xlim, ylim=ylim, ratio=ratio, verbose=verbose) }, stop(paste("Unrecognised fitting method", dQuote(Fit$method)), call.=FALSE) ) return(result) } objsurf.minconfit <- function(x, ..., ngrid=32, xlim=NULL, ylim=NULL, ratio=1.5, verbose=TRUE) { optpar <- x$par objfun <- x$objfun objargs <- x$objargs dotargs <- x$dotargs result <- objsurfEngine(objfun, optpar, objargs, ..., objname = "contrast", dotargs=dotargs, ngrid=ngrid, xlim=xlim, ylim=ylim, ratio=ratio, verbose=verbose) return(result) } objsurfEngine <- function(objfun, optpar, objargs, ..., dotargs=list(), objname="objective", ngrid=32, xlim=NULL, ylim=NULL, ratio=1.5, verbose=TRUE) { trap.extra.arguments(...) if(!is.function(objfun)) stop("Object is in an outdated format and needs to be re-fitted") npar <- length(optpar) if(npar != 2) stop("Only implemented for functions of 2 arguments") ## create grid of values of (possibly transformed) parameters ratio <- ensure2vector(ratio) ngrid <- ensure2vector(ngrid) stopifnot(all(ratio > 1)) if(is.null(xlim)) xlim <- optpar[1] * c(1/ratio[1], ratio[1]) if(is.null(ylim)) ylim <- optpar[2] * c(1/ratio[2], ratio[2]) xgrid <- seq(xlim[1], xlim[2], length=ngrid[1]) ygrid <- seq(ylim[1], ylim[2], length=ngrid[2]) pargrid <- expand.grid(xgrid, ygrid) colnames(pargrid) <- names(optpar) # evaluate objective function if(verbose) cat(paste("Evaluating", nrow(pargrid), "function values...")) values <- do.call(apply, append(list(pargrid, 1, objfun, objargs=objargs), dotargs)) if(verbose) cat("Done.\n") result <- list(x=xgrid, y=ygrid, z=matrix(values, ngrid[1], ngrid[2])) attr(result, "optpar") <- optpar attr(result, "objname") <- objname class(result) <- "objsurf" return(result) } print.objsurf <- function(x, ...) 
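## Hedged usage sketch for the objsurf methods (not run, assuming a
## cluster model fitted by minimum contrast):
##   fit <- kppm(redwood ~ 1, "Thomas")
##   os <- objsurf(fit)
##   plot(os)
##   contour(os)
##   persp(os)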
{ cat("Objective function surface\n") optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) cat(paste("\tFunction value:", objname, "\n")) cat("Parameter limits:\n") cat(paste("\t", paste0(nama[1L], ":"), prange(signif(range(x$x), 4)), "\n")) cat(paste("\t", paste0(nama[2L], ":"), prange(signif(range(x$y), 4)), "\n")) invisible(NULL) } summary.objsurf <- function(object, ...) { y <- list(xrange=range(object$x), yrange=range(object$y), objrange=range(object$z), optpar=as.list(attr(object, "optpar")), objname=attr(object, "objname") ) class(y) <- c("summary.objsurf", class(y)) return(y) } print.summary.objsurf <- function(x, ...) { with(x, { cat("Objective function surface\n") cat(paste("\tFunction value:", objname, "\n")) cat(paste("\tRange of values:", prange(objrange), "\n")) cat("Parameter limits (xrange, yrange):\n") nama <- names(optpar) cat(paste("\t", paste0(nama[1L], ":"), prange(xrange), "\n")) cat(paste("\t", paste0(nama[2L], ":"), prange(yrange), "\n")) cat("Selected parameter values (optpar):\n") cat(paste("\t", paste(nama, "=", optpar, collapse=", "), "\n")) }) return(invisible(NULL)) } image.objsurf <- plot.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") nama <- names(optpar) xx <- unclass(x) dont.complain.about(xx) do.call(image, resolve.defaults(list(x=quote(xx)), list(...), list(xlab=nama[1L], ylab=nama[2L], main=xname))) abline(v=optpar[1L], lty=3) abline(h=optpar[2L], lty=3) return(invisible(NULL)) } contour.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- summary(x)[["optpar"]] nama <- names(optpar) xx <- unclass(x) dont.complain.about(xx) do.call(contour, resolve.defaults(list(x=quote(xx)), list(...), list(xlab=nama[1], ylab=nama[2], main=xname))) abline(v=optpar[1], lty=3) abline(h=optpar[2], lty=3) return(invisible(NULL)) } persp.objsurf <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) xx <- x$x yy <- x$y zz <- x$z dont.complain.about(xx, yy, zz) r <- do.call(persp, resolve.defaults(list(x=quote(xx), y=quote(yy), z=quote(zz)), list(...), list(xlab=nama[1], ylab=nama[2], zlab=objname, main=xname))) return(invisible(r)) } spatstat.core/R/pkgRandomFields.R0000644000176200001440000000467214144333463016457 0ustar liggesusers#' #' pkgRandomFields.R #' #' Dealing with the Random Fields package #' #' $Revision: 1.3 $ $Date: 2020/11/30 10:14:04 $ kraeverRandomFields <- function() { kraever("RandomFieldsUtils") kraever("RandomFields") # should no longer be needed: # capture.output(RandomFieldsUtils:::.onLoad()) # capture.output(RandomFields:::.onLoad()) return(invisible(NULL)) } # require a namespace and optionally check whether it is attached kraever <- function(package, fatal=TRUE) { if(!requireNamespace(package, quietly=TRUE)) { if(fatal) stop(paste("The package", sQuote(package), "is required"), call.=FALSE) return(FALSE) } if(spatstat.options(paste("check", package, "loaded", sep=".")) && !isNamespaceLoaded(package)){ if(fatal) stop(paste("The package", sQuote(package), "must be loaded: please type", sQuote(paste0("library", paren(package)))), call.=FALSE) return(FALSE) } return(TRUE) } getRandomFieldsModelGen <- function(model) { kraeverRandomFields() if(inherits(model, "RMmodelgenerator")) return(model) if(!is.character(model)) stop(paste("'model' should be a character string", "or one of the functions in the RandomFields package", "with a name beginning 'RM'"), call.=FALSE) f <- switch(model, cauchy = RandomFields::RMcauchy, exponential = , exp = RandomFields::RMexp, gencauchy = RandomFields::RMgencauchy, gauss = RandomFields::RMgauss, gneiting = RandomFields::RMgneiting, matern = RandomFields::RMmatern, nugget = RandomFields::RMnugget, spheric = RandomFields::RMspheric, stable = RandomFields::RMstable, whittle = RandomFields::RMwhittle, { modgen <- try(getExportedValue("RandomFields", paste0("RM", model)), silent=TRUE) if(inherits(modgen, "try-error") || !inherits(modgen, "RMmodelgenerator")) stop(paste("Model", sQuote(model), "is not recognised")) modgen }) if(!is.function(f)) stop(paste0("Unable to retrieve RandomFields::RM", model)) return(f) } # legacy function RandomFieldsSafe <- function() { TRUE } spatstat.core/R/dclftest.R0000644000176200001440000003417714144333462015220 0ustar liggesusers# # dclftest.R # # $Revision: 1.46 $ $Date: 2019/10/09 06:25:49 $ # # Monte Carlo tests for CSR (etc) # # clf.test <- function(...) { # .Deprecated("dclf.test", package="spatstat") # dclf.test(...) # } dclf.test <- function(X, ..., alternative=c("two.sided", "less", "greater"), rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) { Xname <- short.deparse(substitute(X)) envelopeTest(X, ..., exponent=2, alternative=alternative, rinterval=rinterval, leaveout=leaveout, scale=scale, clamp=clamp, interpolate=interpolate, Xname=Xname) } mad.test <- function(X, ..., alternative=c("two.sided", "less", "greater"), rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) { Xname <- short.deparse(substitute(X)) envelopeTest(X, ..., exponent=Inf, alternative=alternative, rinterval=rinterval, leaveout=leaveout, scale=scale, clamp=clamp, interpolate=interpolate, Xname=Xname) } ## measure deviation of summary function ## leaveout = 0: typically 'ref' is exact theoretical value ## Compute raw deviation. 
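##              (Deviation() then returns the plain difference x - ref)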
## leaveout = 1: 'ref' is mean of simulations *and* observed. ## Use algebra to compute leave-one-out deviation. ## leaveout = 2: 'ref' is mean of simulations ## Use algebra to compute leave-two-out deviation. Deviation <- function(x, ref, leaveout, n, xi=x) { if(leaveout == 0) return(x-ref) if(leaveout == 1) return((x-ref) * (n+1)/n) jackmean <- (n * ref - xi)/(n-1) return(x - jackmean) } ## Evaluate signed or absolute deviation, ## taking account of alternative hypothesis and possible scaling ## (Large positive values always favorable to alternative) RelevantDeviation <- local({ positivepart <- function(x) { d <- dim(x) y <- pmax(0, x) if(!is.null(d)) y <- matrix(y, d[1L], d[2L]) return(y) } negativepart <- function(x) positivepart(-x) RelevantDeviation <- function(x, alternative, clamp=FALSE, scaling=NULL) { if(!is.null(scaling)) x <- x/scaling switch(alternative, two.sided = abs(x), less = if(clamp) negativepart(x) else -x, greater = if(clamp) positivepart(x) else x) } RelevantDeviation }) ## workhorse function envelopeTest <- function(X, ..., exponent=1, alternative=c("two.sided", "less", "greater"), rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, tie.rule=c("randomise","mean"), interpolate=FALSE, save.interpolant = TRUE, save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE, Xname=NULL, badXfatal=TRUE, verbose=TRUE) { if(is.null(Xname)) Xname <- short.deparse(substitute(X)) tie.rule <- match.arg(tie.rule) alternative <- match.arg(alternative) if(!(leaveout %in% 0:2)) stop("Argument leaveout should equal 0, 1 or 2") force(save.envelope) check.1.real(exponent) explain.ifnot(exponent >= 0) deviationtype <- switch(alternative, two.sided = "absolute", greater = if(clamp) "positive" else "signed", less = if(clamp) "negative" else "signed") deviationblurb <- paste(deviationtype, "deviation") ## compute or extract simulated functions X <- envelope(X, ..., savefuns=TRUE, savepatterns=savepatterns, Yname=Xname, verbose=verbose) Y <- attr(X, "simfuns") ## extract values r <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1L] nsim <- ncol(sim) nr <- length(r) ## choose function as reference has.theo <- ("theo" %in% names(X)) use.theo <- identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- apply(sim, 1L, mean, na.rm=TRUE) } else { ## use sample mean of simulations *and* observed reference <- apply(cbind(sim, obs), 1L, mean, na.rm=TRUE) } } ## determine interval of r values for computation if(is.null(rinterval)) { rinterval <- range(r) ok <- rep(TRUE, nr) first <- 1L } else { #' argument 'rinterval' specified check.range(rinterval) if(max(r) < rinterval[2L]) { oldrinterval <- rinterval rinterval <- intersect.ranges(rinterval, range(r), fatal=FALSE) if(is.null(rinterval)) stop(paste("The specified rinterval", prange(oldrinterval), "has empty intersection", "with the range of r values", prange(range(r)), "computed by the summary function"), call.=FALSE) if(verbose) warning(paste("The interval", prange(oldrinterval), "is too large for the available data;", "it has been trimmed to", prange(rinterval))) } ok <- (rinterval[1L] <= r & r <= rinterval[2L]) first <- min(which(ok)) } #' check for valid function values, and possibly adjust rinterval #' observed function values badr <- 
!is.finite(obs) if(badXfatal && all(badr)) stop("Observed function values are all infinite, NA or NaN", call.=FALSE) if(any(badr[ok])) { if(badr[first] && !any(badr[ok][-1L])) { ## ditch smallest r value (usually zero) ok[first] <- FALSE first <- first + 1L rmin <- r[first] if(verbose) warning(paste("Some function values were infinite, NA or NaN", "at distance r =", paste0(rinterval[1L], ";"), "lower limit of r interval was reset to", rmin, summary(unitname(X))$plural)) rinterval[1] <- rmin } else { ## problem rbadmax <- paste(max(r[badr]), summary(unitname(X))$plural) warning(paste("Some function values were infinite, NA or NaN", "at distances r up to", paste0(rbadmax, "."), "Consider specifying a shorter", sQuote("rinterval"))) } } #' simulated function values badsim <- matcolall(!is.finite(sim[ok,,drop=FALSE])) if(all(badsim)) stop(paste("Simulated function values are all infinite, NA or NaN.", "Check whether simulated patterns are empty"), call.=FALSE) if(any(badsim)) { warning(paste("In", sum(badsim), "out of", length(badsim), "simulations,", "the simulated function values were infinite, NA or NaN", "at every distance r.", "Check whether some simulated patterns are empty"), call.=FALSE) } #' finally trim data rok <- r[ok] obs <- obs[ok] sim <- sim[ok, ] reference <- reference[ok] nr <- sum(ok) if(nr == 0) { ## rinterval is very short: pick nearest r value ok <- which.min(abs(r - mean(rinterval))) nr <- 1L } ## determine rescaling if any if(is.null(scale)) { scaling <- NULL } else if(is.function(scale)) { scaling <- scale(rok) sname <- "scale(r)" ans <- check.nvector(scaling, nr, things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[rok > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1L]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) if(!all(is.finite(ddat))) warning("Some deviation values were Inf, NA or NaN") if(!all(is.finite(dsim))) warning("Some simulated deviations were Inf, NA or NaN") ## compute test statistic if(is.infinite(exponent)) { ## MAD devdata <- max(ddat,na.rm=TRUE) devsim <- apply(dsim, 2, max, na.rm=TRUE) names(devdata) <- "mad" testname <- paste("Maximum", deviationblurb, "test") statisticblurb <- paste("Maximum", deviationblurb) } else { L <- if(nr > 1) diff(rinterval) else 1 if(exponent == 2) { ## Cramer-von Mises ddat2 <- if(clamp) ddat^2 else (sign(ddat) * ddat^2) dsim2 <- if(clamp) dsim^2 else (sign(dsim) * dsim^2) devdata <- L * mean(ddat2, na.rm=TRUE) devsim <- L * .colMeans(dsim2, nr, nsim, na.rm=TRUE) names(devdata) <- "u" testname <- "Diggle-Cressie-Loosmore-Ford test" statisticblurb <- paste("Integral of squared", deviationblurb) } else if(exponent == 1) { ## integral absolute deviation devdata <- L * mean(ddat, na.rm=TRUE) devsim <- L * .colMeans(dsim, nr, nsim, na.rm=TRUE) names(devdata) <- "L1" testname <- paste("Integral", deviationblurb, "test") statisticblurb <- paste("Integral of", deviationblurb) } else { ## general p if(clamp) { ddatp <- ddat^exponent dsimp <- dsim^exponent } else { 
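      ## signed p-th power: raise the magnitude of each deviation to the
      ## exponent but keep its sign, so negative deviations still pull the
      ## test statistic downwards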
ddatp <- sign(ddat) * (abs(ddat)^exponent) dsimp <- sign(dsim) * (abs(dsim)^exponent) } devdata <- L * mean(ddatp, na.rm=TRUE) devsim <- L * .colMeans(dsimp, nr, nsim, na.rm=TRUE) names(devdata) <- "Lp" testname <- paste("Integrated", ordinal(exponent), "Power Deviation test") statisticblurb <- paste("Integral of", ordinal(exponent), "power of", deviationblurb) } } if(!interpolate) { ## standard Monte Carlo test ## compute rank and p-value datarank <- sum(devdata < devsim, na.rm=TRUE) + 1 nties <- sum(devdata == devsim, na.rm=TRUE) if(nties > 0) { tierank <- switch(tie.rule, mean = nties/2, randomise = sample(1:nties, 1L)) datarank <- datarank + tierank if(verbose) message("Ties were encountered") } pvalue <- datarank/(nsim+1) ## bookkeeping statistic <- data.frame(devdata, rank=datarank) colnames(statistic)[1L] <- names(devdata) } else { ## Dao-Genton style interpolation fhat <- density(devsim, na.rm=TRUE) pvalue <- with(fhat, { if(max(x) <= devdata) 0 else mean(y[x >= devdata]) * (max(x) - devdata) }) statistic <- data.frame(devdata) colnames(statistic)[1L] <- names(devdata) nties <- 0 } e <- attr(X, "einfo") nullmodel <- if(identical(e$csr, TRUE)) "CSR" else if(!is.null(e$simtype)) { switch(e$simtype, csr = "CSR", rmh = paste("fitted", if(identical(e$pois, TRUE)) "Poisson" else "Gibbs", "model"), kppm = "fitted cluster model", expr = "model simulated by evaluating expression", func = "model simulated by evaluating function", list = "model simulated by drawing patterns from a list", "unrecognised model") } else "unrecognised model" fname <- deparse(attr(X, "ylab")) uname <- with(summary(unitname(X)), if(!vanilla) paste(plural, explain) else NULL) testtype <- paste0(if(interpolate) "Interpolated " else NULL, "Monte Carlo") scaleblurb <- if(is.null(scale)) NULL else paste("Scale function:", paste(deparse(scale), collapse=" ")) refblurb <- if(theo.used) "theoretical" else "sample mean" leaveblurb <- if(leaveout == 0) paste("observed minus", refblurb) else if(leaveout == 1) "leave-one-out" else "leave-two-out" testname <- c(paste(testname, "of", nullmodel), paste(testtype, "test based on", nsim, "simulations", e$constraints), paste("Summary function:", fname), paste("Reference function:", refblurb), paste("Alternative:", alternative), paste("Interval of distance values:", prange(rinterval), uname), scaleblurb, paste("Test statistic:", statisticblurb), paste("Deviation =", leaveblurb) ) result <- structure(list(statistic = statistic, p.value = pvalue, method = testname, data.name = e$Yname), class="htest") attr(result, "rinterval") <- rinterval if(save.interpolant && interpolate) attr(result, "density") <- fhat if(save.envelope) { result <- hasenvelope(result, X) attr(result, "statistics") <- list(data=devdata, sim=devsim) attr(result, "info") <- list(exponent=exponent, alternative=alternative, nties=nties, leaveout=leaveout, interpolate=interpolate, scale=scale, clamp=clamp, tie.rule=tie.rule, use.theo=use.theo) } return(result) } spatstat.core/R/GJfox.R0000644000176200001440000001035414144333461014413 0ustar liggesusers# # GJfox.R # # Foxall G-function and J-function # # $Revision: 1.11 $ $Date: 2020/02/20 02:32:21 $ # Gfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W=NULL, ...) 
{ stopifnot(is.ppp(X)) #' validate and resolve windows a <- resolve.foxall.window(X, Y, W) X <- a$X Y <- a$Y W <- a$W #' if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable ## compute distances and censoring distances D <- distfun(Y) dist <- D(X) bdry <- bdist.points(X[W]) # sic ## histogram breakpoints dmax <- max(dist) breaks <- handle.r.b.args(r, breaks, Window(X), NULL, rmaxdefault=dmax) rval <- breaks$r ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(Window(X), rval) else NULL, tt=dist) ## relabel Z <- rebadge.fv(Z, quote(G[fox](r)), c("G", "fox")) unitname(Z) <- unitname(Y) return(Z) } Jfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W=NULL, ..., warn.trim=TRUE) { ## validate and resolve windows a <- resolve.foxall.window(X, Y, W, isTRUE(warn.trim)) X <- a$X Y <- a$Y W <- a$W ## process H <- Hest(Y, r=r, breaks=breaks, correction=correction, ..., W=W) G <- Gfox(X, Y, r=H$r, correction=correction, ..., W=W) ## derive J-function J <- eval.fv((1-G)/(1-H), dotonly=FALSE) ## correct calculation of hazard is different if("hazard" %in% names(J)) J$hazard <- G$hazard - H$hazard ## base labels on 'J' rather than full expression attr(J, "labl") <- attr(H, "labl") ## add column of 1's J <- bind.fv(J, data.frame(theo=rep.int(1, nrow(J))), "%s[theo](r)", "theoretical value of %s for independence") ## rename J <- rebadge.fv(J, quote(J[fox](r)), c("J", "fox")) funs <- c("km", "han", "rs", "raw", "theo") fvnames(J, ".") <- funs[funs %in% names(J)] unitname(J) <- unitname(Y) attr(J, "conserve") <- attr(H, "conserve") return(J) } resolve.foxall.window <- function(X, Y, W=NULL, warn.trim=TRUE) { if(!(is.ppp(Y) || is.psp(Y) || is.owin(Y) || is.im(Y))) stop("Y should be an object of class ppp, psp, owin or im") if(is.im(Y) && !is.logical(ZeroValue(Y))) stop("When Y is an image, its pixel values should be logical values") if(!identical(unitname(X), unitname(Y))) warning("X and Y are not in the same units") ## default window based on Y if(is.ppp(Y) || is.psp(Y)) { W0 <- Window(Y) W0describe <- "the observation window of Y" } else if(is.owin(Y)) { W0 <- Frame(Y) W0describe <- "the Frame of Y" } else if(is.im(Y)) { W0 <- Window(Y) W0describe <- "the observation window of Y" Y <- solutionset(Y) } else stop("Y should be an object of class ppp, psp, owin or im") ## actual window used for estimation if(!is.null(W)) { stopifnot(is.owin(W)) if(!is.subset.owin(W, W0)) stop(paste("W is not a subset of", W0describe)) Wdescribe <- "W" } else { W <- W0 Wdescribe <- W0describe } ## ensure compatible windows WX <- Window(X) if(!is.subset.owin(WX, W)) { if(warn.trim) warning(paste("Trimming the window of X to be a subset of", Wdescribe)) WX <- intersect.owin(WX, W) if(area.owin(WX) == 0) stop("Trimmed window has zero area") X <- X[WX] if(npoints(X) == 0) stop("No points remaining after trimming window") } return(list(X=X, Y=Y, W=W)) } spatstat.core/R/digestCovariates.R0000644000176200001440000000377714144333462016712 0ustar liggesusers#' #' digestCovariates.R #' #' $Revision: 1.4 $ $Date: 2018/05/03 08:33:44 $ 
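#'
#' Minimal usage sketch (not run; 'Zelev' is a hypothetical pixel image
#' covariate, and "x" denotes the cartesian coordinate):
#'    covs <- digestCovariates(Zelev, "x", W = owin(c(0,1), c(0,1)))
#'    sapply(covs, is.scov)    # should be TRUE for every digested entry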
#' is.scov <- function(x) { #' Determines whether x is a valid candidate for a spatial covariate #' A spatial object is OK if it can be coerced to a function if(inherits(x, c("im", "funxy", "owin", "tess", "ssf", "leverage.ppm"))) return(TRUE) #' A function(x,y,...) is OK if(is.function(x) && identical(names(formals(x))[1:2], c("x", "y"))) return(TRUE) #' A single character "x" or "y" is OK if(is.character(x) && length(x) == 1 && (x %in% c("x", "y"))) return(TRUE) #' Can't handle input return(FALSE) } ## Assumes each input (besides W) is a single covariate or a list of covariates ## Returns a `solist` with possibly a unitname attribute digestCovariates <- function(..., W = NULL) { x <- list(...) #' Find individual covariates in list valid <- sapply(x, is.scov) covs <- x[valid] #' The remaining entries are assumed to be lists of covariates #' so we unlist them x <- unlist(x[!valid], recursive = FALSE) valid <- sapply(x, is.scov) if(!all(valid)) stop("Couldn't interpret all input as spatial covariates.") covs <- append(covs, x) if(any(needW <- !sapply(covs, is.sob))) { if(is.null(W)){ boxes <- lapply(covs[!needW], Frame) W <- do.call(boundingbox, boxes) } else stopifnot(is.owin(W)) } covunits <- vector("list", length(covs)) # Now covs is a list of valid covariates we can loop through for(i in seq_along(covs)){ covar <- covs[[i]] if(inherits(covar, "distfun")) covunits[[i]] <- unitname(covar) if(is.character(covar) && length(covar) == 1 && (covar %in% c("x", "y"))) { covar <- if(covar == "x"){ function(x,y) { x } } else{ function(x,y) { y } } covunits[[i]] <- unitname(W) } if(is.function(covar) && !inherits(covar, "funxy")){ covar <- funxy(f = covar, W = W) } covs[[i]] <- covar } covs <- as.solist(covs) attr(covs, "covunits") <- covunits return(covs) } spatstat.core/R/residuals.mppm.R0000644000176200001440000000566414144333464016354 0ustar liggesusers# # residuals.mppm.R # # computes residuals for fitted multiple point process model # # # $Revision: 1.5 $ $Date: 2015/01/29 06:44:26 $ # residuals.mppm <- function(object, type="raw", ..., fittedvalues = fitted.mppm(object)) { verifyclass(object, "mppm") userfitted <- !missing(fittedvalues) type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals") typename <- typenames[[type]] # Extract quadrature points and weights Q <- quad.mppm(object) # U <- lapply(Q, union.quad) # quadrature point patterns Z <- unlist(lapply(Q, is.data)) # indicator data/dummy W <- unlist(lapply(Q, w.quad)) # quadrature weights # total number of quadrature points nquadrature <- length(W) # number of quadrature points in each pattern nU <- unlist(lapply(Q, n.quad)) # number of rows of hyperframe npat <- object$npat # attribution of each quadrature point id <- factor(rep(1:npat, nU), levels=1:npat) # Compute fitted conditional intensity at quadrature points if(!is.list(fittedvalues) || length(fittedvalues) != npat) stop(paste(sQuote("fittedvalues"), "should be a list of length", npat, "containing vectors of fitted values")) lambda <- unlist(fittedvalues) # consistency check if(length(lambda) != nquadrature) stop(paste(if(!userfitted) "internal error:" else NULL, "number of fitted values", paren(length(lambda)), "does not match number of quadrature points", paren(nquadrature))) # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) # Evaluate residual measure 
components discrete <- ifelse(Z, switch(type, raw = 1, inverse = 1/lambda, pearson = 1/sqrt(lambda) ), 0) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda)) atoms <- as.logical(Z) # All components resdf <- data.frame(discrete=discrete, density=density, atoms=atoms) # Split residual data according to point pattern affiliation splitres <- split(resdf, id) # Associate with quadrature scheme reshf <- hyperframe(R=splitres, Q=Q) # Convert to signed measures answer <- with(reshf, msr(Q, R$discrete[R$atoms], R$density)) # tag answer <- lapply(answer, "attr<-", which="type", value=type) answer <- lapply(answer, "attr<-", which="typename", value=typename) return(as.solist(answer)) } spatstat.core/R/bw.pcf.R0000644000176200001440000001372214144333461014557 0ustar liggesusers#' #' bw.pcf.R #' #' $Revision: 1.5 $ $Date: 2019/09/30 07:51:52 $ #' #' bandwidth selection for pcf #' with least-squares cross-validation method #' #' Original code by: Rasmus Waagepetersen and Abdollah Jalilian #' #' References: #' Guan, Y. (2007). A composite likelihood cross-validation approach in #' selecting bandwidth for the estimation of the pair correlation function. #' Scandinavian Journal of Statistics, 34(2), 336--346. #' DOI: http://doi.org/10.1111/j.1467-9469.2006.00533.x #' Guan, Y. (2007). A least-squares cross-validation bandwidth #' selection approach in pair correlation function estimations. #' Statistics & Probability Letters, 77(18), 1722--1729. #' DOI: http://doi.org/10.1016/j.spl.2007.04.016 bw.pcf <- function(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, ..., verbose=FALSE, warn=TRUE) { stopifnot(is.ppp(X)) X <- unmark(X) win <- Window(X) areaW <- area(win) nX <- npoints(X) cv.method <- match.arg(cv.method) kernel <- match.kernel(kernel) #' maximum distance lag: rmax if (is.null(rmax)) rmax <- rmax.rule("K", win, nX/areaW) if(is.null(srange)) srange <- c(0, rmax/4) #' number of subintervals for discretization of [0, rmax]: nr #' length of subintervals discr <- rmax / nr #' breaks of subintervals rs <- seq(0, rmax, length.out= nr + 1) #' closepairs distances: ||u - v|| #' Pre-compute close pair distances for use in 'pcf' #' we need close pairs up to a distance rmax + smax #' where 'smax' is the maximum halfwidth of the support of the kernel smax <- srange[2] * (if(kernel == "gaussian") 2 else kernel.factor(kernel)) cpfull <- closepairs(X, rmax + smax, what="all", twice=TRUE) #' For cross-validation, restrict close pairs to distance rmax ok <- (cpfull$d <= rmax) cp <- lapply(cpfull, "[", i=ok) ds <- cp$d #' determine which subinterval each closepair distance falls into idx <- round(ds / discr) + 1L idx <- pmin.int(idx, nr+1L) #' translation edge correction factor: |W|/|W \cap W_{u-v}| edgewt <- edge.Trans(dx=cp$dx, dy=cp$dy, W=win, paired=TRUE) if(homogeneous <- is.null(lambda)) { #' homogeneous case lambda <- nX/areaW lambda2area <- lambda^2 * areaW pcfargs <- list(X=X, r=rs, divisor=divisor, kernel=kernel, correction="translate", close=cpfull) renorm.factor <- 1 } else { # inhomogeneous case: lambda is assumed to be a numeric vector giving # the intensity at the points of the point pattern X check.nvector(lambda, nX) lambda2area <- lambda[cp$i] * lambda[cp$j] * areaW pcfargs <- list(X=X, lambda=lambda, r=rs, divisor=divisor, kernel=kernel, correction="translate", close=cpfull) renorm.factor <- (areaW/sum(1/lambda)) } stuff <- list(cv.method=cv.method,
kernel=kernel, homogeneous=homogeneous, bias.correct=bias.correct, simple = simple, discr=discr, rs=rs, cp=cp, ds=ds, idx=idx, edgewt=edgewt, pcfargs=pcfargs, lambda=lambda, lambda2area=lambda2area, renorm.factor=renorm.factor, show=verbose) stuff <- list2env(stuff) #' find optimum bandwidth z <- optimizeWithTrace(CVforPCF, srange, maximum=TRUE, stuff=stuff) #' pack up ox <- order(z$x) sigma <- z$x[ox] cv <- z$y[ox] criterion <- switch(cv.method, compLik = "composite likelihood cross-validation", leastSQ = "least squares cross-validation") result <- bw.optim(cv, sigma, which.max(cv), criterion = criterion, warnextreme=warn, hargnames=c("rmax", "srange"), unitname=unitname(X)) return(result) } CVforPCF <- function(bw, stuff) { stuff$bw <- bw with(stuff, { if(show) splat("bw=", bw) #' values of pair correlation at breaks of subintervals a <- append(pcfargs, list(bw=bw)) grs <- if(homogeneous) do.call(pcf.ppp, a) else do.call(pcfinhom, a) grs <- grs$trans #' bias correction if (bias.correct) { grs <- grs / pkernel(rs, kernel, 0, bw) dcorrec <- pkernel(ds, kernel, 0, bw) } else { dcorrec <- 1 } #' make sure that the estimated pair correlation at origin is finite if (!is.finite(grs[1])) grs[1] <- grs[2] #' approximate the pair correlation values at closepairs distances gds <- grs[idx] wt <- edgewt / (2 * pi * ds * lambda2area * dcorrec) * renorm.factor #' remove pairs to approximate the cross-validation term: g^{-(u, v)} if (simple) { gds <- gds - 2 * wt * dkernel(0, kernel, 0, bw) } else { cpi <- cp$i cpj <- cp$j for (k in 1:length(ds)) { exclude <- (cpi == cpi[k]) | (cpj == cpj[k]) gds[k] <- gds[k] - 2 * sum(wt[exclude] * dkernel(ds[k] - ds[exclude], kernel, 0, bw)) } } #' remove negative and zero values gds <- pmax.int(.Machine$double.eps, gds) switch(cv.method, compLik={ #' composite likelihood cross-validation #' the integral term: 2 \pi \int_{0}^{rmax} \hat g(r) r dr normconst <- 2 * pi * sum(grs * rs) * discr value <- mean(log(gds)) - log(normconst) }, leastSQ={ #' least squares cross-validation #' the integral term: 2 \pi \int_{0}^{rmax} \hat g^2(r) r dr normconst <- 2 * pi * sum(grs^2 * rs) * discr value <- 2 * sum(gds * edgewt / (lambda2area)) - normconst }, stop("Unrecognised cross-validation method")) if(show) splat("value=", value) return(value) }) } spatstat.core/R/localpcf.R0000644000176200001440000001412714144333462015164 0ustar liggesusers# # localpcf.R # # $Revision: 1.26 $ $Date: 2021/01/07 03:08:41 $ # # localpcf <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, rvalue=NULL) { if(length(list(...)) > 0) warning("Additional arguments ignored") stopifnot(is.ppp(X)) localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, rvalue=rvalue) } localpcfinhom <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE, rvalue=NULL) { stopifnot(is.ppp(X)) a <- resolve.lambda(X, lambda, ..., sigma=sigma, varcov=varcov, update=update, leaveoneout=leaveoneout) result <- localpcfengine(X, ..., delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=a$lambda, rvalue=rvalue) if(is.null(rvalue) && a$danger) attr(result, "dangerous") <- a$dangerous return(result) } localpcfengine <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, rvalue=NULL) { if(!is.null(rvalue)) rmax <- rvalue m <- localpcfmatrix(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=lambda) r <- attr(m, "r") if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not 
attained") return(as.numeric(m[nr,])) } delta <- attr(m, "delta") nX <- npoints(X) if(nX == 0) { df <- data.frame(r=r, theo=rep.int(1, length(r))) nama <- desc <- labl <- NULL } else { # border correction dbord <- bdist.points(X) m[r[row(m)] > dbord[col(m)]] <- NA # df <- data.frame(m, r=r, theo=rep.int(1, length(r))) icode <- unlist(lapply(seq_len(nX), numalign, nmax=nX)) nama <- paste("est", icode, sep="") desc <- paste("estimate of %s for point", icode) labl <- paste("%s[", icode, "](r)", sep="") } names(df) <- c(nama, "r", "theo") desc <- c(desc, "distance argument r", "theoretical Poisson %s") labl <- c(labl, "r", "%s[pois](r)") # create fv object g <- fv(df, "r", quote(localg(r)), "theo", , c(0, max(r)), labl, desc, fname="localg") # default is to display them all formula(g) <- . ~ r fvnames(g, ".") <- names(df)[names(df) != "r"] unitname(g) <- unitname(X) attr(g, "delta") <- delta attr(g, "correction") <- "border" return(g) } localpcfmatrix <- function(X, i=seq_len(npoints(X)), ..., lambda = NULL, delta=NULL, rmax=NULL, nr=512, stoyan=0.15) { missi <- missing(i) weighted <- !is.null(lambda) nX <- npoints(X) nY <- if(missi) nX else length(seq_len(nX)[i]) W <- as.owin(X) lambda.ave <- nX/area(W) if(is.null(delta)) delta <- stoyan/sqrt(lambda.ave) if(is.null(rmax)) rmax <- rmax.rule("K", W, lambda.ave) # if(nX == 0 || nY == 0) { out <- matrix(0, nr, 0) } else { # sort points in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] idXsort <- (1:nX)[oX] if(weighted) { lambdaXsort <- lambda[oX] weightXsort <- 1/lambdaXsort } if(missi) { Y <- X oY <- oX Ysort <- Xsort idYsort <- idXsort } else { # i is some kind of index Y <- X[i] idY <- (1:nX)[i] oY <- fave.order(Y$x) Ysort <- Y[oY] idYsort <- idY[oY] } nY <- npoints(Y) force(nr) # call C if(!weighted) { zz <- .C(SC_locpcfx, nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), PACKAGE="spatstat.core") } else { zz <- .C(SC_locWpcfx, nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), w2 = as.double(weightXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), PACKAGE="spatstat.core") } out <- matrix(zz$pcf, nr, nY) # reorder columns to match original out[, oY] <- out # rescale out <- out/(2 * pi * if(!weighted) lambda.ave else 1) } # dress up attr(out, "r") <- seq(from=0, to=rmax, length.out=nr) attr(out, "delta") <- delta class(out) <- c("localpcfmatrix", class(out)) return(out) } print.localpcfmatrix <- function(x, ...) { cat("Matrix of local pair correlation estimates\n") nc <- ncol(x) nr <- nrow(x) cat(paste("pcf estimates for", nc, ngettext(nc, "point", "points"), "\n")) rval <- attr(x, "r") cat(paste("r values from 0 to", max(rval), "in", nr, "steps\n")) return(invisible(NULL)) } plot.localpcfmatrix <- function(x, ...) { xname <- short.deparse(substitute(x)) force(x) rval <- attr(x, "r") do.call(matplot, resolve.defaults(list(rval, quote(x)), list(...), list(type="l", main=xname, xlab="r", ylab="pair correlation"))) } "[.localpcfmatrix" <- function(x, i, ...) { r <- attr(x, "r") delta <- attr(x, "delta") class(x) <- "matrix" if(missing(i)) { x <- x[ , ...] } else { x <- x[i, ...] 
if(is.matrix(i)) return(x) r <- r[i] } if(!is.matrix(x)) x <- matrix(x, nrow=length(r)) attr(x, "r") <- r attr(x, "delta") <- delta class(x) <- c("localpcfmatrix", class(x)) return(x) } spatstat.core/R/Kmeasure.R0000644000176200001440000004322214144333461015152 0ustar liggesusers# # Kmeasure.R # # $Revision: 1.72 $ $Date: 2020/11/04 01:09:44 $ # # Kmeasure() compute an estimate of the second order moment measure # # Kest.fft() use Kmeasure() to form an estimate of the K-function # # second.moment.calc() underlying algorithm # # second.moment.engine() underlying underlying algorithm! # Kmeasure <- function(X, sigma, edge=TRUE, ..., varcov=NULL) { stopifnot(is.ppp(X)) sigma.given <- !missing(sigma) && !is.null(sigma) varcov.given <- !is.null(varcov) ngiven <- sigma.given + varcov.given if(ngiven == 2) stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) if(ngiven == 0) stop(paste("Please specify smoothing bandwidth", sQuote("sigma"), "or", sQuote("varcov"))) if(varcov.given) { stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) sigma <- NULL } else { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1,2)) stopifnot(all(sigma > 0)) if(length(sigma) == 2) { varcov <- diag(sigma^2) sigma <- NULL } } second.moment.calc(x=X, sigma=sigma, edge=edge, what="Kmeasure", varcov=varcov, ...) } second.moment.calc <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., varcov=NULL, expand=FALSE, obswin, npts=NULL, debug=FALSE) { if(is.null(sigma) && is.null(varcov)) stop("must specify sigma or varcov") obswin.given <- !missing(obswin) what <- match.arg(what) sig <- if(!is.null(sigma)) sigma else max(c(diag(varcov), sqrt(det(varcov)))) xtype <- if(is.ppp(x)) "ppp" else if(is.im(x)) "im" else if(inherits(x, "imlist")) "imlist" else if(all(sapply(x, is.im))) "imlist" else stop("x should be a point pattern or a pixel image") nimages <- switch(xtype, ppp = 1, im = 1, imlist = length(x)) win <- if(nimages == 1) as.owin(x) else as.owin(x[[1]]) win <- rescue.rectangle(win) rec <- as.rectangle(win) across <- min(diff(rec$xrange), diff(rec$yrange)) # determine whether to expand window if(!expand || (6 * sig < across)) { if(!obswin.given) obswin <- NULL result <- second.moment.engine(x, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=obswin, npts=npts, varcov=varcov) return(result) } #' need to expand window wid <- (7 * sig - across)/2 bigger <- grow.rectangle(rec, wid) switch(xtype, ppp = { # pixellate first (to preserve pixel resolution) X <- pixellate(x, ..., padzero=TRUE) np <- npoints(x) }, im = { X <- x np <- NULL }, imlist = { X <- x np <- NULL }) # Now expand if(nimages == 1) { X <- rebound.im(X, bigger) X <- na.handle.im(X, 0) } else { X <- lapply(X, rebound.im, rect=bigger) X <- lapply(X, na.handle.im, na.replace=0) } ## handle override arguments ow <- if(obswin.given) obswin else win # may be NULL if given if(!is.null(npts)) np <- npts ## Compute! 
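  ## evaluate on the expanded window; the result is clipped back to the
  ## original frame and window immediately below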
out <- second.moment.engine(X, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=ow, varcov=varcov, npts=np) # Now clip it fbox <- shift(rec, origin="midpoint") if(nimages == 1) { result <- switch(what, kernel = out[fbox], smooth = out[win], Kmeasure = out[fbox], Bartlett = out[fbox], edge = out[win], smoothedge = list(smooth=out$smooth[win], edge =out$edge[win]), all = list(kernel=out$kernel[fbox], smooth=out$smooth[win], Kmeasure=out$Kmeasure[fbox], Bartlett=out$Bartlett[fbox], edge=out$edge[win])) } else { result <- switch(what, kernel = out[fbox], smooth = lapply(out, "[", i=win), Kmeasure = lapply(out, "[", i=fbox), Bartlett = lapply(out, "[", i=fbox), edge = out[win], smoothedge = list( smooth = lapply(out$smooth, "[", i=win), edge = out$edge[win]), all = list( kernel=out$kernel[fbox], smooth=lapply(out$smooth, "[", i=win), Kmeasure=lapply(out$Kmeasure, "[", i=fbox), Bartlett=lapply(out$Bartlett, "[", i=fbox), edge=out$edge[win])) } return(result) } second.moment.engine <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., kernel="gaussian", scalekernel=is.character(kernel), obswin = as.owin(x), varcov=NULL, npts=NULL, debug=FALSE, fastgauss=FALSE) { what <- match.arg(what) validate2Dkernel(kernel) obswin.given <- !missing(obswin) && !is.null(obswin) is.second.order <- what %in% c("Kmeasure", "Bartlett", "all") needs.kernel <- what %in% c("kernel", "all", "Kmeasure") returns.several <- what %in% c("all", "smoothedge") # check whether Fastest Fourier Transform in the West is available west <- fftwAvailable() if(returns.several) result <- list() # several results will be returned in a list if(is.ppp(x)) { # convert list of points to mass distribution X <- pixellate(x, ..., padzero=TRUE) if(is.null(npts)) npts <- npoints(x) } else X <- x if(is.im(X)) { Xlist <- list(X) nimages <- 1 } else if(all(unlist(lapply(X, is.im)))) { Xlist <- X X <- Xlist[[1]] nimages <- length(Xlist) blanklist <- vector(mode="list", length=nimages) names(blanklist) <- names(Xlist) } else stop("internal error: unrecognised format for x") unitsX <- unitname(X) xstep <- X$xstep ystep <- X$ystep ## ensure obswin has same bounding frame as X if(!obswin.given) { obswin <- Window(x) } else if(!identical(Frame(obswin), Frame(X))) { obswin <- rebound.owin(obswin, as.rectangle(X)) } # go to work Y <- X$v Ylist <- lapply(Xlist, getElement, name="v") # pad with zeroes nr <- nrow(Y) nc <- ncol(Y) Ypad <- matrix(0, ncol=2*nc, nrow=2*nr) Ypadlist <- rep(list(Ypad), nimages) for(i in 1:nimages) Ypadlist[[i]][1:nr, 1:nc] <- Ylist[[i]] Ypad <- Ypadlist[[1]] lengthYpad <- 4 * nc * nr # corresponding coordinates xcol.pad <- X$xcol[1] + xstep * (0:(2*nc-1)) yrow.pad <- X$yrow[1] + ystep * (0:(2*nr-1)) # compute kernel and its Fourier transform if(fastgauss && !needs.kernel && identical(kernel, "gaussian") && is.numeric(sigma) && (length(sigma) == 1)) { #' compute Fourier transform of kernel directly (*experimental*) ii <- c(0:(nr-1), nr:1) jj <- c(0:(nc-1), nc:1) cc <- -(sigma^2 * pi^2)/2 ww <- sidelengths(Frame(X))^2 uu <- exp(ii^2 * cc/ww[2]) vv <- exp(jj^2 * cc/ww[1]) fK <- outer(uu, vv, "*") } else { # set up kernel xcol.ker <- xstep * c(0:(nc-1),-(nc:1)) yrow.ker <- ystep * c(0:(nr-1),-(nr:1)) #' kerpixarea <- xstep * ystep if(identical(kernel, "gaussian")) { if(!is.null(sigma)) { densX.ker <- dnorm(xcol.ker, sd=sigma) densY.ker <- dnorm(yrow.ker, sd=sigma) #' WAS: Kern <- outer(densY.ker, densX.ker, "*") * kerpixarea Kern <- outer(densY.ker, 
densX.ker, "*") Kern <- Kern/sum(Kern) } else if(!is.null(varcov)) { ## anisotropic kernel Sinv <- solve(varcov) halfSinv <- Sinv/2 #' WAS: #' detSigma <- det(varcov) #' constker <- kerpixarea/(2 * pi * sqrt(detSigma)) xsq <- matrix((xcol.ker^2)[col(Ypad)], ncol=2*nc, nrow=2*nr) ysq <- matrix((yrow.ker^2)[row(Ypad)], ncol=2*nc, nrow=2*nr) xy <- outer(yrow.ker, xcol.ker, "*") #' WAS: Kern <- constker * exp(.... Kern <- exp(-(xsq * halfSinv[1,1] + xy * (halfSinv[1,2]+halfSinv[2,1]) + ysq * halfSinv[2,2])) Kern <- Kern/sum(Kern) } else stop("Must specify either sigma or varcov") } else { ## non-Gaussian kernel ## evaluate kernel at array of points xker <- as.vector(xcol.ker[col(Ypad)]) yker <- as.vector(yrow.ker[row(Ypad)]) #' WAS: Kern <- kerpixarea * evaluate2Dkernel(... Kern <- evaluate2Dkernel(kernel, xker, yker, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) if(!all(ok <- is.finite(Kern))) { if(anyNA(Kern)) stop("kernel function produces NA values") if(any(is.nan(Kern))) stop("kernel function produces NaN values") ra <- range(Kern[ok]) Kern[Kern == Inf] <- ra[2] Kern[Kern == -Inf] <- ra[1] } Kern <- matrix(Kern, ncol=2*nc, nrow=2*nr) Kern <- Kern/sum(Kern) } if(what %in% c("kernel", "all")) { ## kernel will be returned ## first rearrange it into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(debug) { if(any(fave.order(xcol.ker) != rtwist)) splat("something round the twist") } Kermit <- Kern[ rtwist, ctwist] ker <- im(Kermit, xcol.ker[ctwist], yrow.ker[ rtwist], unitname=unitsX) if(what == "kernel") return(ker) else result$kernel <- ker } ## convolve using fft fK <- fft2D(Kern, west=west) } if(what != "edge") { if(nimages == 1) { fY <- fft2D(Ypad, west=west) sm <- fft2D(fY * fK, inverse=TRUE, west=west)/lengthYpad if(debug) { splat("smooth: maximum imaginary part=", signif(max(Im(sm)),3)) if(!is.null(npts)) splat("smooth: mass error=", signif(sum(Mod(sm))-npts,3)) } } else { fYlist <- smlist <- blanklist for(i in 1:nimages) { fYlist[[i]] <- fY.i <- fft2D(Ypadlist[[i]], west=west) smlist[[i]] <- sm.i <- fft2D(fY.i * fK, inverse=TRUE, west=west)/lengthYpad if(debug) { splat("smooth component", i, ": maximum imaginary part=", signif(max(Im(sm.i)),3)) if(!is.null(npts)) splat("smooth component", i, ": mass error=", signif(sum(Mod(sm.i))-npts,3)) } } } } if(what %in% c("smooth", "all", "smoothedge")) { # compute smoothed point pattern without edge correction if(nimages == 1) { smo <- im(Re(sm)[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "smooth") { return(smo) } else { result$smooth <- smo } } else { smolist <- blanklist for(i in 1:nimages) smolist[[i]] <- im(Re(smlist[[i]])[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) smolist <- as.solist(smolist) if(what == "smooth") { return(smolist) } else { result$smooth <- smolist } } } if(is.second.order) { # compute Bartlett spectrum if(nimages == 1) { bart <- BartCalc(fY, fK) ## bart <- Mod(fY)^2 * fK } else { bartlist <- lapply(fYlist, BartCalc, fK=fK) } } if(what %in% c("Bartlett", "all")) { # Bartlett spectrum will be returned # rearrange into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { Bart <- bart[ rtwist, ctwist] Bartlett <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) if(what == "Bartlett") return(Bartlett) else result$Bartlett <- Bartlett } else { Bartlist <- blanklist for(i in 1:nimages) { Bart <- (bartlist[[i]])[ 
rtwist, ctwist] Bartlist[[i]] <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) } Bartlist <- as.solist(Bartlist) if(what == "Bartlett") return(Bartlist) else result$Bartlett <- Bartlist } } #### ------- Second moment measure -------------- # if(is.second.order) { if(nimages == 1) { mom <- fft2D(bart, inverse=TRUE, west=west)/lengthYpad if(debug) { splat("2nd moment measure: maximum imaginary part=", signif(max(Im(mom)),3)) if(!is.null(npts)) splat("2nd moment measure: mass error=", signif(sum(Mod(mom))-npts^2, 3)) } mom <- Mod(mom) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom <- mom - npts* Kern } else { momlist <- blanklist for(i in 1:nimages) { mom.i <- fft2D(bartlist[[i]], inverse=TRUE, west=west)/lengthYpad if(debug) { splat("2nd moment measure: maximum imaginary part=", signif(max(Im(mom.i)),3)) if(!is.null(npts)) splat("2nd moment measure: mass error=", signif(sum(Mod(mom.i))-npts^2, 3)) } mom.i <- Mod(mom.i) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom.i <- mom.i - npts* Kern momlist[[i]] <- mom.i } } } # edge correction if(edge || what %in% c("edge", "all", "smoothedge")) { M <- as.mask(obswin, xy=list(x=X$xcol, y=X$yrow))$m # previous line ensures M has same dimensions and scale as Y Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft2D(Mpad, west=west) if(edge && is.second.order) { # compute kernel-smoothed set covariance # apply edge correction co <- fft2D(Mod(fM)^2 * fK, inverse=TRUE, west=west)/lengthMpad co <- Mod(co) a <- sum(M) wt <- a/co me <- spatstat.options("maxedgewt") weight <- matrix(pmin.int(me, wt), ncol=2*nc, nrow=2*nr) # apply edge correction to second moment measure if(nimages == 1) { mom <- mom * weight # set to NA outside 'reasonable' region mom[wt > 10] <- NA } else { wgt10 <- (wt > 10) for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * weight # set to NA outside 'reasonable' region mom.i[wgt10] <- NA momlist[[i]] <- mom.i } } } } if(is.second.order) { # rearrange second moment measure # into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { mom <- mom[ rtwist, ctwist] } else { momlist <- lapply(momlist, "[", i=rtwist, j=ctwist) } if(debug) { if(any(fave.order(xcol.ker) != rtwist)) splat("internal error: something round the twist") } } if(what %in% c("edge", "all", "smoothedge")) { # return convolution of window with kernel # (evaluated inside window only) con <- fft2D(fM * fK, inverse=TRUE, west=west)/lengthMpad edg <- Mod(con[1:nr, 1:nc]) edg <- im(edg, xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "edge") return(edg) else result$edge <- edg } if(what == "smoothedge") return(result) # Second moment measure, density estimate # Divide by number of points * lambda and convert mass to density pixarea <- xstep * ystep if(nimages == 1) { mom <- mom * area(obswin) / (pixarea * npts^2) # this is the second moment measure mm <- im(mom, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX) if(what == "Kmeasure") return(mm) else result$Kmeasure <- mm } else { ccc <- area(obswin) / (pixarea * npts^2) mmlist <- blanklist for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * ccc # this is the second moment measure mmlist[[i]] <- im(mom.i, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX) } mmlist <- as.solist(mmlist) if(what == "Kmeasure") return(mmlist) else 
result$Kmeasure <- mmlist } # what = "all", so return all computed objects return(result) } BartCalc <- function(fY, fK) { Mod(fY)^2 * fK } Kest.fft <- function(X, sigma, r=NULL, ..., breaks=NULL) { verifyclass(X, "ppp") W <- Window(X) lambda <- npoints(X)/area(W) rmaxdefault <- rmax.rule("K", W, lambda) bk <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) breaks <- bk$val rvalues <- bk$r u <- Kmeasure(X, sigma, ...) xx <- rasterx.im(u) yy <- rastery.im(u) rr <- sqrt(xx^2 + yy^2) tr <- whist(rr, breaks, u$v) K <- cumsum(tr) * with(u, xstep * ystep) rmax <- min(rr[is.na(u$v)]) K[rvalues >= rmax] <- NA result <- data.frame(r=rvalues, theo=pi * rvalues^2, border=K) w <- X$window alim <- c(0, min(diff(w$xrange), diff(w$yrange))/4) out <- fv(result, "r", quote(K(r)), "border", . ~ r, alim, c("r", "%s[pois](r)", "hat(%s)[fb](r)"), c("distance argument r", "theoretical Poisson %s", "border-corrected FFT estimate of %s"), fname="K", unitname=unitname(X) ) return(out) } spatstat.core/R/reduceformula.R0000644000176200001440000000627414144333464016244 0ustar liggesusers# # reduceformula.R # # $Revision: 1.7 $ $Date: 2016/12/30 01:44:07 $ # # delete variable from formula # #...................................................... # reduceformula <- function(fmla, deletevar, verbose=FALSE) { ## removes the variable `deletevar' from the formula `fmla' ## returns a simplified formula, or NULL if it can't simplify. stopifnot(inherits(fmla, "formula")) stopifnot(is.character(deletevar) && length(deletevar) == 1) if(!(deletevar %in% all.vars(as.expression(fmla)))) { if(verbose) message(paste("The formula does not involve", dQuote(deletevar), "and is therefore unchanged")) return(fmla) } lhs <- if(length(fmla) < 3) NULL else fmla[[2]] ## create terms object tt <- attributes(terms(fmla)) ## formula.has.intercept <- (tt$intercept == 1) ## extract all variables appearing in the model vars <- as.list(tt$variables)[-1] nvars <- length(vars) varexprs <- lapply(vars, as.expression) varstrings <- sapply(varexprs, paste) ## identify any offsets offs <- tt$offset v.is.offset <- if(!is.null(offs)) (1:nvars) %in% offs else rep(FALSE, nvars) ## remove the response repo <- tt$response if(repo != 0) { vars <- vars[-repo] varstrings <- varstrings[-repo] varexprs <- varexprs[-repo] v.is.offset <- v.is.offset[-repo] } ## a term may be a variable name ## v.is.name <- sapply(vars, is.name) ## a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(varexprs, all.vars) matches.delete <- lapply(v.args, "==", deletevar) v.has.delete <- sapply(matches.delete, any) v.has.other <- !sapply(matches.delete, all) v.is.mixed <- v.has.delete & v.has.other ## we can't handle mixed terms like sin(x-d), poly(x,d) ## where d is to be deleted. Handling these would require ## knowledge about the functions sin and poly. if(any(v.is.mixed)) { nmixed <- sum(v.is.mixed) if(verbose) message(paste("Don't know how to reduce the", ngettext(nmixed, "term", "terms"), paste(dQuote(varstrings[v.is.mixed]), collapse=","))) return(NULL) } ## OK. We have identified all first order terms to be deleted. 
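  ## (e.g. reduceformula(y ~ x + d + log(d), "d") condemns 'd' and 'log(d)'
  ## and should return y ~ x; a mixed term such as poly(x, d) would already
  ## have forced a NULL return above)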
condemned.names <- varstrings[v.has.delete] ## Determine the terms of all orders that include these first order terms ## (1) terms with model coefficients fax <- tt$factors if(prod(dim(fax)) == 0) retained.terms <- character(0) else { ## Rows are first order terms condemned.row <- rownames(fax) %in% condemned.names ## Columns are the terms of all orders allterms <- colnames(fax) ## Find all columns containing a 1 in a row that is to be deleted if(any(condemned.row)) { condemned.column <- matcolany(fax[condemned.row, , drop=FALSE] != 0) retained.terms <- allterms[!condemned.column] } else retained.terms <- allterms } ## (2) offsets if any if(any(v.is.offset)) retained.terms <- c(retained.terms, varstrings[v.is.offset & !v.has.delete]) ## (3) intercept forced? if(length(retained.terms) == 0) retained.terms <- "1" ## OK. Cut-and-paste f <- paste(lhs, "~", paste(retained.terms, collapse=" + ")) return(as.formula(f)) } spatstat.core/R/profilepl.R0000644000176200001440000002766414144333463015410 0ustar liggesusers# # profilepl.R # # $Revision: 1.47 $ $Date: 2020/11/17 03:47:24 $ # # computes profile log pseudolikelihood # profilepl <- local({ ## Determine edge correction ## with partial matching, avoiding collisions with ## other arguments to ppm that have similar names. getppmcorrection <- function(..., correction = "border", covariates = NULL, covfunargs = NULL, control = NULL) { return(correction) } isSingleNA <- function(x) { length(x) == 1 && is.na(x) } profilepl <- function(s, f, ..., aic=FALSE, rbord=NULL, verbose=TRUE, fast=TRUE) { callenv <- parent.frame() s <- as.data.frame(s) n <- nrow(s) stopifnot(is.function(f)) ## validate 's' parms <- names(s) fargs <- names(formals(f)) if(!all(fargs %in% parms)) { bad <- !(fargs %in% parms) forgiven <- sapply(formals(f)[bad], isSingleNA) if(!all(forgiven)) { slecht <- fargs[bad[!forgiven]] nsl <- length(slecht) stop(paste(ngettext(nsl, "Argument", "Arguments"), commasep(sQuote(slecht)), ngettext(nsl, "is", "are"), "not provided in the data frame s")) } } ## extra columns in 's' are assumed to be parameters of covariate functions is.farg <- parms %in% fargs pass.cfa <- any(!is.farg) got.cfa <- "covfunargs" %in% names(list(...)) if(pass.cfa && got.cfa) stop("Some columns in s are superfluous") ## criterion <- numeric(n) ## make a fake call pseudocall <- match.call() pseudocall[[1]] <- as.symbol("ppm") namcal <- names(pseudocall) ## remove arguments 's' and 'verbose' retain <- !(namcal %in% c("s", "verbose")) pseudocall <- pseudocall[retain] namcal <- namcal[retain] ## place 'f' argument third np <- length(pseudocall) fpos <- (1:np)[namcal == "f"] indices <- (1:np)[-fpos] if(length(indices) < 3) { indices <- c(indices, fpos) } else { indices <- c(indices[1:3], fpos, indices[-(1:3)]) } pseudocall <- pseudocall[indices] namcal <- names(pseudocall) namcal[namcal=="f"] <- "interaction" names(pseudocall) <- namcal ## get correction correction <- getppmcorrection(...) 
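    ## under border correction, rbord must be at least the maximum
    ## interaction reach over all candidate models, so that every model is
    ## fitted on the same quadrature domain and the criteria are comparable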
if(correction == "border") { ## determine border correction distance if(is.null(rbord)) { ## compute rbord = max reach of interactions if(verbose) message("(computing rbord)") for(i in 1:n) { fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) re <- reach(fi) if(is.null(rbord)) rbord <- re else if(rbord < re) rbord <- re } } } ## determine whether computations can be saved if(pass.cfa || got.cfa) { savecomp <- FALSE } else { Q <- do.call(ppm, append(list(...), list(rbord=rbord, justQ=TRUE)), envir=callenv) savecomp <- !oversize.quad(Q) } ## go gc() if(verbose) { message(paste("comparing", n, "models...")) pstate <- list() } for(i in 1:n) { if(verbose) pstate <- progressreport(i, n, state=pstate) fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) if(pass.cfa) cfai <- list(covfunargs=as.list(s[i, !is.farg, drop=FALSE])) ## fit model if(i == 1) { ## fit from scratch arg1 <- list(..., interaction=fi, rbord=rbord, savecomputed=savecomp, warn.illegal=FALSE, callstring="", skip.border=TRUE, clip.interaction=!fast) if(pass.cfa) arg1 <- append(arg1, cfai) fiti <- do.call(ppm, arg1, envir=callenv) ## save intermediate computations (pairwise distances, etc) precomp <- fiti$internal$computed savedargs <- list(..., rbord=rbord, precomputed=precomp, warn.illegal=FALSE, callstring="", skip.border=TRUE, clip.interaction=!fast) } else { ## use precomputed data argi <- append(savedargs, list(interaction=fi)) if(pass.cfa) argi <- append(argi, cfai) fiti <- do.call(ppm, argi, envir=callenv) } ## save log pl for each fit criterion[i] <- if(aic) -AIC(fiti) else as.numeric(logLik(fiti, warn=FALSE)) ## save fitted coefficients for each fit co <- coef(fiti) if(i == 1) { allcoef <- data.frame(matrix(co, nrow=1)) names(allcoef) <- names(co) } else allcoef <- rbind(allcoef, co) } if(verbose) message("fitting optimal model...") opti <- which.max(criterion) gc() optint <- do.call(f, as.list(s[opti, is.farg, drop=FALSE])) optarg <- list(..., interaction=optint, rbord=rbord) if(pass.cfa) { optcfa <- as.list(s[opti, !is.farg, drop=FALSE]) attr(optcfa, "fitter") <- "profilepl" optarg <- append(optarg, list(covfunargs=optcfa)) } optfit <- do.call(ppm, optarg, envir=callenv) if(verbose) message("done.") critname <- if(aic) "-AIC" else if(is.poisson(optfit)) "log l" else if(optfit$method == "logi") "log CL" else "log PL" result <- list(param=s, prof=criterion, critname=critname, iopt=opti, fit=optfit, rbord=rbord, fname=as.interact(optfit)$name, allcoef=allcoef, otherstuff=list(...), pseudocall=pseudocall) class(result) <- c("profilepl", class(result)) return(result) } profilepl }) ## ## print method ## print.profilepl <- function(x, ...) 
{ head1 <- "profile log pseudolikelihood" head2 <- "for model: " psc <- paste(unlist(strsplitretain(format(x$pseudocall))), collapse=" ") if(nchar(psc) + nchar(head2) + 1 <= getOption('width')) { splat(head1) splat(head2, psc) } else { splat(head1, head2) splat(psc) } nparm <- ncol(x$param) if(waxlyrical('extras')) { corx <- x$fit$correction if(identical(corx, "border") && !is.null(x$rbord)) splat("fitted with rbord =", x$rbord) splat("interaction:", x$fname) splat("irregular", ngettext(nparm, "parameter:", "parameters:\n"), paste(names(x$param), "in", unlist(lapply(lapply(as.list(x$param), range), prange)), collapse="\n")) } popt <- x$param[x$iopt,, drop=FALSE] splat("optimum", ngettext(nparm, "value", "values"), "of irregular", ngettext(nparm, "parameter: ", "parameters:\n"), commasep(paste(names(popt), "=", popt))) invisible(NULL) } ## ## summary method ## summary.profilepl <- function(object, ...) { print(object) cat("\n\noptimal model:\n") print(object$fit) } as.ppm.profilepl <- function(object) { object$fit } fitin.profilepl <- function(object) { fitin(as.ppm(object)) } predict.profilepl <- function(object, ...) { predict(as.ppm(object), ...) } ## ## plot method ## plot.profilepl <- local({ plot.profilepl <- function(x, ..., add=FALSE, main=NULL, tag=TRUE, coeff=NULL, xvariable=NULL, col=1, lty=1, lwd=1, col.opt="green", lty.opt=3, lwd.opt=1) { para <- x$param ## graphics arguments may be expressions involving parameters if(ncol(para) > 1) { col <- eval(substitute(col), para) lwd <- eval(substitute(lwd), para) lty <- eval(substitute(lty), para) px <- cbind(para, col, lwd, lty, stringsAsFactors=FALSE) col <- px$col lwd <- px$lwd lty <- px$lty } ## strip any column that is entirely na if(any(nacol <- sapply(para, none.finite))) { warning(paste("Deleted the irregular", ngettext(sum(nacol), "parameter", "parameters"), commasep(sQuote(names(para)[nacol])), "because all values were NA"), call.=FALSE) para <- para[, !nacol, drop=FALSE] } ## npara <- ncol(para) ## main header if(is.null(main)) main <- short.deparse(x$pseudocall) ## x variable for plot if(is.null(xvariable)) { xvalues <- para[,1L] xname <- names(para)[1L] } else { stopifnot(is.character(xvariable)) if(!(xvariable %in% names(para))) stop("there is no irregular parameter named", sQuote(xvariable)) xvalues <- para[[xvariable]] xname <- xvariable } ## y variable for plot if(is.null(coeff)) { yvalues <- x$prof ylab <- x$critname %orifnull% "log pl" } else { stopifnot(is.character(coeff)) allcoef <- x$allcoef if(!(coeff %in% names(allcoef))) stop(paste("there is no coefficient named", sQuote(coeff), "in the fitted model")) yvalues <- allcoef[[coeff]] ylab <- paste("coefficient:", coeff) } ## start plot if(!add) do.call.matched(plot.default, resolve.defaults(list(x=range(xvalues), y=range(yvalues)), list(type="n", main=main), list(...), list(ylab=ylab, xlab=xname)), extrargs=graphicsPars("plot")) linepars <- graphicsPars("lines") if(npara == 1) { ## single curve do.call.matched(lines.default, resolve.defaults(list(x=quote(xvalues), y=quote(yvalues), ...), spatstat.options("par.fv")), extrargs=linepars) } else { ## multiple curves xvarindex <- match(xname, names(para)) other <- para[, -xvarindex, drop=FALSE] tapply(1:nrow(para), as.list(other), plotslice, xvalues=xvalues, yvalues=yvalues, other=other, tag=tag, ..., col=col, lwd=lwd, lty=lty, lineargs=linepars) } ## show optimal value do.call.matched(abline, resolve.defaults(list(v = xvalues[x$iopt]), list(...), list(lty=lty.opt, lwd=lwd.opt, col=col.opt)), extrargs=linepars) 
return(invisible(NULL)) } plotslice <- function(z, xvalues, yvalues, other, tag=TRUE, ..., lty=1, col=1, lwd=1, lineargs) { fz <- xvalues[z] pz <- yvalues[z] n <- length(xvalues) if(length(lty) == n) lty <- unique(lty[z])[1] if(length(col) == n) col <- unique(col[z])[1] if(length(lwd) == n) lwd <- unique(lwd[z])[1] do.call.matched(lines.default, resolve.defaults(list(x=quote(fz), y=quote(pz), col=col, lwd=lwd, lty=lty), list(...)), extrargs=lineargs) if(tag) { oz <- other[z, , drop=FALSE] uniques <- apply(oz, 2, unique) labels <- paste(names(uniques), "=", uniques, sep="") label <- paste(labels, sep=",") ii <- which.max(pz) do.call.matched(text.default, list(x=fz[ii], y=pz[ii], labels=label, col=col, ...), funargs=graphicsPars("text")) } return(NULL) } none.finite <- function(x) all(!is.finite(x)) plot.profilepl }) simulate.profilepl <- function(object, ...) { simulate(as.ppm(object), ...) } parameters.profilepl <- function(model, ...) { parameters(as.ppm(model)) } spatstat.core/R/qqplotppm.R0000644000176200001440000002657314144333463015447 0ustar liggesusers# # QQ plot of smoothed residual field against model # # qqplot.ppm() QQ plot (including simulation) # # $Revision: 1.31 $ $Date: 2020/11/18 03:07:14 $ # qqplot.ppm <- local({ ## How to refit the model refit <- function(fit, pattern) { update.ppm(fit, Q=pattern, use.internal=(fit$method != "mppm")) } ## how to compute the residual field residualfield <- function(fit, ...) { d <- diagnose.ppm(fit, which="smooth", plot.it=FALSE, compute.cts=FALSE, compute.sd=FALSE, check=FALSE, ...) return(d$smooth$Z$v) } qqplot.ppm <- function(fit, nsim=100, expr=NULL, ..., type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) { verifyclass(fit, "ppm") if(check && damaged.ppm(fit)) { if(!repair) stop("object format corrupted; try update(fit, use.internal=TRUE)") message("object format corrupted; repairing it.") fit <- update(fit, use.internal=TRUE) } if(fast) { oldnpixel <- spatstat.options("npixel") if(is.null(dimyx)) dimyx <- pmin(40, rev(oldnpixel)) spatstat.options(npixel=rev(dimyx)) } ################ How to evaluate residuals ########################## ## Quantiles of the residual field will be computed. ## Data values dat <- residualfield(fit, type=type, ..., dimyx=dimyx) ################## How to perform simulations? 
####################### ## envir.call <- sys.parent() envir.here <- sys.frame(sys.nframe()) ## extract.from.list <- FALSE inext <- 0 # to placate package checker dont.complain.about(inext) if(is.null(expr)) { ## We will simulate from the fitted model 'nsim' times ## and refit the model to these simulations simsource <- "fit" how.simulating <- "simulating from fitted model" ## prepare rmh arguments rcontrol <- rmhcontrol(control) rmodel <- rmhmodel(fit, control=rcontrol, project=FALSE, verbose=verbose) rstart <- rmhstart(n.start=data.ppm(fit)$n) ## pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontrol, preponly=TRUE, verbose=FALSE) ## expression to be evaluated each time expr <- expression( refit(fit, rmhEngine(rmhinfolist, verbose=FALSE))) envir.expr <- envir.here ## pacify code checkers dont.complain.about(rmhinfolist) } else if(is.expression(expr)) { simsource <- "expr" how.simulating <- paste("evaluating", sQuote("expr")) if(missing(envir.expr) || is.null(envir.expr)) envir.expr <- parent.frame() } else if(inherits(expr, "envelope")) { simpat <- attr(expr, "simpatterns") if(!is.null(simpat) && all(sapply(simpat, is.ppp))) { expr <- expression(simpat[[inext]]) envir.expr <- envir.here dont.complain.about(simpat) simsource <- "list" how.simulating <- "extracting point pattern from list" } else stop(paste("Argument", sQuote("expr"), "is an envelope object,", "but does not contain point patterns"), call.=FALSE) } else if(is.list(expr) && all(sapply(expr, is.ppp))) { simpat <- expr expr <- expression(simpat[[inext]]) envir.expr <- envir.here dont.complain.about(simpat) simsource <- "list" how.simulating <- "extracting point pattern from list" } else stop(paste(sQuote("expr"), "should be an expression, or an envelope object,", "or a list of point patterns"), call.=FALSE) exprstring <- if(simsource == "expr") deparse(expr) else NULL ###### Perform simulations if(verbose) { cat(paste("Simulating", nsim, "realisations... 
")) pstate <- list() } simul.sizes <- numeric(nsim) isim <- 0 ierr <- 0 repeat { inext <- isim + 1 ## protect from randomly-generated crashes in gam ei <- try(eval(expr, envir=envir.expr), silent=!verbose) if(inherits(ei, "try-error")) { ## error encountered in evaluating 'expr' ierr <- ierr + 1 if(ierr > maxerr) stop(paste("Exceeded maximum of", maxerr, "failures in", how.simulating, "after generating only", isim, "realisations")) else break } else { ## simulation successful isim <- isim + 1 fiti <- if(simsource == "fit") ei else if(is.ppm(ei)) ei else if(is.ppp(ei)) refit(fit, ei) else stop("result of eval(expr) is not a ppm or ppp object") ## diagnostic info simul.sizes[isim] <- data.ppm(fiti)$n ## compute residual field resi <- residualfield(fiti, type=type, ..., dimyx=dimyx) if(isim == 1) sim <- array(, dim=c(dim(resi), nsim)) sim[,,isim] <- resi if(verbose) pstate <- progressreport(isim, nsim, state=pstate) if(isim >= nsim) break } } ###### Report diagnostics if(ierr > 0) cat(paste("\n\n**Alert:", ierr, "failures occurred in", how.simulating, "\n\n")) nempty <- sum(simul.sizes == 0) if(nempty > 0) cat(paste("\n\n**Alert:", nempty, "out of", nsim, "simulated patterns were empty.\n\n")) else cat(paste("\nDiagnostic info:\n", "simulated patterns contained an average of", mean(simul.sizes), "points.\n")) if(nempty == nsim) warning("All simulated patterns were empty") ############ Plot them switch(style, classical = { rr <- range(c(dat,sim)) result <- qqplot(sim, dat, xlim=rr, ylim=rr, asp=1.0, xlab="Quantiles of simulation", ylab="Quantiles of data",plot.it=plot.it) title(sub=paste("Residuals:", type)) abline(0,1, lty=2) result <- append(result, list(data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Quantiles of simulation", ylab="Quantiles of data", rtype=type, nsim=nsim, fit=fit, expr=exprstring, simsource = simsource ) ) }, mean = { ## compute quantiles corresponding to probabilities p[i] ## separately in each realisation. 
if(verbose) cat("Calculating quantiles...") if(fast) { p <- ppoints(min(100,length(dat)), 3/8) qsim <- apply(sim, 3, quantile, probs=p, na.rm=TRUE) } else { qsim <- apply(sim, 3, sort, na.last=TRUE) } if(verbose) cat("averaging...") ## sample mean of each quantile meanq <- apply(qsim, 1, mean, na.rm=TRUE) ## et cetera varq <- apply(qsim, 1, var, na.rm=TRUE) sdq <- sqrt(varq) q.025 <- apply(qsim, 1, quantile, probs=0.025, na.rm=TRUE) q.975 <- apply(qsim, 1, quantile, probs=0.975, na.rm=TRUE) rr <- range(c(meanq,dat), na.rm=TRUE) dats <- if(fast) quantile(dat, probs=p, na.rm=TRUE) else sort(dat, na.last=TRUE) if(verbose) cat("..Done.\n") if(plot.it) { plot(meanq, dats, xlab="Mean quantile of simulations", ylab="data quantile", xlim=rr, ylim=rr, asp=1.0) abline(0,1) lines(meanq, q.025, lty=2, col=limcol) lines(meanq, q.975, lty=2, col=limcol) title(sub=paste("Residuals:", type)) } result <- list(x=meanq, y=dats, sdq=sdq, q.025=q.025, q.975=q.975, data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Mean quantile of simulations", ylab="data quantile", rtype=type, nsim=nsim, fit=fit, expr=exprstring, simsource=simsource) }, stop(paste("Unrecognised option for", sQuote("style"))) ) ## Throw out baggage if not wanted if(!saveall) { result$fit <- summary(fit, quick=TRUE) result$sim <- NULL } ## reset npixel if(fast) spatstat.options(npixel=oldnpixel) ## class(result) <- c("qqppm", class(result)) return(invisible(result)) } qqplot.ppm }) plot.qqppm <- local({ plot.qqppm <- function(x, ..., limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") { stopifnot(inherits(x, "qqppm")) default.type <- if(length(x$x) > 150) "l" else "p" do.call(myplot, resolve.defaults(list(quote(x), ..., type=default.type, limits=limits, limcol=limcol))) return(invisible(x)) } myplot <- function(object, xlab = object$xlab, ylab = object$ylab, xlim = object$xlim, ylim = object$ylim, asp = 1, type = default.type, ..., limits=TRUE, limcol="red") { plot(object$x, object$y, xlab = xlab, ylab = ylab, xlim = xlim, ylim = ylim, asp = asp, type = type, ...) abline(0, 1) if(limits) { if(!is.null(object$q.025)) lines(object$x, object$q.025, lty = 2, col=limcol) if(!is.null(object$q.975)) lines(object$x, object$q.975, lty = 2, col=limcol) } title(sub=paste("Residuals:", object$rtype)) } plot.qqppm }) print.qqppm <- function(x, ...) 
{ stopifnot(inherits(x, "qqppm")) splat("Q-Q plot of point process residuals", "of type", sQuote(x$rtype), "\n", "based on", x$nsim, "simulations") simsource <- x$simsource if(is.null(simsource)) # old version simsource <- if(x$simulate.from.fit) "fit" else "expr" switch(simsource, fit = { fit <- x$fit sumfit <- if(is.ppm(fit)) summary(fit, quick=TRUE) else if(inherits(fit, "summary.ppm")) fit else list(name="(unrecognised format)") splat("\nSimulations from fitted model:", sumfit$name) }, expr = { splat("Simulations obtained by evaluating the following expression:") print(x$expr) }, list = { splat("Simulated point patterns were provided in a list") }) invisible(NULL) } spatstat.core/R/multipair.util.R0000644000176200001440000000173014144333463016360 0ustar liggesusers## ## ## multipair.util.R ## ## $Revision: 1.13 $ $Date: 2014/04/29 01:13:35 $ ## ## Utilities for multitype pairwise interactions ## ## ------------------------------------------------------------------- ## MultiPair.checkmatrix <- function(mat, n, matname, naok=TRUE, zerook=TRUE, asymmok=FALSE) { if(missing(matname)) matname <- short.deparse(substitute(mat)) if(!is.matrix(mat)) stop(paste(matname, "must be a matrix")) if(any(dim(mat) != rep.int(n,2))) stop(paste(matname, "must be a square matrix,", "of size", n, "x", n)) isna <- is.na(mat) if(!naok && any(isna)) stop(paste("NA entries not allowed in", matname)) if(any(mat[!isna] < 0)) stop(paste("Negative entries not allowed in", matname)) if(!zerook && any(mat[!isna] == 0)) stop(paste("Zero entries not allowed in", matname)) if(!asymmok && !isSymmetric(mat)) stop(paste(matname, "must be a symmetric matrix")) } spatstat.core/R/densityHeat.ppp.R0000644000176200001440000002567014141452520016457 0ustar liggesusers#' #' densityHeat.ppp.R #' #' Diffusion estimator of density/intensity #' densityHeat <- function(x, sigma, ...) 
{ UseMethod("densityHeat") } densityHeat.ppp <- function(x, sigma, ..., weights=NULL, connect=8, symmetric=FALSE, sigmaX=NULL, k=1, show=FALSE, se=FALSE, at=c("pixels", "points"), leaveoneout = TRUE, extrapolate = FALSE, coarsen = TRUE, verbose=TRUE, internal=NULL) { stopifnot(is.ppp(x)) nX <- npoints(x) at <- match.arg(at) if(length(weights)) check.nvector(weights, nX) else weights <- NULL if(extrapolate) { ## Richardson extrapolation ## first compute intensity estimate on the desired grid cl <- sys.call() cl$extrapolate <- FALSE L <- eval(cl, sys.parent()) dimL <- dim(L) ## remove all function arguments that control pixel resolution cl$dimyx <- cl$eps <- cl$xy <- NULL if(coarsen) { ## compute on the desired grid and on a coarser grid Lfine <- L dimfine <- dimL ## compute on coarser grid dimcoarse <- round(dimfine/2) cl$dimyx <- dimcoarse Lcoarse <- eval(cl, sys.parent()) ## interpolate coarse to fine Lcoarse <- as.im(interp.im, W=Window(Lfine), Z=Lcoarse, xy=Lfine, bilinear=TRUE) } else { ## compute on the desired grid and a finer grid Lcoarse <- L dimcoarse <- dimL ## compute on finer grid dimfine <- round(dimcoarse * 2) cl$dimyx <- dimfine Lfine <- eval(cl, sys.parent()) ## sample from fine to coarse Lfine <- as.im(Lfine, xy=Lcoarse) } ## Richardson extrapolation, ratio = 2, exponent = 1 Lextrap <- 2 * Lfine - Lcoarse if(se) attr(Lextrap, "se") <- attr(L, "se") return(Lextrap) } delayed <- !is.null(sigmaX) setuponly <- identical(internal$setuponly, TRUE) want.Xpos <- delayed || setuponly if(!setuponly && (se || (at == "points" && leaveoneout))) { #' NEED INDIVIDUAL HEAT KERNELS FOR EACH DATA POINT #' to calculate estimate and standard error, #' or leave-one-out estimate if(!is.null(sigmaX)) stop("variance calculation is not implemented for lagged arrivals") lambda <- varlam <- switch(at, pixels = as.im(0, W=Window(x), ...), points = numeric(nX)) if(verbose) { pstate <- list() cat(paste("Processing", nX, "heat kernels... 
")) } if(is.null(weights)) { ## unweighted calculation: coded separately for efficiency for(i in seq_len(nX)) { Heat.i <- densityHeat.ppp(x[i], sigma, ..., connect=connect, symmetric=symmetric, k=k) switch(at, pixels = { lambda <- lambda + Heat.i varlam <- varlam + Heat.i^2 }, points = { if(leaveoneout) { Heat.ixi <- safelookup(Heat.i,x[-i],warn=FALSE) #'was: Heat.ixi <- Heat.i[ x[-i] ] lambda[-i] <- lambda[-i] + Heat.ixi varlam[-i] <- varlam[-i] + Heat.ixi^2 } else { lambda <- lambda + Heat.i[x] varlam <- varlam + Heat.i[x]^2 } }) if(verbose) pstate <- progressreport(i, nX, state=pstate) } } else { ## weighted calculation for(i in seq_len(nX)) { Heat.i <- densityHeat.ppp(x[i], sigma, ..., connect=connect, symmetric=symmetric, k=k) w.i <- weights[i] switch(at, pixels = { lambda <- lambda + w.i * Heat.i varlam <- varlam + w.i * Heat.i^2 }, points = { if(leaveoneout) { Heat.ixi <- Heat.i[ x[-i] ] lambda[-i] <- lambda[-i] + w.i * Heat.ixi varlam[-i] <- varlam[-i] + w.i * Heat.ixi^2 } else { lambda <- lambda + w.i * Heat.i[x] varlam <- varlam + w.i * Heat.i[x]^2 } }) if(verbose) pstate <- progressreport(i, nX, state=pstate) } } if(verbose) splat("Done.") result <- lambda attr(result, "se") <- sqrt(varlam) return(result) } check.1.integer(k) stopifnot(k >= 1) if(!(connect %in% c(4,8))) stop("connectivity must be 4 or 8") ## initial state for diffusion if(delayed) { #' smoothing bandwidths attributed to each data point check.nvector(sigmaX, nX) stopifnot(all(is.finite(sigmaX))) stopifnot(all(sigmaX >= 0)) if(missing(sigma)) sigma <- max(sigmaX) else check.1.real(sigma) #' sort in decreasing order of bandwidth osx <- order(sigmaX, decreasing=TRUE) sigmaX <- sigmaX[osx] x <- x[osx] #' discretise window W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=Window(x)))) #' initial state is zero Y <- as.im(W, value=0) #' discretised coordinates Xpos <- nearest.valid.pixel(x$x, x$y, Y) } else { #' pixellate pattern Y <- pixellate(x, ..., weights=weights, preserve=TRUE, savemap=want.Xpos) Xpos <- attr(Y, "map") } #' validate sigma if(is.im(sigma)) { # ensure Y and sigma are on the same grid A <- harmonise(Y=Y, sigma=sigma) Y <- A$Y sigma <- A$sigma } else if(is.function(sigma)) { sigma <- as.im(sigma, as.owin(Y)) } else { sigma <- as.numeric(sigma) check.1.real(sigma) } #' normalise as density pixelarea <- with(Y, xstep * ystep) Y <- Y / pixelarea v <- as.matrix(Y) #' initial state u <- as.vector(v) if(want.Xpos) { #' map (row, col) to serial number serial <- matrix(seq_len(length(v)), nrow(v), ncol(v)) Xpos <- serial[as.matrix(as.data.frame(Xpos))] } #' symmetric random walk? if(symmetric) { asprat <- with(Y, ystep/xstep) if(abs(asprat-1) > 0.01) warning(paste("Symmetric random walk on a non-square grid", paren(paste("aspect ratio", asprat))), call.=FALSE) } #' determine appropriate jump probabilities & time step pmax <- 1/(connect+1) # maximum permitted jump probability xstep <- Y$xstep ystep <- Y$ystep minstep <- min(xstep, ystep) if(symmetric) { #' all permissible transitions have the same probability 'pjump'. #' Determine Nstep, and dt=sigma^2/Nstep, such that #' Nstep >= 16 and M * pjump * minstep^2 = dt M <- if(connect == 4) 2 else 6 Nstep <- max(16, ceiling(max(sigma)^2/(M * pmax * minstep^2))) dt <- sn <- (sigma^2)/Nstep px <- py <- pxy <- sn/(M * minstep^2) } else { #' px is the probability of jumping 1 step to the right #' py is the probability of jumping 1 step up #' if connect=4, horizontal and vertical jumps are exclusive. 
#' if connect=8, horizontal and vertical increments are independent #' Determine Nstep, and dt = sigma^2/Nstep, such that #' Nstep >= 16 and 2 * pmax * minstep^2 = dt Nstep <- max(16, ceiling(max(sigma)^2/(2 * pmax * minstep^2))) dt <- sn <- (sigma^2)/Nstep px <- sn/(2 * xstep^2) py <- sn/(2 * ystep^2) if(max(px) > pmax) stop("Internal error: px exceeds pmax") if(max(py) > pmax) stop("Internal error: py exceeds pmax") if(connect == 8) pxy <- px * py } #' arrival times if(!is.null(sigmaX)) iarrive <- pmax(1, pmin(Nstep, Nstep - round((sigmaX^2)/sn))) #' construct adjacency matrices dimv <- dim(v) my <- gridadjacencymatrix(dimv, across=FALSE, down=TRUE, diagonal=FALSE) mx <- gridadjacencymatrix(dimv, across=TRUE, down=FALSE, diagonal=FALSE) if(connect == 8) mxy <- gridadjacencymatrix(dimv, across=FALSE, down=FALSE, diagonal=TRUE) #' restrict to window if(anyNA(u)) { ok <- !is.na(u) u <- u[ok] if(want.Xpos) { #' adjust serial numbers Xpos <- cumsum(ok)[Xpos] } mx <- mx[ok,ok,drop=FALSE] my <- my[ok,ok,drop=FALSE] if(connect == 8) mxy <- mxy[ok,ok,drop=FALSE] if(is.im(sigma)) { px <- px[ok] py <- py[ok] if(connect == 8) pxy <- pxy[ok] } } else { ok <- TRUE if(is.im(sigma)) { px <- px[] py <- py[] if(connect == 8) pxy <- pxy[] } } #' construct iteration matrix if(connect == 4) { A <- px * mx + py * my } else { A <- px * (1 - 2 * py) * mx + py * (1 - 2 * px) * my + pxy * mxy } #' debug stopifnot(min(rowSums(A)) >= 0) stopifnot(max(rowSums(A)) <= 1) #' diag(A) <- 1 - rowSums(A) #' k-step transition probabilities if(k > 1) { Ak <- A for(j in 2:k) Ak <- Ak %*% A } else Ak <- A k <- as.integer(k) Nstep <- as.integer(Nstep) Nblock <- Nstep/k Nrump <- Nstep - Nblock * k #' secret exit - return setup data only if(setuponly) return(list(Y=Y, u=u, Xpos=Xpos, sigma=sigma, A=A, Ak=Ak, k=k, Nstep=Nstep, Nblock=Nblock, Nrump=Nrump, dx=xstep, dy=ystep, dt=dt)) #' run U <- u Z <- Y if(!delayed) { if(!show) { for(iblock in 1:Nblock) U <- U %*% Ak } else { opa <- par(ask=FALSE) each <- max(1, round(Nblock/60)) for(iblock in 1:Nblock) { U <- U %*% Ak if(iblock %% each == 0) { Z[] <- as.vector(U) f <- sqrt((iblock * k)/Nstep) main <- if(is.im(sigma)) paste(signif(f, 3), "* sigma") else paste("sigma =", signif(f * sigma, 3)) plot(Z, main=main) Sys.sleep(0.4) } } par(opa) } if(Nrump > 0) for(istep in 1:Nrump) U <- U %*% A } else { #' lagged arrivals used <- rep(FALSE, nX) contrib <- (weights %orifnull% rep(1,nX))/pixelarea if(!show) { for(iblock in 1:Nblock) { U <- U %*% Ak if(any(ready <- (!used & (iarrive <= iblock * k)))) { #' add points for(i in which(ready)) { j <- Xpos[i] U[j] <- U[j] + contrib[i] used[i] <- TRUE } } } } else { opa <- par(ask=FALSE) each <- max(1, round(Nblock/60)) for(iblock in 1:Nblock) { U <- U %*% Ak if(any(ready <- (!used & (iarrive <= iblock * k)))) { #' add points for(i in which(ready)) { j <- Xpos[i] U[j] <- U[j] + contrib[i] used[i] <- TRUE } } if(iblock %% each == 0) { Z[] <- as.vector(U) f <- sqrt((iblock * k)/Nstep) main <- if(is.im(sigma)) paste(signif(f, 3), "* sigma") else paste("sigma =", signif(f * sigma, 3)) plot(Z, main=main) Sys.sleep(0.4) } } par(opa) } if(Nrump > 0) for(istep in 1:Nrump) U <- U %*% A } #' pack up Z[] <- as.vector(U) if(at == "points") Z <- Z[x] return(Z) } spatstat.core/R/localK.R0000644000176200001440000001550514144333462014607 0ustar liggesusers# # localK.R Getis-Franklin neighbourhood density function # # $Revision: 1.25 $ $Date: 2019/06/23 06:30:55 $ # # "localL" <- function(X, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { 
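  # localL is the variance-stabilised version of localK: it calls
  # localK with wantL=TRUE, which applies L[i](r) = sqrt(K[i](r)/pi)
  # to each local function.  Illustrative usage (not run; 'cells' is
  # a dataset from spatstat.data):
  #   L <- localL(cells)
  #   plot(L, main="local L functions")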
localK(X, wantL=TRUE, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } "localLinhom" <- function(X, lambda=NULL, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE) { localKinhom(X, lambda=lambda, wantL=TRUE, ..., rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue, sigma=sigma, varcov=varcov, update=update, leaveoneout=leaveoneout) } "localK" <- function(X, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") localKengine(X, ..., rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } "localKinhom" <- function(X, lambda=NULL, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") a <- resolve.lambda(X, lambda, ..., sigma=sigma, varcov=varcov, update=update, leaveoneout=leaveoneout) result <- localKengine(X, lambda=a$lambda, ..., rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) if(a$danger) attr(result, "dangerous") <- a$dangerous return(result) } "localKengine" <- function(X, ..., wantL=FALSE, lambda=NULL, rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { npts <- npoints(X) W <- X$window areaW <- area(W) lambda.ave <- npts/areaW lambda1.ave <- (npts - 1)/areaW weighted <- !is.null(lambda) if(is.null(rvalue)) rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i if(weighted) { J <- close$j lambdaJ <- lambda[J] weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), npts)) labl <- desc <- character(npts) if(verbose) state <- list() switch(correction, none={ # uncorrected! For demonstration purposes only! 
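# Sketch of the uncorrected estimate computed below: for each point i,
# accumulate a cumulative histogram of distances to the other points,
# so that (after the division by lambda1.ave)
#   K[i](r) = (areaW/(npts-1)) * #{j != i : d(i,j) <= r}
# or, in the inhomogeneous (weighted) case, a sum of 1/lambda[j].
# No edge correction is applied, so this is biased near the boundary.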
for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights df[,i] <- cumsum(wh) icode <- numalign(i, npts) names(df)[i] <- paste("un", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) df[,i] <- Ktrans icode <- numalign(i, npts) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- cumsum(wh) df[,i] <- Kiso icode <- numalign(i, npts) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(df[nr,])) } # function value table required # add r and theo if(!wantL) { df <- cbind(df, data.frame(r=r, theo=pi * r^2)) if(!weighted) { fnam <- c("K", "loc") yexp <- ylab <- quote(K[loc](r)) } else { fnam <- c("K", "list(inhom,loc)") ylab <- quote(K[inhom,loc](r)) yexp <- quote(K[list(inhom,loc)](r)) } } else { df <- cbind(df, data.frame(r=r, theo=r)) if(!weighted) { fnam <- c("L", "loc") yexp <- ylab <- quote(L[loc](r)) } else { fnam <- c("L", "list(inhom,loc)") ylab <- quote(L[inhom,loc](r)) yexp <- quote(L[list(inhom,loc)](r)) } } desc <- c(desc, c("distance argument r", "theoretical Poisson %s")) labl <- c(labl, c("r", "{%s[%s]^{pois}}(r)")) # create fv object K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam, yexp=yexp) # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } spatstat.core/R/rmhtemper.R0000644000176200001440000000430414144333463015401 0ustar liggesusers#' #' rmhtemper.R #' #' $Revision: 1.4 $ $Date: 2018/10/18 02:07:56 $ #' reheat <- local({ expon <- function(x, alpha) { if(is.null(x)) return(NULL) if(is.numeric(x)) return(x^alpha) if(is.im(x)) return(x^alpha) if(is.function(x)) { f <- x g <- function(...) 
{ f(...)^alpha } if(!inherits(f, "funxy")) return(g) return(funxy(g, W=as.owin(f))) } if(is.list(x)) return(lapply(x, expon)) stop("Unrecognised format for x in x^alpha", call.=FALSE) } reheat <- function(model, invtemp) { model <- rmhmodel(model) cif <- model$cif par <- model$par w <- model$w trend <- model$trend types <- model$types newtrend <- expon(trend, invtemp) rules <- lapply(cif, spatstatRmhInfo) temperfuns <- lapply(rules, getElement, name="temper") if(any(bad <- sapply(temperfuns, is.null))) stop(paste("reheating the", commasep(sQuote(cif[bad])), ngettext(sum(bad), "cif", "cifs"), "is not supported")) Ncif <- length(cif) if(Ncif == 1) { newpar <- temperfuns[[1]](par, invtemp) } else { newpar <- par for(i in 1:Ncif) newpar[[i]] <- temperfuns[[i]](par[[i]], invtemp) } newmodel <- rmhmodel(cif=cif, par=newpar, trend=newtrend, w=w, types=types) return(newmodel) } reheat }) rtemper <- function(model, invtemp, nrep, ..., track=FALSE, start=NULL, verbose=FALSE){ df <- data.frame(invtemp, nrep) ndf <- nrow(df) X <- NULL h <- NULL for(i in 1:ndf) { if(verbose) cat(paste("Step", i, "of", paste0(ndf, ":"), "Running", nrep[i], "iterations", "at inverse temperature", signif(invtemp[i], 4), "... ")) model.i <- reheat(model, invtemp[i]) X <- rmh(model.i, nrep=nrep[i], ..., start=start, overrideXstart = X, overrideclip = (i != ndf), track=track, saveinfo = FALSE, verbose=FALSE) if(track) { hnew <- attr(X, "history") h <- rbind(h, hnew) } } if(verbose) cat("Done.\n") if(track) attr(X, "history") <- h return(X) } spatstat.core/R/randomtess.R0000644000176200001440000000270114144333463015554 0ustar liggesusers# # randomtess.R # # Random tessellations # # $Revision: 1.7 $ $Date: 2015/10/21 09:06:57 $ # # Poisson line tessellation rpoislinetess <- function(lambda, win=owin()) { win <- as.owin(win) if(win$type == "mask") stop("Not implemented for masks") # determine circumcircle xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # generate poisson lines through circumcircle n <- rpois(1, lambda * 2 * pi * rmax) if(n == 0) return(tess(tiles=list(win))) theta <- runif(n, max= 2 * pi) p <- runif(n, max=rmax) Y <- infline(p=p, theta=theta) # form the induced tessellation in bounding box Z <- chop.tess(boundbox, Y) # clip to window Z <- intersect.tess(Z, win) attr(Z, "lines") <- Y return(Z) } rMosaicSet <- function(X, p=0.5) { stopifnot(is.tess(X)) Y <- tiles(X) Y <- Y[runif(length(Y)) < p] if(length(Y) == 0) return(NULL) Z <- NULL for(i in seq_along(Y)) Z <- union.owin(Z, Y[[i]]) return(Z) } rMosaicField <- function(X, rgen=function(n) { sample(0:1, n, replace=TRUE)}, ..., rgenargs=NULL ) { stopifnot(is.tess(X)) Y <- as.im(X, ...) 
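  # The tessellation has been discretised to a pixel image Y whose
  # values are tile labels; one random value per tile is drawn from
  # 'rgen' and each pixel inherits the value of its tile, giving a
  # piecewise-constant random field.  Illustrative usage (not run):
  #   Z <- rMosaicField(rpoislinetess(3), runif)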
ntiles <- length(levels(Y)) values <- do.call(rgen, append(list(ntiles),rgenargs)) Z <- eval.im(values[as.integer(Y)]) return(Z) } spatstat.core/R/stienen.R0000644000176200001440000000415214144333464015045 0ustar liggesusers## stienen.R ## ## Stienen diagram with border correction ## ## $Revision: 1.9 $ $Date: 2020/12/19 05:25:06 $ stienen <- function(X, ..., bg="grey", border=list(bg=NULL)) { Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) if(npoints(X) <= 1) { W <- Window(X) dont.complain.about(W) do.call(plot, resolve.defaults(list(x=quote(W)), list(...), list(main=Xname))) return(invisible(NULL)) } d <- nndist(X) b <- bdist.points(X) Y <- X %mark% d observed <- (b >= d) Yobserved <- Y[observed] gp <- union(graphicsPars("symbols"), "lwd") dont.complain.about(Yobserved) do.call.plotfun(plot.ppp, resolve.defaults(list(x=quote(Yobserved), markscale=1), list(...), list(bg=bg), list(main=Xname)), extrargs=gp) if(!identical(border, FALSE)) { if(!is.list(border)) border <- list() Ycensored <- Y[!observed] dont.complain.about(Ycensored) do.call.plotfun(plot.ppp, resolve.defaults(list(x=quote(Ycensored), markscale=1, add=TRUE), border, list(...), list(bg=bg), list(cols=grey(0.5), lwd=2)), extrargs=gp) } return(invisible(NULL)) } stienenSet <- function(X, edge=TRUE) { stopifnot(is.ppp(X)) nnd <- nndist(X) if(!edge) { ok <- bdist.points(X) >= nnd X <- X[ok] nnd <- nnd[ok] } n <- npoints(X) if(n == 0) return(emptywindow(Window(X))) if(n == 1) return(Window(X)) rad <- nnd/2 if(!all(ok <- (rad > 0))) { eps <- min(rad[ok], shortside(Frame(X)))/100 rad <- pmax(rad, eps) } delta <- 2 * pi * max(rad)/128 Z <- disc(rad[1], X[1], delta=delta) for(i in 2:n) Z <- union.owin(Z, disc(rad[i], X[i], delta=delta)) return(Z) } spatstat.core/R/residppm.R0000644000176200001440000000704314144333463015224 0ustar liggesusers# # residppm.R # # computes residuals for fitted point process model # # # $Revision: 1.25 $ $Date: 2017/12/07 03:03:48 $ # residuals.ppm <- function(object, type="raw", ..., check=TRUE, drop=FALSE, fittedvalues = NULL, new.coef=NULL, dropcoef=FALSE, quad=NULL) { verifyclass(object, "ppm") trap.extra.arguments(..., .Context="In residuals.ppm") type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson", score="score")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals", score="score residuals") typename <- typenames[[type]] given.fitted <- !missing(fittedvalues) && !is.null(fittedvalues) # ................. determine fitted values ................. 
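# Overview: the fitted values determined here feed into the residual
# measure constructed further below.  For raw residuals,
#   R(B) = n(X intersect B) - integral_B lambdahat(u) du,
# i.e. discrete mass 1 at each data point plus a diffuse density
# -lambdahat; the inverse and Pearson types reweight both parts by
# 1/lambda and 1/sqrt(lambda) respectively.  Illustrative usage
# (not run; 'cells' is a dataset from spatstat.data):
#   res <- residuals(ppm(cells ~ x), type="pearson")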
NewCoef <- NULL if(is.null(new.coef) && is.null(quad)) { # use 'object' without modification # validate 'object' if(check && !given.fitted && damaged.ppm(object)) stop("object format corrupted; try update(object, use.internal=TRUE)") } else { # determine a new set of model coefficients if(!is.null(new.coef)) { # use specified model parameters NewCoef <- new.coef } else { # estimate model parameters using a (presumably) denser set of dummy pts # Determine new quadrature scheme if(is.quad(quad)) hi.res.quad <- quad else if(is.ppp(quad)) hi.res.quad <- quadscheme(data=data.ppm(object), dummy=quad) else { # assume 'quad' is a list of arguments to 'quadscheme' hi.res.quad <- do.call(quadscheme, append(list(data.ppm(object)), quad)) } # refit the model with new quadscheme hi.res.fit <- update(object, hi.res.quad) NewCoef <- coef(hi.res.fit) } } #' now compute fitted values using new coefficients if(!given.fitted) fittedvalues <- fitted(object, drop=drop, check=check, new.coef=NewCoef, dropcoef=dropcoef) # ..................... compute residuals ..................... # Extract quadrature points and weights Q <- quad.ppm(object, drop=drop, clip=drop) # U <- union.quad(Q) # quadrature points Z <- is.data(Q) # indicator data/dummy # W <- w.quad(Q) # quadrature weights # Compute fitted conditional intensity at quadrature points lambda <- fittedvalues # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) if(type == "score") { # need the covariates X <- model.matrix(object) if(drop) { gs <- getglmsubset(object) ok <- !is.na(gs) & gs X <- X[ok, , drop=FALSE] } } # Evaluate residual measure components discrete <- switch(type, raw = rep.int(1, sum(Z)), inverse = 1/lambda[Z], pearson = 1/sqrt(lambda[Z]), score = X[Z, , drop=FALSE] ) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda), score = -lambda * X) # Residual measure (return value) res <- msr(Q, discrete, density) # name the residuals attr(res, "type") <- type attr(res, "typename") <- typename return(res) } spatstat.core/R/summary.mppm.R0000644000176200001440000002176214144333464016053 0ustar liggesusers# # summary.mppm.R # # $Revision: 1.18 $ $Date: 2021/03/29 07:26:25 $ # summary.mppm <- function(object, ..., brief=FALSE) { # y will be the summary y <- object[c("Call", "Info", "Inter", "trend", "iformula", "random", "npat", "maxlogpl")] y$brief <- brief Info <- object$Info Inter <- object$Inter FIT <- object$Fit$FIT moadf <- object$Fit$moadf y$Fit <- object$Fit[c("fitter", "use.gam", "fmla", "Vnamelist", "Isoffsetlist")] y$Fit$FIT <- summary(FIT) y$Fit$moadf <- list(nrow=nrow(moadf), colnames=colnames(moadf)) ninteract <- Inter$ninteract interaction <- Inter$interaction iused <- Inter$iused itags <- Inter$itags processnames <- Inter$processes constant <- Inter$constant trivial <- Inter$trivial npat <- y$npat iformula <- y$iformula random <- y$random Vnamelist <- y$Fit$Vnamelist allVnames <- unlist(Vnamelist) Isoffsetlist <- y$Fit$Isoffsetlist poistags <- itags[trivial] intertags <- c(allVnames, poistags) ## does the model depend on covariates? 
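## Note on the classification below: the model is judged to depend on
## covariates if covariates were supplied and at least one is actually
## used; further down, a fitted coefficient is classed as an
## interpoint-interaction coefficient if its model term depends on a
## canonical interaction covariate (a Vname) or a Poisson interaction
## tag, and all remaining coefficients are trend/design coefficients.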
y$depends.covar <- Info$has.covar && (length(Info$used.cov.names) > 0) # rownames <- y$Info$rownames switch(y$Fit$fitter, glmmPQL={ y$coef <- co <- fixed.effects(FIT) y$coef.rand <- random.effects(FIT) }, gam=, glm={ y$coef <- co <- coef(FIT) }) ## identify model terms which involve interpoint interaction md <- model.depends(FIT) is.interpoint <- colnames(md) %in% intertags involves.interpoint <- apply(md[ , is.interpoint, drop=FALSE], 1, any) y$coef.inter <- co[involves.interpoint] ## identify trend and design coefficients systematic <- !involves.interpoint y$coef.syst <- co[systematic] # random effects y$ranef <- if(Info$has.random) summary(FIT$modelStruct) else NULL ### Interpoint interactions # model is Poisson ? y$poisson <- ispois <- all(trivial[iused]) # Determine how complicated the interactions are: # (0) are there random effects involving the interactions randominteractions <- !is.null(random) && any(variablesinformula(random) %in% itags) # (1) is the interaction formula of the form ~ tag + tag + ... + tag isimple <- identical(sort(variablesinformula(iformula)), sort(termsinformula(iformula))) # (2) is it of the form ~tag trivialformula <- (isimple && ninteract == 1) # (3) is it of the form ~tag where the interaction is the same in each row fixedinteraction <- (trivialformula && constant && !randominteractions) ### Determine printing of interactions, accordingly ### iprint <- list() if(randominteractions) { toohard <- TRUE printeachrow <- FALSE } else if(fixedinteraction || ispois) { # exactly the same interaction for all patterns interaction <- interaction[1L,1L,drop=TRUE] fi.all <- fii(interaction, co, Vnamelist[[1L]], Isoffsetlist[[1L]]) iprint <- list("Interaction for all patterns"=fi.all) printeachrow <- FALSE toohard <- FALSE } else if(trivialformula) { ## same interaction structure for all patterns; ## irregular parameters may be different on each row; ## regular parameters of interaction do not depend on design pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE } else if(sum(iused) == 1) { ## same interaction structure for all patterns; ## irregular parameters may be different on each row; ## regular parameters of interaction may depend on design pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE ## look for design : interaction terms mm <- md[involves.interpoint, !is.interpoint, drop=FALSE] tangled <- apply(mm, 2, any) if(any(tangled)) { tanglednames <- colnames(mm)[tangled] textra <- list(commasep(sQuote(tanglednames))) names(textra) <- paste("Interaction depends on design", ngettext(length(tanglednames), "covariate", "covariates")) iprint <- append(iprint, textra) } } else if(isimple && all(constant)) { # several interactions involved, each of which is the same for all patterns iprint <- list("Interaction formula"=iformula, "Interactions defined for each pattern"=NULL) for(j in (1:ninteract)[iused]) { name.j <- paste("Interaction", sQuote(itags[j])) int.j <- Inter$interaction[1L,j,drop=TRUE] Vnames.j <- Vnamelist[[j]] Isoffset.j <- Isoffsetlist[[j]] fii.j <- fii(int.j, co, Vnames.j, Isoffset.j) extra.j <- list(fii.j) names(extra.j) <- name.j iprint <- append(iprint, extra.j) } printeachrow <- FALSE toohard <- FALSE } else { # general case # determine which interaction(s) are active on each row active <- active.interactions(object) if(ninteract > 1 || !all(active)) iprint <- list("Active interactions"=active) 
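## Note: 'active' is a logical matrix with one row per point pattern
## and one column per interaction tag, indicating which interactions
## contribute to each row; if any row has more than one active
## interaction (a hybrid), the fitted interaction cannot be reported
## row by row, hence 'toohard' below.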
printeachrow <- TRUE toohard <- any(rowSums(active) > 1) } y$ikind <- list( randominteractions=randominteractions, isimple =isimple, trivialformula =trivialformula, fixedinteraction =fixedinteraction, toohard =toohard, printeachrow =printeachrow) y$depends.on.row <- ("id" %in% variablesinformula(y$trend)) || !fixedinteraction if(toohard) iprint <- append(iprint, list("(Sorry, cannot interpret fitted interactions)")) else if(printeachrow) { subs <- subfits(object, what="interactions") um <- uniquemap(subs) uniq <- (um == seq_along(um)) if(mean(uniq) <= 0.5) { icode <- cumsum(uniq)[um] inames <- if(max(icode) <= 26) LETTERS[icode] else as.character(icode) itable <- data.frame(row=seq_along(um), interaction=inames) usubs <- subs[um[uniq]] names(usubs) <- inames[uniq] iprint <- append(iprint, list("Summary table of interactions"=itable, "key to interaction table"=usubs, "=== Interactions on each row ===="=NULL)) } names(subs) <- paste("Interaction on row", 1:npat) iprint <- append(iprint, subs) } y$iprint <- iprint class(y) <- c("summary.mppm", class(list)) return(y) } print.summary.mppm <- function(x, ..., brief=x$brief) { # NB: x is an object of class "summary.mppm" npat <- x$npat # Inter <- x$Inter # ninteract <- Inter$ninteract # interaction <- Inter$interaction # iused <- Inter$iused # constant <- Inter$constant # iformula <- x$iformula # processnames <- Inter$processes # itags <- Inter$itags # trivial <- Inter$trivial # random <- x$random FIT <- x$Fit$FIT # Vnamelist <- x$Fit$Vnamelist # allVnames <- unlist(Vnamelist) # poistags <- itags[trivial] terselevel <- spatstat.options("terse") # rownames <- x$Info$rownames splat("Point process model fitted to", npat, "point patterns") if(waxlyrical('gory', terselevel)) splat("Call:", x$Call$callstring) splat("Log trend formula:", pasteFormula(x$trend)) switch(x$Fit$fitter, glmmPQL={ cat("\nFixed effects:\n") print(x$coef.syst) cat("Random effects:\n") print(x$coef.rand) co <- fixed.effects(FIT) }, gam=, glm={ cat("\nFitted trend coefficients:\n") print(x$coef.syst) co <- coef(FIT) }) if(length(x$coef.inter)) { cat("\nFitted coefficients of interpoint interaction:\n") print(x$coef.inter) } if(!brief && waxlyrical('extras', terselevel)) { cat("All fitted coefficients:\n") print(co) } parbreak(terselevel) if(!is.null(x$ranef)) { splat("Random effects summary:") print(x$ranef) parbreak(terselevel) } ### Print interaction information ### if(waxlyrical('extras', terselevel)) { iprint <- x$iprint nama <- names(iprint) %orifnull% rep("", length(iprint)) for(i in seq_along(iprint)) { nami <- nama[i] vali <- iprint[[i]] if(brief && is.matrix(vali)) vali <- paren(paste(nrow(vali), "x", ncol(vali), "matrix")) if(nami != "") { inline <- inherits(vali, "formula") || is.character(vali) || (brief && inherits(vali, "fii")) if(inline) cat(paste0(nami, ":\t")) else splat(paste0(nami, ":")) } if(!is.null(vali)) { if(inherits(vali, "fii")) { print(vali, tiny=brief) } else if(is.character(vali)) { splat(vali) } else { print(vali) } } parbreak(terselevel) } } if(!brief && waxlyrical('gory', terselevel)) { splat("--- Gory details: ---") splat("Combined data frame has", x$Fit$moadf$nrow, "rows") print(FIT) } invisible(NULL) } spatstat.core/R/random.R0000644000176200001440000007321114144333463014661 0ustar liggesusers## ## random.R ## ## Functions for generating random point patterns ## ## $Revision: 4.104 $ $Date: 2021/04/17 03:29:58 $ ## ## runifpoint() n i.i.d. 
uniform random points ("binomial process") ## runifdisc() special case of disc (faster) ## ## runifpoispp() uniform Poisson point process ## ## rpoispp() general Poisson point process (thinning method) ## ## rpoint() n independent random points (rejection/pixel list) ## ## rMaternI() Mat'ern model I ## rMaternII() Mat'ern model II ## rMaternInhibition Generalisation ## rSSI() Simple Sequential Inhibition process ## ## rPoissonCluster() generic Poisson cluster process ## rGaussPoisson() Gauss-Poisson process ## ## rthin() independent random thinning ## rcell() Baddeley-Silverman cell process ## ## Examples: ## u01 <- owin(0:1,0:1) ## plot(runifpoispp(100, u01)) ## X <- rpoispp(function(x,y) {100 * (1-x/2)}, 100, u01) ## X <- rpoispp(function(x,y) {ifelse(x < 0.5, 100, 20)}, 100) ## plot(X) ## plot(rMaternI(100, 0.02)) ## plot(rMaternII(100, 0.05)) ## runifdisc <- function(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE) { ## i.i.d. uniform points in the disc of radius r and centre (x,y) check.1.real(radius) stopifnot(radius > 0) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } disque <- disc(centre=centre, radius=radius, ...) twopi <- 2 * pi rad2 <- radius^2 result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { theta <- runif(n, min=0, max=twopi) s <- sqrt(runif(n, min=0, max=rad2)) result[[isim]] <- ppp(centre[1] + s * cos(theta), centre[2] + s * sin(theta), window=disque, check=FALSE) } result <- simulationresult(result, nsim, drop) return(result) } runifpoint <- function(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, ..., nsim=1, drop=TRUE, ex=NULL) { if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(missing(n) && missing(win) && !is.null(ex)) { stopifnot(is.ppp(ex)) n <- npoints(ex) win <- Window(ex) } else { win <- as.owin(win) check.1.integer(n) stopifnot(n >= 0) } if(n == 0) { emp <- ppp(window=win) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) { whinge <- paste("Attempting to generate", n, "random points") message(whinge) warning(whinge, call.=FALSE) } } switch(win$type, rectangle = { return(runifrect(n, win, nsim=nsim, drop=drop)) }, mask = { dx <- win$xstep dy <- win$ystep ## extract pixel coordinates and probabilities rxy <- rasterxy.mask(win, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ## make a list of nsim point patterns result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## select pixels with equal probability id <- sample(seq_along(xpix), n, replace=TRUE) ## extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, min= -dy/2, max=dy/2) result[[isim]] <- ppp(x, y, window=win, check=FALSE) } }, polygonal={ ## make a list of nsim point patterns result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## rejection method ## initialise empty pattern x <- numeric(0) y <- numeric(0) X <- ppp(x, y, window=win) ## ## rectangle in which trial points will be generated box <- boundingbox(win) ## ntries <- 0 repeat { ntries <- ntries + 1 ## generate trial points in batches of n qq <- runifrect(n, box) ## retain those which are inside 'win' qq <- qq[win] ## add them to result X <- superimpose(X, qq, W=win, check=FALSE) ## if we have enough points, exit if(X$n > n) { result[[isim]] <- X[1:n] break } else if(X$n == n) { result[[isim]] <- X break } else if(ntries >= giveup) { ## otherwise get bored eventually stop(paste("Gave up after", giveup 
* n, "trials,", X$n, "points accepted")) } } } }, stop("Unrecognised window type") ) ## list of point patterns produced. result <- simulationresult(result, nsim, drop) return(result) } runifpoispp <- function(lambda, win = owin(c(0,1),c(0,1)), ..., nsim=1, drop=TRUE) { win <- as.owin(win) if(!is.numeric(lambda) || length(lambda) > 1 || !is.finite(lambda) || lambda < 0) stop("Intensity lambda must be a single finite number >= 0") if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(lambda == 0) { ## return empty pattern emp <- ppp(window=win) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } ## will generate Poisson process in enclosing rectangle and trim it box <- boundingbox(win) meanN <- lambda * area(box) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { n <- rpois(1, meanN) if(!is.finite(n)) stop(paste("Unable to generate Poisson process with a mean of", meanN, "points")) X <- runifpoint(n, box) ## trim to window if(win$type != "rectangle") X <- X[win] result[[isim]] <- X } result <- simulationresult(result, nsim, drop) return(result) } rpoint <- function(n, f, fmax=NULL, win=unit.square(), ..., giveup=1000,verbose=FALSE, nsim=1, drop=TRUE, forcewin=FALSE) { if(missing(f) || (is.numeric(f) && length(f) == 1)) ## uniform distribution return(runifpoint(n, win, giveup, nsim=nsim, drop=drop)) ## non-uniform distribution.... if(!is.function(f) && !is.im(f)) stop(paste(sQuote("f"), "must be either a function or an", sQuote("im"), "object")) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(is.im(f)) { ## ------------ PIXEL IMAGE --------------------- if(forcewin) { ## force simulation points to lie inside 'win' f <- f[win, drop=FALSE] win.out <- win } else { ## default - ignore 'win' win.out <- as.owin(f) } if(n == 0) { ## return empty pattern(s) emp <- ppp(window=win.out) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } ## need to check simulated point coordinates? 
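## Sketch of the image branch below: a pixel is selected with
## probability proportional to its value (sample() normalises the
## weights, so 'ppix' need not sum to 1) and the point is then placed
## uniformly within that pixel; when forcewin=TRUE, points jittered
## outside 'win' are resampled in the 'edgy' loop.  Illustrative
## usage (not run):
##   X <- rpoint(100, as.im(function(x,y) { x + y }, unit.square()))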
checkinside <- forcewin if(checkinside && is.rectangle(win) && is.subset.owin(Frame(f), win)) checkinside <- FALSE ## prepare w <- as.mask(if(forcewin) f else win.out) M <- w$m dx <- w$xstep dy <- w$ystep halfdx <- dx/2.0 halfdy <- dy/2.0 ## extract pixel coordinates and probabilities rxy <- rasterxy.mask(w, drop=TRUE) xpix <- rxy$x ypix <- rxy$y npix <- length(xpix) ppix <- as.vector(f$v[M]) ## not normalised - OK ## generate result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## select pixels id <- sample(npix, n, replace=TRUE, prob=ppix) ## extract pixel centres and randomise location within pixels x <- xpix[id] + runif(n, min= -halfdx, max=halfdx) y <- ypix[id] + runif(n, min= -halfdy, max=halfdy) if(checkinside) { edgy <- which(!inside.owin(x,y,win.out)) ## reject points just outside boundary ntries <- 0 while((nedgy <- length(edgy)) > 0) { ntries <- ntries + 1 ide <- sample(npix, nedgy, replace=TRUE, prob=ppix) x[edgy] <- xe <- xpix[ide] + runif(nedgy, min= -halfdx, max=halfdx) y[edgy] <- ye <- ypix[ide] + runif(nedgy, min= -halfdy, max=halfdy) edgy <- edgy[!inside.owin(xe, ye, win.out)] if(ntries > giveup) break; } } result[[isim]] <- ppp(x, y, window=win.out, check=FALSE) } result <- simulationresult(result, nsim, drop) return(result) } ## ------------ FUNCTION --------------------- ## Establish parameters for rejection method verifyclass(win, "owin") if(n == 0) { ## return empty pattern(s) emp <- ppp(window=win) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } if(is.null(fmax)) { ## compute approx maximum value of f imag <- as.im(f, win, ...) summ <- summary(imag) fmax <- summ$max + 0.05 * diff(summ$range) } irregular <- (win$type != "rectangle") box <- boundingbox(win) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## initialise empty pattern X <- ppp(numeric(0), numeric(0), window=win) pbar <- 1 nremaining <- n totngen <- 0 ## generate uniform random points in batches ## and apply the rejection method. ## Collect any points that are retained in X ntries <- 0 repeat{ ntries <- ntries + 1 ## proposal points ngen <- nremaining/pbar + 10 totngen <- totngen + ngen prop <- runifrect(ngen, box) if(irregular) prop <- prop[win] if(prop$n > 0) { fvalues <- f(prop$x, prop$y, ...) paccept <- fvalues/fmax u <- runif(prop$n) ## accepted points Y <- prop[u < paccept] if(Y$n > 0) { ## add to X X <- superimpose(X, Y, W=win, check=FALSE) nX <- X$n pbar <- nX/totngen nremaining <- n - nX if(nremaining <= 0) { ## we have enough! if(verbose) splat("acceptance rate = ", round(100 * pbar, 2), "%") result[[isim]] <- if(nX == n) X else X[1:n] break } } } if(ntries > giveup) stop(paste("Gave up after",giveup * n,"trials with", X$n, "points accepted")) } } result <- simulationresult(result, nsim, drop) return(result) } rpoispp <- function(lambda, lmax=NULL, win = owin(), ..., nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) { ## arguments: ## lambda intensity: constant, function(x,y,...) or image ## lmax maximum possible value of lambda(x,y,...) ## win default observation window (of class 'owin') ## ... arguments passed to lambda(x, y, ...) 
## nsim number of replicate simulations if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(missing(lambda) && is.null(lmax) && missing(win) && !is.null(ex)) { lambda <- intensity(unmark(ex)) win <- Window(ex) } else { if(!(is.numeric(lambda) || is.function(lambda) || is.im(lambda))) stop(paste(sQuote("lambda"), "must be a constant, a function or an image")) if(is.numeric(lambda) && !(length(lambda) == 1 && lambda >= 0)) stop(paste(sQuote("lambda"), "must be a single, nonnegative number")) if(!is.null(lmax)) { if(!is.numeric(lmax)) stop("lmax should be a number") if(length(lmax) > 1) stop("lmax should be a single number") } if(is.im(lambda)) { if(warnwin && !missing(win)) warning("Argument win ignored", call.=FALSE) win <- rescue.rectangle(as.owin(lambda)) } else { win <- as.owin(win) } } if(is.numeric(lambda)) ## uniform Poisson return(runifpoispp(lambda, win, nsim=nsim, drop=drop)) ## inhomogeneous Poisson ## perform thinning of uniform Poisson ## determine upper bound if(is.null(lmax)) { imag <- as.im(lambda, win, ...) summ <- summary(imag) lmax <- summ$max + 0.05 * diff(summ$range) } if(is.function(lambda)) { ## function lambda #' runifpoispp checks 'lmax' result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE) #' result is a 'ppplist' with appropriate names for(isim in 1:nsim) { X <- result[[isim]] if(X$n > 0) { prob <- lambda(X$x, X$y, ...)/lmax u <- runif(X$n) retain <- (u <= prob) result[[isim]] <- X[retain] } } if(nsim == 1 && drop) result <- result[[1L]] return(result) } if(is.im(lambda)) { ## image lambda if(spatstat.options("fastpois")) { ## new code: sample pixels directly mu <- integral(lambda) dx <- lambda$xstep/2 dy <- lambda$ystep/2 df <- as.data.frame(lambda) npix <- nrow(df) lpix <- df$value result <- vector(mode="list", length=nsim) nn <- rpois(nsim, mu) if(!all(is.finite(nn))) stop(paste("Unable to generate Poisson process with a mean of", mu, "points")) for(isim in seq_len(nsim)) { ni <- nn[isim] ii <- sample.int(npix, size=ni, replace=TRUE, prob=lpix) xx <- df$x[ii] + runif(ni, -dx, dx) yy <- df$y[ii] + runif(ni, -dy, dy) result[[isim]] <- ppp(xx, yy, window=win, check=FALSE) } result <- simulationresult(result, nsim, drop) return(result) } else { ## old code: thinning result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE) for(isim in 1:nsim) { X <- result[[isim]] if(X$n > 0) { prob <- lambda[X]/lmax u <- runif(X$n) retain <- (u <= prob) result[[isim]] <- X[retain] } } if(nsim == 1 && drop) return(result[[1L]]) return(result) } } stop(paste(sQuote("lambda"), "must be a constant, a function or an image")) } rMaternI <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { rMaternInhibition(type=1, kappa=kappa, r=r, win=win, stationary=stationary, ..., nsim=nsim, drop=drop) } rMaternII <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { rMaternInhibition(type=2, kappa=kappa, r=r, win=win, stationary=stationary, ..., nsim=nsim, drop=drop) } rMaternInhibition <- function(type, kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { stopifnot(is.numeric(r) && length(r) == 1) stopifnot(type %in% c(1,2)) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## Resolve window class if(!inherits(win, c("owin", "box3", "boxx"))) { givenwin <- win win <- try(as.owin(givenwin), silent = TRUE) if(inherits(win, "try-error")) win <- try(as.boxx(givenwin), silent = TRUE) if(inherits(win, "try-error")) stop("Could not coerce argument win to a window (owin, 
box3 or boxx).") } dimen <- spatdim(win) if(dimen == 2) { bigbox <- if(stationary) grow.rectangle(win, r) else win result <- rpoispp(kappa, win = bigbox, nsim = nsim, drop=FALSE) } else if(dimen == 3) { bigbox <- if(stationary) grow.box3(win, r) else win result <- rpoispp3(kappa, domain = bigbox, nsim = nsim, drop=FALSE) } else { bigbox <- if(stationary) grow.boxx(win, r) else win result <- rpoisppx(kappa, domain = bigbox, nsim = nsim, drop=FALSE) } for(isim in 1:nsim) { Y <- result[[isim]] nY <- npoints(Y) if(type == 1) { ## Matern Model I if(nY > 1) { d <- nndist(Y) Y <- Y[d > r] } } else { ## Matern Model II if(nY > 1) { ## matrix of squared pairwise distances d2 <- pairdist(Y, squared=TRUE) close <- (d2 <= r^2) ## random order 1:n age <- sample(seq_len(nY), nY, replace=FALSE) earlier <- outer(age, age, ">") conflict <- close & earlier ## delete <- apply(conflict, 1, any) delete <- matrowany(conflict) Y <- Y[!delete] } } if(stationary) Y <- Y[win] result[[isim]] <- Y } if(nsim == 1 && drop) return(result[[1L]]) if(is.owin(win)) result <- as.ppplist(result) return(result) } rSSI <- function(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ..., f=NULL, fmax=NULL, nsim=1, drop=TRUE) { win.given <- !missing(win) && !is.null(win) stopifnot(is.numeric(r) && length(r) == 1 && r >= 0) stopifnot(is.numeric(n) && length(n) == 1 && n >= 0) must.reach.n <- is.finite(n) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## if(!is.null(f)) { stopifnot(is.numeric(f) || is.im(f) || is.function(f)) if(is.null(fmax) && !is.numeric(f)) fmax <- if(is.im(f)) max(f) else max(as.im(f, win)) } ## result <- vector(mode="list", length=nsim) if(!win.given) win <- square(1) ## validate initial state if(is.null(x.init)) { ## start with empty pattern in specified window win <- as.owin(win) x.init <- ppp(numeric(0),numeric(0), window=win) } else { ## start with specified pattern stopifnot(is.ppp(x.init)) if(!win.given) { win <- as.owin(x.init) } else { ## check compatibility of windows if(!identical(win, as.owin(x.init))) warning(paste("Argument", sQuote("win"), "is not the same as the window of", sQuote("x.init"))) x.init.new <- x.init[win] if(npoints(x.init.new) == 0) stop(paste("No points of x.init lie inside the specified window", sQuote("win"))) nlost <- npoints(x.init) - npoints(x.init.new) if(nlost > 0) warning(paste(nlost, "out of", npoints(x.init), "points of the pattern x.init", "lay outside the specified window", sQuote("win"))) x.init <- x.init.new } if(n < npoints(x.init)) stop(paste("x.init contains", npoints(x.init), "points", "but a pattern containing only n =", n, "points", "is required")) if(n == npoints(x.init)) { warning(paste("Initial state x.init already contains", n, "points;", "no further points were added")) result <- rep(list(x.init), nsim) result <- simulationresult(result, nsim, drop) return(result) } } #' validate radius and 'n' r2 <- r^2 winArea <- area(win) discarea <- pi * r2/4 nmelt <- floor(winArea/discarea) packdensity <- pi * sqrt(3)/6 npack <- floor(packdensity * winArea/discarea) if(is.finite(n)) { if(n > nmelt) { warning(paste("Window is too small to fit", n, "points", "at minimum separation", r, paren(paste("absolute maximum number is", nmelt)))) } else if(n > npack) { warning(paste("Window is probably too small to fit", n, "points", "at minimum separation", r, paren(paste("packing limit is", nmelt)))) } } #' start simulation pstate <- list() for(isim in 1:nsim) { if(nsim > 1) pstate <- progressreport(isim, nsim, state=pstate) ## Simple Sequential 
Inhibition process ## fixed number of points xx <- coords(x.init)$x yy <- coords(x.init)$y nn <- npoints(x.init) ## Naive implementation, proposals are uniform xprop <- yprop <- numeric(0) nblock <- if(is.finite(n)) n else min(1024, nmelt) ntries <- 0 while(ntries < giveup) { ntries <- ntries + 1 if(length(xprop) == 0) { ## generate some more proposal points prop <- if(is.null(f)) runifpoint(nblock, win) else rpoint(nblock, f, fmax, win) xprop <- coords(prop)$x yprop <- coords(prop)$y } ## extract next proposal xnew <- xprop[1L] ynew <- yprop[1L] xprop <- xprop[-1L] yprop <- yprop[-1L] ## check hard core constraint dx <- xnew - xx dy <- ynew - yy if(!any(dx^2 + dy^2 <= r2)) { xx <- c(xx, xnew) yy <- c(yy, ynew) nn <- nn + 1L ntries <- 0 } if(nn >= n) break } if(must.reach.n && nn < n) warning(paste("Gave up after", giveup, "attempts with only", nn, "points placed out of", n)) X <- ppp(xx, yy, window=win, check=FALSE) result[[isim]] <- X } result <- simulationresult(result, nsim, drop) return(result) } rPoissonCluster <- function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ..., lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE) { ## Generic Poisson cluster process ## Implementation for bounded cluster radius ## ## 'rcluster' is a function(x,y) that takes the coordinates ## (x,y) of the parent point and generates a list(x,y) of offspring ## ## "..." are arguments to be passed to 'rcluster()' ## ## Catch old argument name rmax for expand, and allow rmax to be ## passed to rcluster (and then be ignored) if(missing(expand) && !is.null(rmax <- list(...)$rmax)){ expand <- rmax f <- rcluster rcluster <- function(..., rmax) f(...) } win <- as.owin(win) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## Generate parents in dilated window frame <- boundingbox(win) dilated <- owin(frame$xrange + c(-expand, expand), frame$yrange + c(-expand, expand)) if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa))) stop(paste("The window in which the image", sQuote("kappa"), "is defined\n", "is not large enough to contain the dilation of the window", sQuote("win"))) parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim) if(nsim == 1) parentlist <- list(parentlist) resultlist <- vector(mode="list", length=nsim) for(isim in 1:nsim) { parents <- parentlist[[isim]] result <- NULL ## generate clusters np <- parents$n if(np > 0) { xparent <- parents$x yparent <- parents$y for(i in seq_len(np)) { ## generate random offspring of i-th parent point cluster <- rcluster(xparent[i], yparent[i], ...) 
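## Note: parents were generated on the window dilated by 'expand',
## so clusters whose (unobserved) parents lie outside 'win' still
## contribute offspring; each cluster is trimmed back to 'win' below,
## and 'parentid' records which parent produced each retained point.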
if(!inherits(cluster, "ppp")) cluster <- ppp(cluster$x, cluster$y, window=frame, check=FALSE) ## skip if cluster is empty if(cluster$n > 0) { ## trim to window cluster <- cluster[win] if(is.null(result)) { ## initialise offspring pattern and offspring-to-parent map result <- cluster parentid <- rep.int(1, cluster$n) } else { ## add to pattern result <- superimpose(result, cluster, W=win, check=FALSE) ## update offspring-to-parent map parentid <- c(parentid, rep.int(i, cluster$n)) } } } } else { ## no parents - empty pattern result <- ppp(numeric(0), numeric(0), window=win) parentid <- integer(0) } if(saveparents) { attr(result, "parents") <- parents attr(result, "parentid") <- parentid attr(result, "expand") <- expand } resultlist[[isim]] <- result } result <- simulationresult(resultlist, nsim, drop) return(result) } rGaussPoisson <- local({ rGaussPoisson <- function(kappa, r, p2, win=owin(c(0,1), c(0,1)), ..., nsim=1, drop=TRUE) { ## Gauss-Poisson process result <- rPoissonCluster(kappa, 1.05 * r, oneortwo, win, radius=r/2, p2=p2, nsim=nsim, drop=drop) return(result) } oneortwo <- function(x0, y0, radius, p2) { if(runif(1) > p2) ## one point return(list(x=x0, y=y0)) ## two points theta <- runif(1, min=0, max=2*pi) return(list(x=x0+c(-1,1)*radius*cos(theta), y=y0+c(-1,1)*radius*sin(theta))) } rGaussPoisson }) rstrat <- function(win=square(1), nx, ny=nx, k=1, nsim=1, drop=TRUE) { win <- as.owin(win) stopifnot(nx >= 1 && ny >= 1) stopifnot(k >= 1) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { xy <- stratrand(win, nx, ny, k) Xbox <- ppp(xy$x, xy$y, win$xrange, win$yrange, check=FALSE) result[[isim]] <- Xbox[win] } result <- simulationresult(result, nsim, drop) return(result) } rcellnumber <- local({ rcellnumber <- function(n, N=10, mu=1) { if(missing(mu) || mu == 1) { z <- rCellUnit(n=n, N=N) } else { z <- replicate(n, rCellCumul(x=mu, N=N)) } return(z) } rCellUnit <- function(n, N=10) { if(!missing(N)) { if(round(N) != N) stop("N must be an integer") stopifnot(is.finite(N)) stopifnot(N > 1) } u <- runif(n, min=0, max=1) p0 <- 1/N pN <- 1/(N * (N-1)) k <- ifelse(u < p0, 0, ifelse(u < (1 - pN), 1, N)) return(k) } rCellCumul <- function(x, N=10) { check.1.real(x) n <- ceiling(x) if(n <= 0) return(0) y <- rCellUnit(n=n, N=N) if(n == x) return(sum(y)) p <- x - (n-1) z <- sum(y[-1]) + rbinom(1, size=y[1], prob=p) return(z) } rcellnumber }) rcell <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx, N=10, nsim=1, drop=TRUE) { if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } win <- as.owin(win) xr <- win$xrange yr <- win$yrange ## determine grid coordinates if(missing(ny)) ny <- NULL if(missing(dy)) dy <- NULL g <- xy.grid(xr, yr, nx, ny, dx, dy) nx <- g$nx ny <- g$ny x0 <- g$x0 y0 <- g$y0 dx <- g$dx dy <- g$dy ## generate pattern(s) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { x <- numeric(0) y <- numeric(0) for(ix in seq_len(nx)) for(iy in seq_len(ny)) { nij <- rcellnumber(1, N) x <- c(x, x0[ix] + runif(nij, min=0, max=dx)) y <- c(y, y0[iy] + runif(nij, min=0, max=dy)) } Xbox <- ppp(x, y, xr, yr, check=FALSE) result[[isim]] <- Xbox[win] } result <- simulationresult(result, nsim, drop) return(result) } thinjump <- function(n, p) { # equivalent to which(runif(n) < p) for constant p stopifnot(length(p) == 1) if(p <= 0) return(integer(0)) if(p >= 1) return(seq_len(n)) if(p > 0.5) { #' for retention prob > 0.5 we find the ones to discard instead discard <- thinjump(n, 1-p) 
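    ## sampling the complement is cheaper here: the discarded indices
    ## are drawn with probability 1-p (expected length n*(1-p) < n/2)
    ## and the retained set is recovered by negative subscripting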
retain <- if(length(discard)) -discard else seq_len(n) return(retain) } guessmaxlength <- ceiling(n * p + 2 * sqrt(n * p * (1-p))) i <- .Call(SC_thinjumpequal, n, p, guessmaxlength, PACKAGE="spatstat.core") return(i) } rthin <- function(X, P, ..., nsim=1, drop=TRUE) { if(!(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X) || is.psp(X))) stop(paste("X should be a point pattern (class ppp, lpp, pp3 or ppx)", "or a line segment pattern (class psp)"), call.=FALSE) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } nX <- nobjects(X) if(nX == 0) { result <- rep(list(X), nsim) result <- simulationresult(result, nsim, drop) return(result) } if(is.numeric(P) && length(P) == 1 && spatstat.options("fastthin")) { # special algorithm for constant probability result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { retain <- thinjump(nX, P) Y <- X[retain] ## also handle offspring-to-parent map if present if(!is.null(parentid <- attr(X, "parentid"))) attr(Y, "parentid") <- parentid[retain] result[[isim]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } if(is.numeric(P)) { ## vector of retention probabilities pX <- P if(length(pX) != nX) { if(length(pX) == 1) pX <- rep.int(pX, nX) else stop("Length of vector P does not match number of points of X") } if(anyNA(pX)) stop("P contains NA's") } else if(is.function(P)) { ## function - evaluate it at points of X if(!(is.ppp(X) || is.lpp(X))) stop(paste("Don't know how to apply a function to an object of class", commasep(sQuote(class(X)))), call.=FALSE) pX <- if(inherits(P, c("linfun", "funxy"))) P(X, ...) else P(X$x, X$y, ...) if(length(pX) != nX) stop("Function P returned a vector of incorrect length") if(!is.numeric(pX)) stop("Function P returned non-numeric values") if(anyNA(pX)) stop("Function P returned some NA values") } else if(is.im(P)) { ## image - look it up if(!(is.ppp(X) || is.lpp(X))) stop(paste("Don't know how to apply image values to an object of class", commasep(sQuote(class(X)))), call.=FALSE) if(!(P$type %in% c("integer", "real"))) stop("Values of image P should be numeric") pX <- P[X, drop=FALSE] if(anyNA(pX)) stop("some points of X lie outside the domain of image P") } else stop("Unrecognised format for P") if(min(pX) < 0) stop("some probabilities are negative") if(max(pX) > 1) stop("some probabilities are greater than 1") result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { retain <- (runif(length(pX)) < pX) Y <- X[retain] ## also handle offspring-to-parent map if present if(!is.null(parentid <- attr(X, "parentid"))) attr(Y, "parentid") <- parentid[retain] result[[isim]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } spatstat.core/R/evalcovarslrm.R0000644000176200001440000001146114144333462016257 0ustar liggesusers#' #' evalcovarslrm.R #' #' method for 'evalCovar' for class 'slrm' #' #' $Revision: 1.4 $ $Date: 2021/06/28 05:56:35 $ evalCovar.slrm <- function(model, covariate, ..., lambdatype=c("probabilities", "intensity"), jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL) { lambdatype <- match.arg(lambdatype) #' trap misuse badargs <- intersect(c("eps", "dimyx"), names(list(...))) nbad <- length(badargs) if(nbad > 0) warning(paste(ngettext(nbad, "Argument", "Arguments"), commasep(sQuote(badargs)), ngettext(nbad, "is", "are"), "ignored by rhohat.slrm"), call.=FALSE) #' evaluate covariate values at presence pixels and all pixels #' determine names if(is.null(modelname)) modelname <- short.deparse(substitute(model)) 
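## Illustrative numeric check (kept inert): the intensity conversions used
## further below rest on the complementary log-log relation between the
## presence probability p of a pixel of area a and the Poisson intensity
## lambda, namely p = 1 - exp(-lambda * a), so lambda = -log(1 - p)/a.
if(FALSE) {
  lambda <- 5; a <- 0.01
  p <- 1 - exp(-lambda * a)
  all.equal(-log(1 - p)/a, lambda)   ## TRUE: the intensity is recovered
}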
if(covNotNamed <- is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$CallInfo$responsename csr <- is.stationary(model) info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=TRUE, spacename="two dimensions") FIT <- model$Fit$FIT link <- model$CallInfo$link ## original point pattern X <- model$Data$response W <- Window(X) ## extract data from each pixel (or split pixel) df <- model$Data$df ## restrict to subset if required if(!is.null(subset)) { ok <- inside.owin(df$x, df$y, subset) df <- df[ok, drop=FALSE] X <- X[subset] W <- W[subset, drop=FALSE] } ## presence/absence values responsename <- model$CallInfo$responsename presence <- as.logical(df[[responsename]]) ## areas of pixels or split pixels pixelareas <- exp(df$logpixelarea) ## pixel centres as a point pattern P <- ppp(df$x, df$y, window=W) #' parse covariate argument if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covNotNamed <- FALSE covariate <- switch(covname, x=function(x,y) { x }, y=function(x,y) { y }, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(is.im(covariate)) { type <- "im" ZP <- safelookup(covariate, P) Z <- covariate[W, drop=FALSE] W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" ZP <- covariate(P$x, P$y) if(!all(is.finite(ZP))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) #' collapse function body to single string if(covNotNamed) covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' values of covariate at pixels or split pixels Zvalues <- ZP #'values of covariate at 'presence' pixels ZX <- Zvalues[presence] #' fitted probability/intensity values at all pixels or split pixels switch(lambdatype, probabilities = { lambda <- predict(FIT, newdata=df, type="response") }, intensity = { if(link == "cloglog") { linkvalues <- predict(FIT, newdata=df, type="link") lambda <- exp(linkvalues)/pixelareas } else { probs <- predict(FIT, newdata=df, type="response") lambda <- -log(1-probs)/pixelareas } }) #' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } lambdaname <- paste("the fitted", lambdatype) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- lambda[presence] #' lambda image(s) lambdaimage <- predict(model, window=W, type=lambdatype) #' wrap up values <- list(Zimage = Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelareas, ZX = ZX, type = type) return(list(values=values, info=info)) } spatstat.core/R/randomNS.R0000644000176200001440000003501614144333463015123 0ustar liggesusers## ## randomNS.R ## ## simulating from Neyman-Scott processes ## ## $Revision: 1.27 $ $Date: 2020/09/01 10:10:26 $ ## ## Original code for rCauchy and rVarGamma by Abdollah Jalilian ## Other code and modifications by Adrian Baddeley ## Bug fixes by Abdollah, 
Adrian, and Rolf Turner rNeymanScott <- function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ..., lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE) { ## Generic Neyman-Scott process ## Implementation for bounded cluster radius ## ## Catch old argument name rmax for expand if(missing(expand) && !is.null(rmax <- list(...)$rmax)) expand <- rmax ## 'rcluster' may be ## ## (1) a function(x,y, ...) that takes the coordinates ## (x,y) of the parent point and generates a list(x,y) of offspring ## if(is.function(rcluster)) return(rPoissonCluster(kappa, expand, rcluster, win, ..., lmax=lmax, nsim=nsim, drop=drop, saveparents=saveparents)) ## (2) a list(mu, f) where mu is a numeric value, function, or pixel image ## and f is a function(n, ...) generating n i.i.d. offspring at 0,0 if(!(is.list(rcluster) && length(rcluster) == 2)) stop("rcluster should be either a function, or a list of two elements") win <- as.owin(win) mu <- rcluster[[1]] rdisplace <- rcluster[[2]] if(is.numeric(mu)) { ## homogeneous if(!(length(mu) == 1 && mu >= 0)) stop("rcluster[[1]] should be a single nonnegative number") mumax <- mu } else if (is.im(mu) || is.function(mu)) { ## inhomogeneous if(is.function(mu)) mu <- as.im(mu, W=win, ..., strict=TRUE) mumax <- max(mu) } else stop("rcluster[[1]] should be a number, a function or a pixel image") if(!is.function(rdisplace)) stop("rcluster[[2]] should be a function") ## Generate parents in dilated window frame <- boundingbox(win) dilated <- grow.rectangle(frame, expand) if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa))) stop(paste("The window in which the image", sQuote("kappa"), "is defined\n", "is not large enough to contain the dilation of the window", sQuote("win"))) if(nonempty) { if(is.function(kappa)) { kappa <- as.im(kappa, W=dilated, ..., strict=TRUE) lmax <- NULL } ## intensity of parents with at least one offspring point kappa <- kappa * (1 - exp(-mumax)) } ## generate parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim, drop=FALSE, warnwin=FALSE) resultlist <- vector(mode="list", length=nsim) for(i in 1:nsim) { parents <- parentlist[[i]] np <- npoints(parents) ## generate cluster sizes if(np == 0) { ## no parents - empty pattern result <- ppp(numeric(0), numeric(0), window=win) parentid <- integer(0) } else { if(!nonempty) { ## cluster sizes are Poisson csize <- rpois(np, mumax) } else { ## cluster sizes are Poisson conditional on > 0 csize <- qpois(runif(np, min=dpois(0, mumax)), mumax) } noff <- sum(csize) xparent <- parents$x yparent <- parents$y x0 <- rep.int(xparent, csize) y0 <- rep.int(yparent, csize) ## invoke random generator dd <- rdisplace(noff, ...) 
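## Illustrative sketch (kept inert): rcluster[[2]] can be any function(n, ...)
## returning n i.i.d. offspring displacements relative to the parent, as a
## list(x, y) or anything acceptable to xy.coords(). For example, uniform
## displacements in a disc of (hypothetical) radius 0.05:
if(FALSE) {
  rdisc <- function(n, radius=0.05) {
    R <- radius * sqrt(runif(n)); Theta <- runif(n, max=2*pi)
    list(x = R*cos(Theta), y = R*sin(Theta))
  }
  X <- rNeymanScott(kappa=10, expand=0.05, rcluster=list(5, rdisc))
}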
mm <- if(is.ppp(dd)) marks(dd) else NULL ## validate xy <- xy.coords(dd) dx <- xy$x dy <- xy$y if(!(length(dx) == noff)) stop("rcluster returned the wrong number of points") ## create offspring and offspring-to-parent map xoff <- x0 + dx yoff <- y0 + dy parentid <- rep.int(1:np, csize) ## trim to window retain <- inside.owin(xoff, yoff, win) if(is.im(mu)) retain[retain] <- inside.owin(xoff[retain], yoff[retain], as.owin(mu)) xoff <- xoff[retain] yoff <- yoff[retain] parentid <- parentid[retain] if(!is.null(mm)) mm <- marksubset(mm, retain) ## done result <- ppp(xoff, yoff, window=win, check=FALSE, marks=mm) } if(is.im(mu)) { ## inhomogeneously modulated clusters a la Waagepetersen P <- eval.im(mu/mumax) result <- rthin(result, P) } if(saveparents) { attr(result, "parents") <- parents attr(result, "parentid") <- parentid attr(result, "expand") <- expand } resultlist[[i]] <- result } result <- simulationresult(resultlist, nsim, drop) return(result) } fakeNeyScot <- function(Y, lambda, win, saveLambda, saveparents) { ## Y is a ppp or ppplist obtained from rpoispp ## which will be returned as the realisation of a Neyman-Scott process ## when the process is degenerately close to Poisson. if(saveLambda || saveparents) { if(saveLambda && !is.im(lambda)) lambda <- as.im(lambda, W=win) if(saveparents) parents <- ppp(window=win) # empty pattern if(isSingle <- is.ppp(Y)) Y <- solist(Y) for(i in seq_along(Y)) { Yi <- Y[[i]] if(saveLambda) attr(Yi, "lambda") <- lambda if(saveparents) attr(Yi, "parents") <- parents Y[[i]] <- Yi } if(isSingle) Y <- Y[[1L]] } return(Y) } rMatClust <- local({ ## like runifdisc but returns only the coordinates rundisk <- function(n, radius) { R <- radius * sqrt(runif(n, min=0, max=1)) Theta <- runif(n, min=0, max=2*pi) cbind(R * cos(Theta), R * sin(Theta)) } rMatClust <- function(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ..., poisthresh=1e-6, saveparents=TRUE) { ## Matern Cluster Process with Poisson (mu) offspring distribution ## Catch old scale syntax (r) if(missing(scale)) scale <- list(...)$r check.1.real(scale) stopifnot(scale > 0) ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents) return(result) } result <- rNeymanScott(kappa, scale, list(mu, rundisk), win, radius=scale, nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("MatClust", parents, scale=scale, mu=mu, ...) 
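## Usage sketch for rMatClust (kept inert; parameter values illustrative):
## 20 parents per unit area, cluster radius 0.05, mean 5 offspring each.
if(FALSE) {
  X <- rMatClust(kappa=20, scale=0.05, mu=5)
  ## with saveLambda=TRUE the driving intensity is attached as attribute
  plot(attr(rMatClust(20, 0.05, 5, saveLambda=TRUE), "Lambda"))
}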
attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE] } } return(if(nsim == 1 && drop) result[[1]] else result) } rMatClust }) rThomas <- local({ ## random displacements gaus <- function(n, sigma) { matrix(rnorm(2 * n, mean=0, sd=sigma), ncol=2) } ## main function rThomas <- function(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = 4*scale, ..., poisthresh=1e-6, saveparents=TRUE) { ## Thomas process with Poisson(mu) number of offspring ## at isotropic Normal(0,sigma^2) displacements from parent ## ## Catch old scale syntax (sigma) if(missing(scale)) scale <- list(...)$sigma check.1.real(scale) stopifnot(scale > 0) ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(4*pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents) return(result) } ## determine the maximum radius of clusters if(missing(expand)) expand <- clusterradius("Thomas", scale = scale, ...) result <- rNeymanScott(kappa, expand, list(mu, gaus), win, sigma=scale, nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("Thomas", parents, scale=scale, mu=mu, ...) attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE] } } return(if(nsim == 1 && drop) result[[1]] else result) } rThomas }) ## ================================================ ## Neyman-Scott process with Cauchy kernel function ## ================================================ ## scale / omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega rCauchy <- local({ ## simulate mixture of normals with inverse-gamma distributed variance rnmix.invgam <- function(n = 1, rate) { V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2) s <- 1/rgamma(n, shape=1/2, rate=rate) return(sqrt(s) * V) } ## main function rCauchy <- function (kappa, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) { ## scale / omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## Catch old scale syntax (omega) dots <- list(...) if(missing(scale)) scale <- dots$omega ## Catch old name 'eps' for 'thresh': if(missing(thresh)) thresh <- dots$eps %orifnull% 0.001 ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents) return(result) } ## determine the maximum radius of clusters if(missing(expand)){ expand <- clusterradius("Cauchy", scale = scale, thresh = thresh, ...) 
} else if(!missing(thresh)){ warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given") } ## simulate result <- rNeymanScott(kappa, expand, list(mu, rnmix.invgam), win, rate = scale^2/2, nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) ## correction from Abdollah: the rate is beta = omega^2 / 2 = eta^2 / 8. if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("Cauchy", parents, scale=scale, mu=mu, ...) attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE] } } return(if(nsim == 1 && drop) result[[1]] else result) } rCauchy }) ## ## ================================================================= ## Neyman-Scott process with Variance Gamma (Bessel) kernel function ## ================================================================= ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega rVarGamma <- local({ ## simulates mixture of isotropic Normal points in 2D with gamma variances rnmix.gamma <- function(n = 1, shape, rate) { V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2) s <- rgamma(n, shape=shape, rate=rate) return(sqrt(s) * V) } ## main function rVarGamma <- function (kappa, nu, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) { ## nu / nu.ker: smoothness parameter of Variance Gamma kernel function ## scale / omega: scale parameter of kernel function ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) if(missing(nu)){ nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker } else{ check.1.real(nu) stopifnot(nu > -1/2) } ## Catch old scale syntax (omega) if(missing(scale)) scale <- dots$omega ## Catch old name 'eps' for 'thresh': if(missthresh <- missing(thresh)) thresh <- dots$eps %orifnull% 0.001 ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(4 * pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents) return(result) } ## determine the maximum radius of clusters if(missing(expand)){ expand <- clusterradius("VarGamma", scale = scale, nu = nu, thresh = thresh, ...) } else if(!missthresh){ warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given") } ## simulate result <- rNeymanScott(kappa, expand, list(mu, rnmix.gamma), win, ## WAS: shape = 2 * (nu.ker + 1) shape = nu + 1, rate = 1/(2 * scale^2), nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("VarGamma", parents, scale=scale, nu=nu, mu=mu, ...) 
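## Usage sketch for rVarGamma (kept inert; parameter values illustrative).
## The smoothness parameter 'nu' is the kernel's nu.ker and must exceed -1/2.
if(FALSE) {
  X <- rVarGamma(kappa=25, nu=-1/4, scale=0.03, mu=4)
}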
attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE] } } return(if(nsim == 1 && drop) result[[1]] else result) } rVarGamma }) spatstat.core/R/allstats.R0000644000176200001440000000220514144333461015221 0ustar liggesusers# # # allstats.R # # $Revision: 1.18 $ $Date: 2016/02/11 10:17:12 $ # # allstats <- function(pp, ..., dataname=NULL,verb=FALSE) { # # Function allstats --- to calculate the F, G, K, and J functions # for an unmarked point pattern. # verifyclass(pp,"ppp") if(is.marked(pp)) stop("This function is applicable only to unmarked patterns.\n") # estimate F, G and J if(verb) cat("Calculating F, G, J ...") Jout <- do.call.matched(Jest,list(X=pp, ...)) if(verb) cat("ok.\n") # extract F, G and J Fout <- attr(Jout, "F") Gout <- attr(Jout, "G") attr(Jout, "F") <- NULL attr(Jout, "G") <- NULL fns <- list("F function"=Fout, "G function"=Gout, "J function"=Jout) # compute second moment function K if(verb) cat("Calculating K function...") Kout <- do.call.matched(Kest, list(X=pp, ...)) fns <- append(fns, list("K function"=Kout)) if(verb) cat("done.\n") # add title if(is.null(dataname)) dataname <- short.deparse(substitute(pp)) title <- paste("Four summary functions for ", dataname,".",sep="") attr(fns, "title") <- title # fns <- as.anylist(fns) return(fns) } spatstat.core/R/clarkevans.R0000644000176200001440000001531414144333461015530 0ustar liggesusers## clarkevans.R ## Clark-Evans statistic and test ## $Revision: 1.17 $ $Date: 2015/10/19 05:03:37 $ clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) { verifyclass(X, "ppp") W <- X$window # validate correction argument gavecorrection <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf"), multi=TRUE) if(("Donnelly" %in% correction) && (W$type != "rectangle")) { if(gavecorrection) warning("Donnelly correction only available for rectangular windows") correction <- correction[correction != "Donnelly"] } # guard correction applied iff `clipregion' is present isguard <- "guard" %in% correction askguard <- any(isguard) gaveguard <- !is.null(clipregion) if(gaveguard) clipregion <- as.owin(clipregion) if(askguard && !gaveguard) { warning("guard correction not performed; clipregion not specified") correction <- correction[!isguard] } else if(gaveguard && !askguard) correction <- c(correction, "guard") result <- clarkevansCalc(X, correction, clipregion) if(length(result) == 1L) result <- unname(result) return(result) } clarkevans.test <- function(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), nsim=999 ) { Xname <- short.deparse(substitute(X)) miss.nsim <- missing(nsim) verifyclass(X, "ppp") W <- Window(X) nX <- npoints(X) # validate SINGLE correction correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf")) switch(correction, none={ corrblurb <- "No edge correction" }, Donnelly={ if(W$type != "rectangle") stop("Donnelly correction only available for rectangular windows") corrblurb <- "Donnelly correction" }, guard={ if(is.null(clipregion)) stop("clipregion not specified") clipregion <- as.owin(clipregion) corrblurb <- "Guard correction" }, cdf={ corrblurb <- "CDF correction" }) # alternative hypothesis if(missing(alternative) || is.null(alternative)) alternative <- "two.sided" alternative <- pickoption("alternative", alternative, c(two.sided="two.sided", less="less", 
clustered="less", greater="greater", regular="greater")) altblurb <- switch(alternative, two.sided="two-sided", less="clustered (R < 1)", greater="regular (R > 1)") # compute observed value statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion, working=TRUE) working <- attr(statistic, "working") # if(correction == "none" && miss.nsim) { # standard Normal p-value SE <- with(working, sqrt(((4-pi)*areaW)/(4 * pi))/npts) Z <- with(working, (Dobs - Dpois)/SE) p.value <- switch(alternative, less=pnorm(Z), greater=1 - pnorm(Z), two.sided= 2*(1-pnorm(abs(Z)))) pvblurb <- "Z-test" } else { # Monte Carlo p-value sims <- numeric(nsim) for(i in 1:nsim) { Xsim <- runifpoint(nX, win=W) sims[i] <- clarkevansCalc(Xsim, correction=correction, clipregion=clipregion) } p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim) p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim) p.value <- switch(alternative, less=p.lower, greater=p.upper, two.sided=min(1, 2*min(p.lower, p.upper))) pvblurb <- paste("Monte Carlo test based on", nsim, "simulations of CSR with fixed n") } statistic <- as.numeric(statistic) names(statistic) <- "R" out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=c("Clark-Evans test", corrblurb, pvblurb), data.name=Xname) class(out) <- "htest" return(out) } clarkevansCalc <- function(X, correction="none", clipregion=NULL, working=FALSE) { # calculations for Clark-Evans index or test W <- Window(X) areaW <- area(W) npts <- npoints(X) intensity <- npts/areaW # R undefined for empty point pattern if(npts == 0) return(NA) # Dobs = observed mean nearest neighbour distance nndistX <- nndist(X) Dobs <- mean(nndistX) # Dpois = Expected mean nearest neighbour distance for Poisson process Dpois <- 1/(2*sqrt(intensity)) statistic <- NULL if(working) work <- list(areaW=areaW, npts=npts, intensity=intensity, Dobs=Dobs, Dpois=Dpois) # Naive uncorrected value if("none" %in% correction) { Rnaive <- Dobs/Dpois statistic <- c(statistic, naive=Rnaive) } # Donnelly edge correction if("Donnelly" %in% correction) { # Dedge = Edge corrected mean nearest neighbour distance, Donnelly 1978 if(W$type == "rectangle") { perim <- perimeter(W) Dkevin <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts Rkevin <- Dobs/Dkevin if(working) work <- append(work, list(perim=perim, Dkevin=Dkevin)) } else Rkevin <- NA statistic <- c(statistic, Donnelly=Rkevin) } # guard area method if("guard" %in% correction && !is.null(clipregion)) { # use nn distances from points inside `clipregion' ok <- inside.owin(X, , clipregion) Dguard <- mean(nndistX[ok]) Rguard <- Dguard/Dpois if(working) work <- append(work, list(Dguard=Dguard)) statistic <- c(statistic, guard=Rguard) } if("cdf" %in% correction) { # compute mean of estimated nearest-neighbour distance distribution G G <- Gest(X) numer <- stieltjes(function(x){x}, G)$km denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km Dcdf <- numer/denom Rcdf <- Dcdf/Dpois if(working) work <- append(work, list(Dcdf=Dcdf)) statistic <- c(statistic, cdf=Rcdf) } if(working) attr(statistic, "working") <- work return(statistic) } spatstat.core/R/dppmclass.R0000644000176200001440000000205714144333462015366 0ustar liggesusers#' Methods and support for class dppm #' #' $Revision: 1.5 $ $Date: 2019/10/18 03:41:43 $ is.dppm <- function(x) { inherits(x, "dppm") } plot.dppm <- function (x, ..., what = c("intensity", "statistic")){ objectname <- short.deparse(substitute(x)) if(missing(what) && is.stationary(x)) what <- "statistic" plot.kppm(x, ..., xname = objectname, 
what = what) } Kmodel.dppm <- function (model, ...){ Kmodel(model$fitted, W=model$window) } pcfmodel.dppm <- function (model, ...){ pcfmodel(model$fitted, W=model$window) } intensity.dppm <- function (X, ...){ return(intensity(X$fitted)) } reach.dppm <- function(x, ...){ reach(x$fitted, ...) } repul <- function(model, ...) { UseMethod("repul") } repul.dppm <- function(model, ...) { g <- pcfmodel(model) f <- function(x) { 2 * pi * x * (1 - g(x)) } rmax <- reach(model) h <- integrate(f, 0, rmax)$value lam <- intensity(model) ans <- h * lam return(ans) } #' print.dppm is identical to print.kppm and defined in kppm.R #' summary.dppm is defined in summary.dppm.R spatstat.core/R/density.psp.R0000644000176200001440000001177614144333462015670 0ustar liggesusers# # # density.psp.R # # $Revision: 1.19 $ $Date: 2021/03/01 01:04:39 $ # # density.psp <- function(x, sigma, ..., weights=NULL, edge=TRUE, method=c("FFT", "C", "interpreted"), at=NULL) { verifyclass(x, "psp") method <- match.arg(method) w <- x$window n <- x$n if(length(weights)) { check.nvector(weights, n, things="segments", oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, n) } else weights <- NULL len <- lengths_psp(x) ang <- angles.psp(x, directed=TRUE) ux <- unitname(x) if(missing(sigma)) sigma <- 0.1 * diameter(w) #' determine locations for evaluation of density if(is.null(at)) { atype <- "window" w <- do.call.matched(as.mask, resolve.defaults(list(w=quote(w), ...))) } else if(is.owin(at)) { atype <- "window" w <- do.call.matched(as.mask, resolve.defaults(list(w=quote(at), ...))) } else { atype <- "points" atY <- try(as.ppp(at, W=w)) if(inherits(atY, "try-error")) stop("Argument 'at' should be a window or a point pattern", call.=FALSE) } #' detect empty pattern if(n == 0 || all(len == 0)) switch(atype, window = return(as.im(0, w)), points = return(rep(0, npoints(atY)))) #' determine prediction coordinates switch(atype, window = { xy <- rasterxy.mask(w) xx <- xy$x yy <- xy$y }, points = { xx <- atY$x yy <- atY$y }) #' c o m p u t e switch(method, interpreted = { #' compute matrix contribution from each segment coz <- cos(ang) zin <- sin(ang) if(is.null(weights)) { #' unweighted for(i in seq_len(n)) { en <- x$ends[i,] dx <- xx - en$x0 dy <- yy - en$y0 u1 <- dx * coz[i] + dy * zin[i] u2 <- - dx * zin[i] + dy * coz[i] value <- dnorm(u2, sd=sigma) * (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma)) totvalue <- if(i == 1L) value else (value + totvalue) } } else { #' weighted for(i in seq_len(n)) { en <- x$ends[i,] dx <- xx - en$x0 dy <- yy - en$y0 u1 <- dx * coz[i] + dy * zin[i] u2 <- - dx * zin[i] + dy * coz[i] value <- weights[i] * dnorm(u2, sd=sigma) * (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma)) totvalue <- if(i == 1L) value else (value + totvalue) } } dens <- switch(atype, window = im(totvalue, w$xcol, w$yrow, unitname=ux), points = totvalue) }, C = { #' C implementation of the above xs <- x$ends$x0 ys <- x$ends$y0 xp <- as.numeric(as.vector(xx)) yp <- as.numeric(as.vector(yy)) np <- length(xp) if(is.null(weights)) { #' unweighted z <- .C(SC_segdens, sigma = as.double(sigma), ns = as.integer(n), xs = as.double(xs), ys = as.double(ys), alps = as.double(ang), lens = as.double(len), np = as.integer(np), xp = as.double(xp), yp = as.double(yp), z = as.double(numeric(np)), PACKAGE="spatstat.core") } else { #' weighted z <- .C(SC_segwdens, sigma = as.double(sigma), ns = as.integer(n), xs = as.double(xs), ys = as.double(ys), alps = as.double(ang), lens = as.double(len), ws = as.double(weights), np = as.integer(np), xp = 
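## Illustrative note: in both the interpreted and C branches, a segment of
## length 'len' contributes the exact convolution of its uniform measure
## with an isotropic Gaussian; in local coordinates (u1 along, u2 across
## the segment) this is
##     dnorm(u2, sd=sigma) * (pnorm(u1, sd=sigma) - pnorm(u1-len, sd=sigma)).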
as.double(xp), yp = as.double(yp), z = as.double(numeric(np)), PACKAGE="spatstat.core") } dens <- switch(atype, window = im(z$z, w$xcol, w$yrow, unitname=ux), points = z$z) }, FFT = { Y <- pixellate(x, ..., weights=weights, DivideByPixelArea=TRUE) dens <- blur(Y, sigma, normalise=edge, bleed=FALSE, ...) if(atype == "points") dens <- dens[atY, drop=FALSE] }) if(edge && method != "FFT") { edg <- second.moment.calc(midpoints.psp(x), sigma, what="edge", ...) switch(atype, window = { dens <- eval.im(dens/edg) }, points = { edgY <- edg[atY, drop=FALSE] dens <- dens/edgY }) } if(atype == "window") dens <- dens[x$window, drop=FALSE] attr(dens, "sigma") <- sigma return(dens) } spatstat.core/R/penttinen.R0000644000176200001440000000413714144333463015406 0ustar liggesusers# # # penttinen.R # # $Revision: 1.3 $ $Date: 2018/03/15 07:37:41 $ # # Penttinen pairwise interaction # # # ------------------------------------------------------------------- # Penttinen <- local({ # create blank template object without family and pars BlankAntti <- list( name = "Penttinen process", creator = "Penttinen", family = "pairwise.family", # evaluated later pot = function(d, par) { ans <- numeric(length(d)) dim(ans) <- dim(d) zz <- d/(2 * par$r) ok <- (zz < 1) z <- zz[ok] ans[ok] <- (2/pi) * (acos(z) - z * sqrt(1-z^2)) return(ans) }, par = list(r = NULL), # to be filled in parnames = "circle radius", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) gamma <- exp(theta) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { theta <- as.numeric(coeffs[1]) return(is.finite(theta) && (theta <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(2 * r) theta <- coeffs[1] if(abs(theta) <= epsilon) return(0) else return(2 * r) }, version=NULL # to be filled in ) class(BlankAntti) <- "interact" # Finally define main function Penttinen <- function(r) { instantiate.interact(BlankAntti, list(r=r)) } Penttinen <- intermaker(Penttinen, BlankAntti) Penttinen }) spatstat.core/R/detPPF-class.R0000644000176200001440000002055714144333462015632 0ustar liggesusers## support for class 'detpointprocfamily' print.detpointprocfamily <- function(x, ...){ splat(x$name, "determinantal point process model", ifelse(is.numeric(x$dim), paste("in dimension", x$dim), "")) #' Not used: #' parnames <- names(x$par) anyfixed <- length(x$fixedpar)>0 if(anyfixed){ fixedlambda <- NULL if(!is.null(x$intensity) && is.element(x$intensity, names(x$fixedpar))){ lambda <- signif(x$fixedpar[[x$intensity]], 4) x$fixedpar <- x$fixedpar[names(x$fixedpar)!=x$intensity] fixedlambda <- paste(x$intensity, ifelse(is.null(x$thin), paste("=", lambda), "= an image")) } if(length(x$fixedpar)>0){ fixedparstring <- paste(names(x$fixedpar), signif(unlist(x$fixed),4), sep = " = ", collapse = ", ") fixedparstring <- paste(fixedlambda, fixedparstring, sep=", ") } else{ fixedparstring <- fixedlambda } } ## Partially specified model: if(length(x$freepar)>0){ splat("The model is only partially specified.") splat("The following parameters are free (e.g. 
to be estimated by dppm):") cat(x$freepar, sep = ", ") cat("\n") if(anyfixed){ cat("The fixed parameters are: ") cat(fixedparstring, sep = ", ") } else{ splat("There are no fixed parameters.") } } else{ cat("The parameters are: ") cat(fixedparstring, sep = ", ") } cat("\n") if(!is.null(x$intensity)){ splat("The parameter", x$intensity, "specifies the intensity of the process.") } if(is.character(x$dim)){ splat("The parameter", x$dim, "specifies the dimension of the state space.") } invisible(NULL) } reach.detpointprocfamily <- function(x, ...){ model <- x fun <- model$range nam <- names(formals(fun)) do.call(model$range, c(model$fixedpar[is.element(names(model$fixedpar),nam)], list(...))) } dppparbounds <- function(model, name, ...){ if(inherits(model, "dppm")) model <- model$fitted if(!inherits(model, "detpointprocfamily")) stop("input model must be of class detpointprocfamily or dppm") fun <- model$parbounds nam <- names(formals(fun)) if(missing(name)) name <- nam[!is.element(nam, c("name", model$dim))] rslt <- matrix(0,length(name), 2, dimnames = list(name, c("lower", "upper"))) for(nn in name){ tmp <- try(do.call(fun, c(model$fixedpar[is.element(names(model$fixedpar),nam)], list(...), list(name=nn))), silent=TRUE) if(inherits(tmp, "try-error")){ rslt[nn,] <- c(NA, NA) }else{ rslt[nn,] <- tmp } } rslt } valid.detpointprocfamily <- function(object, ...){ if(length(object$freepar)>0) return(NA) ## If there is no function for checking validity we always return TRUE: if(is.null(object$valid)) return(TRUE) do.call(object$valid, object$fixedpar) } dppspecdenrange <- function(model){ ## If there is no function for checking finite range of spectral density we always return Inf: fun <- model$specdenrange if(is.null(fun)) return(Inf) xx <- try(fun(model), silent = TRUE) if(inherits(xx, "try-error")) Inf else xx } dppspecden <- function(model){ fun <- model$specden if(is.null(fun)) stop("Spectral density unknown for this model!") if(length(model$freepar)>0) stop("Cannot extract the spectral density of a partially specified model. Please supply all parameters.") specden <- function(x, ...){ allargs <- c(list(x), model$fixedpar, list(...)) do.call(fun, allargs) } return(specden) } dppkernel <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted fun <- model$kernel if(is.null(fun)) return(dppapproxkernel(model, ...)) if(length(model$freepar)>0) stop("Cannot extract the kernel of a partially specified model. 
Please supply all parameters.") firstarg <- names(formals(fun))[1L] kernel <- function(x){ allargs <- c(structure(list(x), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } return(kernel) } dppapproxkernel <- function(model, trunc = .99, W = NULL){ if(inherits(model, "dppm")){ W <- model$window model <- model$fitted } ####### BACKDOOR TO SPHERICAL CASE ######## if(!is.null(spherefun <- model$approxkernelfun)){ spherefun <- get(spherefun) rslt <- spherefun(model, trunc) return(rslt) } ########################################### d <- dim(model) if(is.null(W)) W <- boxx(replicate(d, c(-.5,.5), simplify=FALSE)) W <- as.boxx(W) if(d!=ncol(W$ranges)) stop(paste("The dimension of the window:", ncol(W$ranges), "is inconsistent with the dimension of the model:", d)) Wscale <- as.numeric(W$ranges[2L,]-W$ranges[1L,]) tmp <- dppeigen(model, trunc, Wscale, stationary=FALSE) index <- tmp$index eig <- tmp$eig prec <- tmp$prec trunc <- tmp$trunc rm(tmp) f <- function(r){ x <- matrix(0, nrow=length(r), ncol=d) x[,1L] <- r basis <- fourierbasis(x, index, win = W) approx <- matrix(eig, nrow=length(eig), ncol=length(r)) * basis return(Re(colSums(approx))) } attr(f, "dpp") <- list(prec = prec, trunc = trunc) return(f) } dppDpcf <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted fun <- model$Dpcf if(is.null(fun)) stop("Gradient of the pcf is not available for this model.") if(length(model$freepar)>0) stop("Cannot extract the gradient of the pcf of a partially specified model. Please supply all parameters.") firstarg <- names(formals(fun))[1L] Dg <- function(x){ allargs <- c(structure(list(x), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } return(Dg) } pcfmodel.detpointprocfamily <- function(model, ...){ kernel <- dppkernel(model, ...) f <- function(x){ 1 - (kernel(x)/kernel(0))^2 } return(f) } dppapproxpcf <- function(model, trunc = .99, W = NULL){ kernel <- dppapproxkernel(model, trunc = trunc, W = W) f <- function(x){ 1 - (kernel(x)/kernel(0))^2 } attr(f, "dpp") <- attr(kernel, "dpp") return(f) } Kmodel.detpointprocfamily <- function(model, ...){ if(length(model$freepar)>0) stop("Cannot extract the K function of a partially specified model. Please supply all parameters.") fun <- model$Kfun if(!is.null(fun)){ firstarg <- names(formals(fun))[1L] Kfun <- function(r){ allargs <- c(structure(list(r), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } } else{ pcf <- pcfmodel(model, ...) intfun <- function(xx){ 2*pi*xx*pcf(xx) } Kfun <- function(r){ r <- sort(r) if(r[1L]<0) stop("Negative values not allowed in K function!") r <- c(0,r) int <- unlist(lapply(2:length(r), function(i) integrate(intfun, r[i-1L], r[i], subdivisions=10)$value)) return(cumsum(int)) } } return(Kfun) } update.detpointprocfamily <- function(object, ...){ newpar <- list(...) if(length(newpar)==1L && is.list(newpar[[1L]]) && !is.im(newpar[[1L]])) newpar <- newpar[[1L]] nam <- names(newpar) if(length(newpar)>0&&is.null(nam)) stop(paste("Named arguments are required. 
Please supply parameter values in a", sQuote("tag=value"), "form")) oldpar <- object$fixedpar[!is.element(names(object$fixedpar), nam)] thin <- object$thin object <- do.call(object$caller, c(newpar,oldpar)) if(is.null(object$thin)) object$thin <- thin return(object) } is.stationary.detpointprocfamily <- function(x){ if(is.null(x$intensity)) return(FALSE) lambda <- getElement(x$fixedpar, x$intensity) if(!is.null(lambda)&&is.numeric(lambda)&&is.null(x$thin)) return(TRUE) return(FALSE) } intensity.detpointprocfamily <- function(X, ...){ lambda <- NULL if(!is.null(X$intensity)) lambda <- getElement(X$fixedpar, X$intensity) if(!is.null(lambda)){ if(!is.null(X$thin)) lambda <- lambda*X$thin return(lambda) } return(NA) } parameters.dppm <- parameters.detpointprocfamily <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted c(model$fixed, structure(rep(NA,length(model$freepar)), .Names = model$freepar)) } dim.detpointprocfamily <- function(x){ if(is.numeric(d <- x$dim)){ return(d) } else{ return(getElement(x$fixedpar, d)) } } spatstat.core/R/spatialcdf.R0000644000176200001440000000440514144333464015513 0ustar liggesusers## ## spatialcdf.R ## ## $Revision: 1.5 $ $Date: 2019/03/12 11:45:26 $ ## spatialcdf <- function(Z, weights=NULL, normalise=FALSE, ..., W=NULL, Zname=NULL) { Zdefaultname <- singlestring(short.deparse(substitute(Z))) if(is.character(Z) && length(Z) == 1) { if(is.null(Zname)) Zname <- Z switch(Zname, x={ Z <- function(x,y) { x } }, y={ Z <- function(x,y) { y } }, stop("Unrecognised covariate name") ) } if(is.null(Zname)) Zname <- Zdefaultname ## if(is.ppm(weights) || is.kppm(weights) || is.dppm(weights)) { Q <- quad.ppm(as.ppm(weights)) loc <- as.ppp(Q) df <- mpl.get.covariates(list(Z=Z), loc, covfunargs=list(...)) df$wt <- fitted(weights) * w.quad(Q) G <- with(df, ewcdf(Z, wt, normalise=normalise)) wtname <- if(normalise) "fraction of points" else "number of points" } else { if(is.null(W)) W <- as.owin(weights, fatal=FALSE) if(is.null(W)) W <- as.owin(Z, fatal=FALSE) if(is.null(W)) stop("No information specifying the spatial window") M <- as.mask(W, ...) loc <- rasterxy.mask(M, drop=TRUE) pixelarea <- with(unclass(M), xstep * ystep) if(is.null(weights)) { df <- mpl.get.covariates(list(Z=Z), loc, covfunargs=list(...)) G <- with(df, ewcdf(Z, normalise=normalise, adjust=pixelarea)) wtname <- if(normalise) "fraction of area" else "area" } else { df <- mpl.get.covariates(list(Z=Z, wt=weights), loc, covfunargs=list(...)) G <- with(df, ewcdf(Z, wt, normalise=normalise, adjust=pixelarea)) wtname <- if(normalise) "fraction of weight" else "weight" } } class(G) <- c("spatialcdf", class(G)) attr(G, "call") <- sys.call() attr(G, "Zname") <- Zname attr(G, "ylab") <- paste("Cumulative", wtname) return(G) } plot.spatialcdf <- function(x, ..., xlab, ylab) { if(missing(xlab) || is.null(xlab)) xlab <- attr(x, "Zname") if(missing(ylab) || is.null(ylab)) ylab <- attr(x, "ylab") if(inherits(x, "ecdf")) { plot.ecdf(x, ..., xlab=xlab, ylab=ylab) } else { plot.stepfun(x, ..., xlab=xlab, ylab=ylab) } } spatstat.core/R/auc.R0000644000176200001440000000725514144333461014154 0ustar liggesusers## ## auc.R ## ## Calculate ROC curve or area under it ## ## $Revision: 1.10 $ $Date: 2021/07/11 10:12:38 $ roc <- function(X, ...) 
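## Usage sketch for spatialcdf above (illustrative; assumes the 'bei'
## dataset from spatstat.data):
##   G <- spatialcdf(bei.extra$elev, W=Window(bei))   ## area-weighted CDF
##   plot(G)
##   fit <- ppm(bei ~ elev, data=bei.extra)
##   plot(spatialcdf(bei.extra$elev, weights=fit))    ## intensity-weighted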
{ UseMethod("roc") } roc.ppp <- function(X, covariate, ..., high=TRUE) { nullmodel <- ppm(X) result <- rocData(covariate, nullmodel, ..., high=high) return(result) } rocData <- function(covariate, nullmodel, ..., high=TRUE) { d <- spatialCDFframe(nullmodel, covariate, ...) U <- d$values$U ec <- if(high) ecdf(1-U) else ecdf(U) p <- seq(0,1,length=1024) df <- data.frame(p=p, fobs=ec(p), fnull=p) result <- fv(df, argu="p", ylab=quote(roc(p)), valu="fobs", fmla= . ~ p, desc=c("fraction of area", "observed fraction of points", "expected fraction if no effect"), fname="roc") fvnames(result, ".") <- c("fobs", "fnull") return(result) } roc.ppm <- function(X, ...) { stopifnot(is.ppm(X)) model <- X lambda <- predict(model, ...) Y <- data.ppm(model) nullmodel <- ppm(Y) result <- rocModel(lambda, nullmodel, ...) return(result) } roc.kppm <- function(X, ...) { stopifnot(is.kppm(X)) model <- as.ppm(X) lambda <- predict(model, ...) Y <- data.ppm(model) nullmodel <- ppm(Y) result <- rocModel(lambda, nullmodel, ...) return(result) } roc.slrm <- function(X, ...) { stopifnot(is.slrm(X)) model <- X lambda <- predict(model, ..., type="probabilities") Y <- response(model) nullmodel <- slrm(Y ~ 1) result <- rocModel(lambda, nullmodel, ..., lambdatype="probabilities") return(result) } rocModel <- function(lambda, nullmodel, ..., high) { if(!missing(high)) warning("Argument 'high' is ignored when computing ROC for a fitted model") d<- spatialCDFframe(nullmodel, lambda, ...) U <- d$values$U ec <- ecdf(1-U) p <- seq(0,1,length=1024) fobs <- ec(p) FZ <- d$values$FZ lambdavalues <- if(is.im(lambda)) lambda[] else unlist(lapply(lambda, "[")) F1Z <- ewcdf(lambdavalues, lambdavalues/sum(lambdavalues)) pZ <- get("y", environment(FZ)) qZ <- get("x", environment(FZ)) FZinverse <- approxfun(pZ, qZ, rule=2) ftheo <- 1 - F1Z(FZinverse(1-p)) df <- data.frame(p=p, fobs=fobs, ftheo=ftheo, fnull=p) result <- fv(df, argu="p", ylab=quote(roc(p)), valu="fobs", fmla = . ~ p, desc=c("fraction of area", "observed fraction of points", "expected fraction of points", "expected fraction if no effect"), fname="roc") fvnames(result, ".") <- c("fobs", "ftheo", "fnull") return(result) } # ...................................................... auc <- function(X, ...) { UseMethod("auc") } auc.ppp <- function(X, covariate, ..., high=TRUE) { d <- spatialCDFframe(ppm(X), covariate, ...) U <- d$values$U EU <- mean(U) result <- if(high) EU else (1 - EU) return(result) } auc.kppm <- function(X, ...) { auc(as.ppm(X), ...) } auc.ppm <- function(X, ...) { model <- X if(is.multitype(model)) { # cheat ro <- roc(model, ...) aobs <- with(ro, mean(fobs)) atheo <- with(ro, mean(ftheo)) } else if(is.stationary(model)) { aobs <- atheo <- 1/2 } else { lambda <- intensity(model) Fl <- ecdf(lambda[]) lambda <- as.im(lambda, Window(model)) X <- data.ppm(model) lamX <- lambda[X] aobs <- mean(Fl(lamX)) atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda) } result <- c(aobs, atheo) names(result) <- c("obs", "theo") return(result) } auc.slrm <- function(X, ...) { ro <- roc(X, ...) result <- with(ro, list(obs=mean(fobs), theo=mean(ftheo))) return(unlist(result)) } spatstat.core/R/rmhmodel.R0000644000176200001440000013524114144333463015212 0ustar liggesusers# # # rmhmodel.R # # $Revision: 1.78 $ $Date: 2021/01/07 03:08:41 $ # # rmhmodel <- function(...) { UseMethod("rmhmodel") } rmhmodel.rmhmodel <- function(model, ...) 
{ # Check for outdated internal format # C.par was replaced by C.beta and C.ipar in spatstat 1.22-3 if(outdated <- !is.null(model$C.par)) warning("Outdated internal format of rmhmodel object; rebuilding it") if(outdated || (length(list(...)) > 0)) model <- rmhmodel.list(unclass(model), ...) return(model) } rmhmodel.list <- function(model, ...) { argnames <- c("cif","par","w","trend","types") ok <- argnames %in% names(model) do.call(rmhmodel.default, resolve.defaults(list(...), model[argnames[ok]])) } rmhmodel.default <- local({ rmhmodel.default <- function(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) { rmhmodelDefault(..., cif=cif, par=par, w=w, trend=trend, types=types) } rmhmodelDefault <- function(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL, stopinvalid=TRUE) { if(length(list(...)) > 0) stop(paste("rmhmodel.default: syntax should be", "rmhmodel(cif, par, w, trend, types)", "with arguments given by name if they are present"), call. = FALSE) ## Validate parameters if(is.null(cif)) stop("cif is missing or NULL") if(is.null(par)) stop("par is missing or NULL") if(!is.null(w)) w <- as.owin(w) if(!is.character(cif)) stop("cif should be a character string") betamultiplier <- 1 Ncif <- length(cif) if(Ncif > 1) { ## hybrid ## check for Poisson components ispois <- (cif == 'poisson') if(any(ispois)) { ## validate Poisson components Npois <- sum(ispois) poismodels <- vector(mode="list", length=Npois) parpois <- par[ispois] for(i in 1:Npois) poismodels[[i]] <- rmhmodel(cif='poisson', par=parpois[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) ## consolidate Poisson intensity parameters poisbetalist <- lapply(poismodels, getElement, name="C.beta") poisbeta <- Reduce("*", poisbetalist) if(all(ispois)) { ## model collapses to a Poisson process cif <- 'poisson' Ncif <- 1 par <- list(beta=poisbeta) betamultiplier <- 1 } else { ## remove Poisson components cif <- cif[!ispois] Ncif <- sum(!ispois) par <- par[!ispois] if(Ncif == 1) # revert to single-cif format par <- par[[1]] ## absorb beta parameters betamultiplier <- poisbeta } } } if(Ncif > 1) { ## genuine hybrid models <- vector(mode="list", length=Ncif) check <- vector(mode="list", length=Ncif) for(i in 1:Ncif) models[[i]] <- rmhmodel(cif=cif[i], par=par[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) C.id <- unlist(lapply(models, getElement, name="C.id")) C.betalist <- lapply(models, getElement, name="C.beta") C.iparlist <- lapply(models, getElement, name="C.ipar") ## absorb beta multiplier into beta parameter of first component C.betalist[[1]] <- C.betalist[[1]] * betamultiplier ## concatenate for use in C C.beta <- unlist(C.betalist) C.ipar <- unlist(C.iparlist) check <- lapply(models, getElement, name="check") maxr <- max(unlist(lapply(models, getElement, name="reach"))) ismulti <- unlist(lapply(models, getElement, name="multitype.interact")) multi <- any(ismulti) ## determine whether model exists integ <- unlist(lapply(models, getElement, name="integrable")) stabi <- unlist(lapply(models, getElement, name="stabilising")) integrable <- all(integ) || any(stabi) stabilising <- any(stabi) ## string explanations of conditions for validity expl <- lapply(models, getElement, name="explainvalid") integ.ex <- unlist(lapply(expl, getElement, name="integrable")) stabi.ex <- unlist(lapply(expl, getElement, name="stabilising")) stabi.oper <- !(stabi.ex %in% c("TRUE", "FALSE")) integ.oper <- !(integ.ex %in% c("TRUE", "FALSE")) compnames <- if(!anyDuplicated(C.id)) paste("cif", sQuote(C.id)) else paste("component", 1:Ncif, 
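## Illustrative note on the hybrid rule used in this section: a hybrid is
## accepted as integrable when every component is integrable or at least
## one component is stabilising, so a non-integrable Strauss (gamma > 1)
## can be rescued by a hard-core component, e.g.
##   rmhmodel(cif=c("strauss","straush"),
##            par=list(list(beta=50, gamma=1.2, r=0.05),
##                     list(beta=1, gamma=1, r=0.05, hc=0.02)))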
paren(sQuote(C.id))) if(!integrable && stopinvalid) { ## model is not integrable: explain why ifail <- !integ & integ.oper ireason <- paste(compnames[ifail], "should satisfy", paren(integ.ex[ifail], "{")) ireason <- verbalogic(ireason, "and") if(sum(ifail) <= 1) { ## There's only one offending cif, so stability is redundant sreason <- "FALSE" } else { sfail <- !stabi & stabi.oper sreason <- paste(compnames[sfail], "should satisfy", paren(stabi.ex[sfail], "{")) sreason <- verbalogic(sreason, "or") } reason <- verbalogic(c(ireason, sreason), "or") stop(paste("rmhmodel: hybrid model is not integrable; ", reason), call.=FALSE) } else { ## construct strings summarising conditions for validity if(!any(integ.oper)) ireason <- as.character(integrable) else { ireason <- paste(compnames[integ.oper], "should satisfy", paren(integ.ex[integ.oper], "{")) ireason <- verbalogic(ireason, "and") } if(!any(stabi.oper)) sreason <- as.character(stabilising) else { sreason <- paste(compnames[stabi.oper], "should satisfy", paren(stabi.ex[stabi.oper], "{")) sreason <- verbalogic(sreason, "or") } ireason <- verbalogic(c(ireason, sreason), "or") explainvalid <- list(integrable=ireason, stabilising=sreason) } out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, C.betalist=C.betalist, C.iparlist=C.iparlist, check=check, multitype.interact=multi, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=maxr) class(out) <- c("rmhmodel", class(out)) return(out) } ## non-hybrid ## Check that this is a recognised model ## and look up the rules for this model rules <- spatstatRmhInfo(cif) ## Map the name of the cif from R to C ## (the names are normally identical in R and C, ## except "poisson" -> NA) C.id <- rules$C.id ## Check that the C name is recognised in C if(!is.na(C.id)) { z <- .C(SC_knownCif, cifname=as.character(C.id), answer=as.integer(0), PACKAGE="spatstat.core") ok <- as.logical(z$answer) if(!ok) stop(paste("Internal error: the cif", sQuote(C.id), "is not recognised in the C code")) } ## Validate the model parameters and reformat them check <- rules$parhandler checkedpar <- if(!rules$multitype) check(par) else if(!is.null(types)) check(par, types) else ## types vector not given - defer checking NULL if(!is.null(checkedpar)) { stopifnot(is.list(checkedpar)) stopifnot(!is.null(names(checkedpar)) && all(nzchar(names(checkedpar)))) stopifnot(names(checkedpar)[[1]] == "beta") C.beta <- unlist(checkedpar[[1]]) C.beta <- C.beta * betamultiplier C.ipar <- as.numeric(unlist(checkedpar[-1])) } else { C.beta <- C.ipar <- NULL } ## Determine whether model is integrable integrable <- rules$validity(par, "integrable") explainvalid <- rules$explainvalid if(!integrable && stopinvalid) stop(paste("rmhmodel: the model is not integrable; it should satisfy", explainvalid$integrable), call.=FALSE) ## Determine whether cif is stabilising ## (i.e. 
any hybrid including this cif will be integrable) stabilising <- rules$validity(par, "stabilising") ## Calculate reach of model mreach <- rules$reach(par) ################################################################### ## return augmented list out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, check= if(is.null(C.ipar)) check else NULL, multitype.interact=rules$multitype, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=mreach ) class(out) <- c("rmhmodel", class(out)) return(out) } rmhmodel.default }) print.rmhmodel <- function(x, ...) { verifyclass(x, "rmhmodel") splat("Metropolis-Hastings algorithm, model parameters\n") Ncif <- length(x$cif) splat("Conditional intensity:", if(Ncif == 1) "cif=" else "hybrid of cifs", commasep(sQuote(x$cif))) if(!is.null(x$types)) { if(length(x$types) == 1) splat("Univariate process.") else { cat("Multitype process with types =\n") print(x$types) if(!x$multitype.interact) splat("Interaction does not depend on type") } } else if(x$multitype.interact) { splat("Multitype process, types not yet specified.") } else { typ <- try(rmhResolveTypes(x, rmhstart(), rmhcontrol())) if(!inherits(typ, "try-error")) { ntyp <- length(typ) if(ntyp > 1) { splat("Data imply a multitype process with", ntyp, "types of points.") splat("Interaction does not depend on type.") } } } cat("\nNumerical parameters: par =\n") print(x$par) if(is.null(x$C.ipar)) splat("Parameters have not yet been checked for compatibility with types.") if(is.owin(x$w)) print(x$w) else splat("Window: not specified.") cat("\nTrend: ") tren <- x$trend if(is.null(tren)) { cat("none.\n") } else { if(is.list(tren)) cat(paste0("List of ", length(tren), ":\n")) print(tren) } if(!is.null(x$integrable) && !x$integrable) cat("\n*Warning: model is not integrable and cannot be simulated*\n") return(invisible(NULL)) } reach.rmhmodel <- function(x, ...) { if(length(list(...)) == 0) return(x$reach) # reach must be recomputed cif <- x$cif Ncif <- length(cif) pars <- if(Ncif == 1) list(x$par) else x$par maxr <- 0 for(i in seq_len(Ncif)) { cif.i <- cif[i] par.i <- pars[[i]] rules <- spatstatRmhInfo(cif.i) rchfun <- rules$reach if(!is.function(rchfun)) stop(paste("Internal error: reach is unknown for cif=", sQuote(cif.i)), call.=FALSE) r.i <- rchfun(par.i, ...) maxr <- max(maxr, r.i, na.rm=TRUE) } return(maxr) } is.poisson.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") identical(x$cif, 'poisson') } is.stationary.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") tren <- x$trend return(is.null(tren) || is.numeric(tren)) } as.owin.rmhmodel <- function(W, ..., fatal=FALSE) { # W is the rmhmodel object. It contains a window w ans <- W$w if(is.owin(ans)) return(ans) if(fatal) stop("rmhmodel object does not contain a window") return(NULL) } domain.rmhmodel <- Window.rmhmodel <- function(X, ...) { as.owin(X) } is.expandable.rmhmodel <- local({ ok <- function(z) { is.null(z) || is.numeric(z) || is.function(z) } is.expandable.rmhmodel <- function(x) { tren <- x$tren ans <- if(!is.list(tren)) ok(tren) else all(sapply(tren, ok)) return(ans) } is.expandable.rmhmodel }) ##### Table of rules for handling rmh models ################## spatstatRmhInfo <- function(cifname) { rules <- .Spatstat.RmhTable[[cifname]] if(is.null(rules)) stop(paste("Unrecognised cif:", sQuote(cifname)), call.=FALSE) return(rules) } .Spatstat.RmhTable <- list( # # 0. Poisson (special case) # 'poisson'= list( C.id=NA, multitype=FALSE, parhandler=function(par, ...) 
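## Usage sketch (illustrative): the table entries below drive parameter
## checking, integrability tests and 'reach' for each cif, e.g.
##   mod <- rmhmodel(cif="strauss",
##                   par=list(beta=100, gamma=0.5, r=0.07), w=square(1))
##   reach(mod)   ## 0.07, since gamma != 1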
{ ctxt <- "For the Poisson process" with(par, forbidNA(beta, ctxt)) par <- check.named.list(par, "beta", ctxt) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ...) { return(0) }, hardcore = function(par, ...) { return(0) }, temper = function(par, invtemp) { return(par^invtemp) } ), # # 1. Strauss. # 'strauss'= list( C.id="strauss", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the strauss cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 2. Strauss with hardcore. # 'straush' = list( C.id="straush", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the straush cif" par <- check.named.list(par, c("beta","gamma","r","hc"), ctxt) # treat hc=NA as absence of hard core par <- within(par, if(is.na(hc)) { hc <- 0 } ) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- hc; gamma <- 1 } ) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(hc <= r, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc gamma <- par$gamma switch(kind, integrable=(hc > 0 || gamma <= 1), stabilising=(hc > 0) ) }, explainvalid=list( integrable="hc > 0 or gamma <= 1", stabilising="hc > 0"), reach = function(par, ...) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) h else r) }, hardcore = function(par, ..., epsilon=0) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else h) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 3. Softcore. # 'sftcr' = list( C.id="sftcr", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the sftcr cif" par <- check.named.list(par, c("beta","sigma","kappa"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(sigma >= 0, ctxt)) with(par, explain.ifnot(kappa >= 0 && kappa <= 1, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ..., epsilon=0) { if(epsilon==0) return(Inf) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/(epsilon^(kappa/2))) }, hardcore = function(par, ..., epsilon=0) { if(epsilon==0) return(0) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/((-log(epsilon))^(kappa/2))) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp sigma <- sigma * (invtemp^(kappa/2)) }) } ), # # 4. Multitype Strauss. # 'straussm' = list( C.id="straussm", multitype=TRUE, parhandler=function(par, types) { ctxt <- "For the straussm cif" par <- check.named.list(par, c("beta","gamma","radii"), ctxt) beta <- par$beta gamma <- par$gamma r <- par$radii ntypes <- length(types) check.finite(beta, ctxt) check.nvector(beta, ntypes, TRUE, "types") MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(r, ntypes, "par$radii") if(any(nar <- is.na(r))) { r[nar] <- 0 gamma[nar] <- 1 } check.finite(r, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(r >= 0), ctxt) par <- list(beta=beta, gamma=gamma, r=r) return(par) }, validity=function(par, kind) { gamma <- par$gamma radii <- par$radii dg <- diag(gamma) dr <- diag(radii) hard <-!is.na(dg) & (dg == 0) & !is.na(dr) & (dr > 0) operative <- !is.na(gamma) & !is.na(radii) & (radii > 0) switch(kind, stabilising=all(hard), integrable=all(hard) || all(gamma[operative] <= 1)) }, explainvalid=list( integrable=paste( "gamma[i,j] <= 1 for all i and j,", "or gamma[i,i] = 0 for all i"), stabilising="gamma[i,i] = 0 for all i"), reach = function(par, ...) { r <- par$radii g <- par$gamma operative <- ! (is.na(r) | (g == 1)) return(max(0, r[operative])) }, hardcore = function(par, ..., epsilon=0) { r <- par$radii g <- par$gamma return(max(0, r[!is.na(r) & g <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 5. Multitype Strauss with hardcore. 
# 'straushm' = list( C.id="straushm", multitype=TRUE, parhandler=function(par, types) { ctxt="For the straushm cif" par <- check.named.list(par, c("beta","gamma","iradii","hradii"), ctxt) beta <- par$beta gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(iradii, ntypes, "par$iradii") if(any(nar <- is.na(iradii))) { iradii[nar] <- 0 gamma[nar] <- 1 } check.finite(iradii, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") nah <- is.na(hradii) hradii[nah] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(iradii >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) comparable <- !nar & !nah explain.ifnot(all((iradii >= hradii)[comparable]), ctxt) par <- list(beta=beta,gamma=gamma,iradii=iradii,hradii=hradii) return(par) }, validity=function(par, kind) { gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii dh <- diag(hradii) dg <- diag(gamma) dr <- diag(iradii) hhard <- !is.na(dh) & (dh > 0) ihard <- !is.na(dr) & (dr > 0) & !is.na(dg) & (dg == 0) hard <- hhard | ihard operative <- !is.na(gamma) & !is.na(iradii) & (iradii > 0) switch(kind, stabilising=all(hard), integrable={ all(hard) || all(gamma[operative] <= 1) }) }, explainvalid=list( integrable=paste( "hradii[i,i] > 0 or gamma[i,i] = 0 for all i, or", "gamma[i,j] <= 1 for all i and j"), stabilising="hradii[i,i] > 0 or gamma[i,i] = 0 for all i"), reach=function(par, ...) { r <- par$iradii h <- par$hradii g <- par$gamma roperative <- ! (is.na(r) | (g == 1)) hoperative <- ! is.na(h) return(max(0, r[roperative], h[hoperative])) }, hardcore = function(par, ..., epsilon=0) { r <- par$radii h <- par$hradii g <- par$gamma return(max(h[!is.na(h)], r[!is.na(r) & g <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 6. Diggle-Gates-Stibbard interaction # (function number 1 from Diggle, Gates, and Stibbard) 'dgs' = list( C.id="dgs", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the dgs cif" par <- check.named.list(par, c("beta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) }, hardcore=function(par, ..., epsilon=0) { if(epsilon == 0) return(0) return(par[["rho"]] * (2/pi) * asin(sqrt(epsilon))) }, temper = NULL # not a loglinear model ), # # 7. Diggle-Gratton interaction # (function number 2 from Diggle, Gates, and Stibbard). 'diggra' = list( C.id="diggra", multitype=FALSE, parhandler=function(par, ...) 
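## Illustration (sketch): for the 'diggra' cif the parameter delta acts
## as an effective hard core (the pair interaction is zero up to delta
## and rises to one at rho), hence the check delta < rho below:
##   rmhmodel(cif="diggra",
##            par=list(beta=100, kappa=0.5, delta=0.05, rho=0.1))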
{ ctxt <- "For the diggra cif" par <- check.named.list(par, c("beta","kappa","delta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(delta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(delta, ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(kappa >= 0, ctxt)) with(par, explain.ifnot(delta >= 0, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) with(par, explain.ifnot(delta < rho, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) }, hardcore=function(par, ..., epsilon=0) { return(par[["delta"]]) }, temper = function(par, invtemp) { within(par, { kappa <- kappa * invtemp }) }), # # 8. Geyer saturation model # 'geyer' = list( C.id="geyer", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the geyer cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(sat, ctxt)) par <- within(par, sat <- min(sat, .Machine$integer.max-100)) par <- within(par, if(is.na(gamma)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else 2 * r) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 9. The ``lookup'' device. This permits simulating, at least # approximately, ANY pairwise interaction function model # with isotropic pair interaction (i.e. depending only on distance). # The pair interaction function is provided as a vector of # distances and corresponding function values which are used # as a ``lookup table'' by the C code. # 'lookup' = list( C.id="lookup", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the lookup cif" par <- check.named.list(par, c("beta","h"), ctxt, "r") with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) beta <- par[["beta"]] h.init <- par[["h"]] r <- par[["r"]] if(is.null(r)) { if(!is.stepfun(h.init)) stop(paste("For cif=lookup, if component r of", "par is absent then component h must", "be a stepfun object.")) if(!is.cadlag(h.init)) stop(paste("The lookup pairwise interaction step", "function must be right continuous,\n", "i.e. 
built using the default values of the", sQuote("f"), "and", sQuote("right"), "arguments for stepfun.")) r <- knots(h.init) h0 <- get("yleft",envir=environment(h.init)) h <- h.init(r) nlook <- length(r) if(!isTRUE(all.equal(h[nlook],1))) stop(paste("The lookup interaction step function", "must be equal to 1 for", dQuote("large"), "distances.")) if(r[1] <= 0) stop(paste("The first jump point (knot) of the lookup", "interaction step function must be", "strictly positive.")) h <- c(h0,h) } else { h <- h.init nlook <- length(r) if(length(h) != nlook) stop("Mismatch of lengths of h and r lookup vectors.") if(anyNA(r)) stop("Missing values not allowed in r lookup vector.") if(is.unsorted(r)) stop("The r lookup vector must be in increasing order.") if(r[1] <= 0) stop(paste("The first entry of the lookup vector r", "should be strictly positive.")) h <- c(h,1) } if(any(h < 0)) stop(paste("Negative values in the lookup", "pairwise interaction function.")) if(h[1] > 0 & any(h > 1)) stop(paste("Lookup pairwise interaction function does", "not define a valid point process.")) rmax <- r[nlook] r <- c(0,r) nlook <- nlook+1 deltar <- mean(diff(r)) if(isTRUE(all.equal(diff(r),rep.int(deltar,nlook-1)))) { par <- list(beta=beta,nlook=nlook, equisp=1, deltar=deltar,rmax=rmax, h=h) } else { par <- list(beta=beta,nlook=nlook, equisp=0, deltar=deltar,rmax=rmax, h=h, r=r) } return(par) }, validity=function(par, kind) { h <- par$h if(is.stepfun(h)) h <- eval(expression(c(yleft,y)),envir=environment(h)) switch(kind, integrable={ (h[1] == 0) || all(h <= 1) }, stabilising={ h[1] == 0 }) }, explainvalid=list( integrable="h[1] == 0 or h[i] <= 1 for all i", stabilising="h[1] == 0"), reach = function(par, ...) { r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(0, r[h <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp h <- h^invtemp }) } ), # # 10. Area interaction # 'areaint'= list( C.id="areaint", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the areaint cif" par <- check.named.list(par, c("beta","eta","r"), ctxt) par <- within(par, if(is.na(r)) { r <- 0 }) with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(eta, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.finite(eta, ctxt)) with(par, check.finite(r, ctxt)) with(par, explain.ifnot(eta >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] eta <- par[["eta"]] return(if(eta == 1) 0 else (2 * r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] eta <- par[["eta"]] if(eta > epsilon) return(0) if(eta == 0) return(2 * r) # linear approximation return(2 * r * eta/epsilon) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp eta <- eta^invtemp }) } ), # # 11. The ``badgey'' (Baddeley-Geyer) model. # 'badgey' = list( C.id="badgey", multitype=FALSE, parhandler=function(par, ...) 
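## Illustration (sketch): the 'lookup' device above accepts either a
## stepfun or paired vectors r and h; e.g. a one-step approximation to a
## Strauss interaction:
##   h <- stepfun(0.7, c(0.2, 1))   # right-continuous, equal to 1 far away
##   rmhmodel(cif="lookup", par=list(beta=2, h=h))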
{ ctxt <- "For the badgey cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) par <- within(par, sat <- pmin(sat, .Machine$integer.max-100)) par <- within(par, gamma[is.na(gamma) | is.na(r)] <- 1) par <- within(par, r[is.na(r)] <- 0) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(all(gamma >= 0), ctxt)) with(par, explain.ifnot(all(r >= 0), ctxt)) with(par, explain.ifnot(all(sat >= 0), ctxt)) with(par, explain.ifnot(length(gamma) == length(r), ctxt)) gamma <- par[["gamma"]] r <- par[["r"]] sat <- par[["sat"]] if(length(sat)==1) sat <- rep.int(sat,length(gamma)) else explain.ifnot(length(sat) == length(gamma), ctxt) mmm <- cbind(gamma,r,sat) mmm <- mmm[fave.order(r),] ndisc <- length(r) par <- list(beta=par$beta,ndisc=ndisc,parms=as.vector(t(mmm))) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] gamma <- par[["gamma"]] operative <- (gamma != 1) return(if(!any(operative)) 0 else (2 * max(r[operative]))) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] gamma <- par[["gamma"]] return(max(0, r[gamma <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 12. The hard core process 'hardcore' = list( C.id="hardcore", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the hardcore cif" par <- check.named.list(par, c("beta", "hc"), ctxt) par <- within(par, if(is.na(hc)) { hc <- 0 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc switch(kind, integrable=TRUE, stabilising=(hc > 0)) }, explainvalid=list(integrable="TRUE", stabilising="hc > 0"), reach = function(par, ...) { hc <- par[["hc"]] return(hc) }, hardcore = function(par, ...) { hc <- par[["hc"]] return(hc) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp }) } ), # # Lucky 13. Fiksel process 'fiksel' = list( C.id="fiksel", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Fiksel cif" par <- check.named.list(par, c("beta", "r", "hc", "kappa", "a"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(a, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(a, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(r > hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc a <- par$a switch(kind, integrable=(hc > 0 || a <= 0), stabilising=(hc > 0)) }, explainvalid=list( integrable="hc > 0 or a <= 0", stabilising="hc > 0"), reach = function(par, ...) { r <- par[["r"]] hc <- par[["hc"]] a <- par[["a"]] return(if(a != 0) r else hc) }, hardcore = function(par, ...) { return(par[["hc"]]) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp a <- a * invtemp }) } ), # # 14. Lennard-Jones 'lennard' = list( C.id="lennard", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the Lennard-Jones cif" par <- check.named.list(par, c("beta", "sigma", "epsilon"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(epsilon, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(epsilon, ctxt)) with(par, explain.ifnot(sigma > 0, ctxt)) with(par, explain.ifnot(epsilon > 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=(par$sigma > 0), stabilising=FALSE) }, explainvalid=list( integrable="sigma > 0", stabilising="FALSE"), reach = function(par, ...) { sigma <- par[["sigma"]] return(2.5 * sigma) }, hardcore = function(par, ...) { sigma <- par[["sigma"]] return(sigma/2.5) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp epsilon <- epsilon * invtemp }) } ), # # 15. Multitype hardcore. # 'multihard' = list( C.id="multihard", multitype=TRUE, parhandler=function(par, types) { ctxt="For the multihard cif" par <- check.named.list(par, c("beta","hradii"), ctxt) beta <- par$beta hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") hradii[is.na(hradii)] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) par <- list(beta=beta,hradii=hradii) return(par) }, validity=function(par, kind) { switch(kind, integrable=return(TRUE), stabilising={ hself <- diag(par$hradii) repel <- !is.na(hself) & (hself > 0) return(all(repel)) }) }, explainvalid=list( integrable="TRUE", stabilising="hradii[i,i] > 0 for all i"), reach=function(par, ...) { return(max(0, par$hradii, na.rm=TRUE)) }, hardcore=function(par, ..., epsilon=0) { return(max(0, par$hradii, na.rm=TRUE)) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp }) } ), # # 16. Triplets. # 'triplets'= list( C.id="triplets", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the triplets cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) }, hardcore = function(par, ...) { return(0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 17. Penttinen. # 'penttinen'= list( C.id="penttinen", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the penttinen cif" par <- check.named.list(par, c("beta", "gamma", "r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r > 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else (2 * r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) (2 * r) else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ) # end of list '.Spatstat.RmhTable' ) spatstat.core/R/eval.fasp.R0000644000176200001440000000560514144333462015261 0ustar liggesusers# # eval.fasp.R # # # eval.fasp() Evaluate expressions involving fasp objects # # compatible.fasp() Check whether two fasp objects are compatible # # $Revision: 1.11 $ $Date: 2016/02/11 10:17:12 $ # eval.fasp <- local({ eval.fasp <- function(expr, envir, dotonly=TRUE) { #' convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) #' convert syntactic expression to call ## elang <- substitute(expr) #' find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") ## get the actual variables if(missing(envir)) { envir <- sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames ## find out which ones are fasp objects isfasp <- unlist(lapply(vars, inherits, what="fasp")) if(!any(isfasp)) stop("No fasp objects in this expression") fasps <- vars[isfasp] nfasps <- length(fasps) ## test whether the fasp objects are compatible if(nfasps > 1L && !(do.call(compatible, unname(fasps)))) stop(paste(if(nfasps > 2) "some of" else NULL, "the objects", commasep(sQuote(names(fasps))), "are not compatible")) ## copy first object as template result <- fasps[[1L]] which <- result$which nr <- nrow(which) nc <- ncol(which) ## create environment for evaluation fenv <- new.env() ## for each [i,j] extract fv objects and evaluate expression for(i in seq_len(nr)) for(j in seq_len(nc)) { ## extract fv objects at position [i,j] funs <- lapply(fasps, getpanel, i=i, j=j) ## insert into list of argument values vars[isfasp] <- funs ## assign them into the right environment for(k in seq_along(vars)) assign(varnames[k], vars[[k]], envir=fenv) ## evaluate resultij <- eval(substitute(eval.fv(ee,ff,dd), list(ee=e, ff=fenv, dd=dotonly))) ## insert back into fasp result$fns[[which[i,j] ]] <- resultij } result$title <- paste("Result of eval.fasp(", e, ")", sep="") return(result) } getpanel <- function(x, i, j) { as.fv(x[i,j]) } eval.fasp }) compatible.fasp <- function(A, B, ...) 
{ verifyclass(A, "fasp") if(missing(B)) return(TRUE) verifyclass(B, "fasp") dimA <- dim(A$which) dimB <- dim(B$which) if(!all(dimA == dimB)) return(FALSE) for(i in seq_len(dimA[1L])) for(j in seq_len(dimA[2L])) { Aij <- as.fv(A[i,j]) Bij <- as.fv(B[i,j]) if(!compatible.fv(Aij, Bij)) return(FALSE) } # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.fasp(B, ...)) } spatstat.core/R/hotbox.R0000644000176200001440000000365214141452520014677 0ustar liggesusers#' #' hotbox.R #' #' Heat kernel for a one-dimensional rod #' and two-dimensional rectangle #' #' Code from Greg McSwiggan and Adrian Baddeley hotbox <- function(Xsource, Xquery, sigma, ..., W=NULL, squared=FALSE, nmax=20) { #' heat kernel in a rectangle check.1.real(sigma) if(is.null(W)) { if(is.ppp(Xsource)) W <- Window(Xsource) else if(is.sob(Xquery)) W <- Window(Xquery) else stop("No window information is present") } else { stopifnot(is.owin(W)) if(!is.sob(Xsource)) Xsource <- as.ppp(Xsource, W) if(!is.sob(Xquery)) Xquery <- as.ppp(Xquery, W) } if(!is.rectangle(W)) stop("The window must be a rectangle") slen <- sidelengths(W) Xsource <- shift(Xsource, origin="bottomleft") Xquery <- shift(Xquery, origin="bottomleft") nsource <- npoints(Xsource) if(is.ppp(Xquery)) { nquery <- npoints(Xquery) answer <- numeric(nquery) for(i in seq_len(nsource)) { cx <- hotrod(slen[1], Xsource$x[i], Xquery$x, sigma, ends="insulated", nmax=nmax) cy <- hotrod(slen[2], Xsource$y[i], Xquery$y, sigma, ends="insulated", nmax=nmax) contrib <- cx * cy if(squared) contrib <- contrib^2 answer <- answer + contrib } } else if(is.im(Xquery) || is.owin(Xquery)) { Xquery <- as.im(Xquery, ...) if(anyNA(Xquery)) stop("Image must be a full rectangle") ansmat <- matrix(0, nrow(Xquery), ncol(Xquery)) xx <- Xquery$xcol yy <- Xquery$yrow for(i in seq_len(nsource)) { cx <- hotrod(slen[1], Xsource$x[i], xx, sigma, ends="insulated", nmax=nmax) cy <- hotrod(slen[2], Xsource$y[i], yy, sigma, ends="insulated", nmax=nmax) contrib <- outer(cy, cx, "*") if(squared) contrib <- contrib^2 ansmat <- ansmat + contrib } answer <- Xquery answer[] <- ansmat } else stop("Unrecognised format for Xquery") return(answer) } spatstat.core/R/psst.R0000644000176200001440000001403314144333463014367 0ustar liggesusers# # psst.R # # Computes the GNZ contrast of delta-f for any function f # # $Revision: 1.9 $ $Date: 2015/07/11 08:19:26 $ # ################################################################################ # psst <- function(object, fun, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL, funargs=list(correction="best"), verbose=TRUE) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) 
if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED) && spatstat.options("eroded.intensity")) { # XUSED <- USED[Z] # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # XUSED <- rep.int(TRUE, npts) # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) rescts <- with(resid, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~S(r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~S") # evaluate fun(X) for data fX <- do.call(fun, append(list(X, r=rvals), funargs)) fXunits <- unitname(fX) # Extract 'best' estimate only fX <- with(fX, .y) zero <- numeric(length(fX)) # sum over all quadrature points iused <- seq(U$n)[USED] nused <- length(iused) if(verbose) cat(paste("\nProcessing", nused, "quadrature points...")) # running sums & integrals sumX <- zero integ <- integ2 <- zero # template for X \cup {u} uX <- superimpose(U[1], X, W=Win, check=FALSE) Ux <- U$x Uy <- U$y # if(verbose) pstate <- list() # for(j in seq(nused)) { i <- iused[j] wi <- wc[i] if(Z[i]) { # data point fXi <- do.call(fun, append(list(X[-i], r=rvals), funargs)) fXi <- with(fXi, .y) deltaf <- fX - fXi sumX <- sumX + deltaf } else { # dummy point uX$x[1] <- Ux[i] uX$y[1] <- Uy[i] fuX <- do.call(fun, append(list(uX, r=rvals), funargs)) fuX <- with(fuX, .y) deltaf <- fuX - fX } integ <- integ + wi * deltaf integ2 <- integ2 + wi * deltaf^2 # if(j %% 500 == 0) { cat("[garbage ") gc() cat("collected]") } if(verbose) pstate <- progressreport(j, nused, state=pstate) } sdv <- sqrt(integ2) res <- sumX - integ ans <- bind.fv(ans, data.frame(dat=sumX, com=integ, var=integ2, sd=sdv, hi=2*sdv, lo=-2*sdv, res=res, stdres=res/sdv), c("Sigma~Delta~S(r)", "bold(C)~Delta~S(r)", "bold(C)^2~Delta~S(r)", "sqrt(bold(C)^2~Delta~S(r))", "%s[hi](r)", "%s[lo](r)", "bold(R)~Delta~S(r)", "bold(T)~Delta~S(r)"), c("data pseudosum (contribution to %s)", "model compensator (contribution to %s)", "pseudovariance of %s", "sqrt(pseudovariance) of %s", "upper 2 sigma critical band for %s", "lower 2 sigma critical band for %s", "pseudoresidual function %s", "standardised pseudoresidual function %s"), "res") fvnames(ans,".") <- c("res", "hi", "lo", 
"theo") unitname(ans) <- fXunits # return(ans) } npfun <- function(X, ..., r) { npts <- npoints(X) # initialise fv object df <- data.frame(r=r, theo=0, npoint=npts) desc <- c("distance argument r", "value 0", "value equal to number of points") ans <- fv(df, "r", substitute(npoints(r), NULL), "npoint", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="npoints") unitname(ans) <- unitname(X) return(ans) } nndcumfun <- function(X, ..., r) { nn <- nndist(X) bk <- breakpts.from.r(r) # nn <- nn[nn <= bdist.points(X)] h <- whist(nn, bk$val) # initialise fv object df <- data.frame(r=r, theo=0, obs=h) desc <- c("distance argument r", "value 0", "observed count") ans <- fv(df, "r", substitute(nndcount(r), NULL), "obs", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="nndcount") unitname(ans) <- unitname(X) return(ans) } spatstat.core/R/nndensity.R0000644000176200001440000000173114144333463015412 0ustar liggesusers# # nndensity.R # # Density estimation based on nn distance # # $Revision: 1.3 $ $Date: 2014/10/24 00:22:30 $ # nndensity <- function(x, ...) { UseMethod("nndensity") } nndensity.ppp <- function(x, k, ..., verbose=TRUE) { if(missing(k) || is.null(k)) { k <- round(sqrt(npoints(x))) if(verbose) cat(paste("k=", k, "\n")) } else if(k == 1) warning("k=1 will produce strange results") # distance to k-th nearest neighbour D <- nnmap(x, k=k, what="dist", ...) # area searched A <- eval.im(pi * D^2) # distance to boundary B <- bdist.pixels(as.owin(D)) # handle edge effects edge <- solutionset(B < D) # centres of all pixels where edge effect occurs xy <- rasterxy.mask(edge, drop=TRUE) # corresponding values of distance rr <- D[edge, drop=TRUE] # compute actual search area X <- as.ppp(xy, W=as.owin(x), check=FALSE) A[edge] <- discpartarea(X, matrix(rr, ncol=1)) # finally compute intensity estimate L <- eval.im(k/A) return(L) } spatstat.core/R/localKcross.R0000644000176200001440000005246714144333462015671 0ustar liggesusers#' #' localKcross.R #' #' original by Ege Rubak #' #' $Revision: 1.15 $ $Date: 2020/12/19 05:25:06 $ "localLcross" <- function(X, from, to, ..., rmax = NULL, correction = "Ripley") { localKcross(X, from, to, ..., rmax = rmax, correction = correction, wantL = TRUE) } "localLdot" <- function(X, from, ..., rmax = NULL, correction = "Ripley") { localKdot(X, from, ..., rmax = rmax, correction = correction, wantL = TRUE) } "localKcross" <- function(X, from, to, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] if(missing(to)) to <- levels(marx)[2] I <- (marx == from) if(!any(I)) stop(paste("No points have mark =", from)) Iexplain <- paste("points having mark =", from) Ikey <- make.parseable(paste(from)) if(from == to) { ## use Kest XI <- X[I] dont.complain.about(XI) result <- do.call(localK, resolve.defaults(list(X=quote(XI), rmax=rmax, correction=correction, verbose=verbose, rvalue=rvalue), list(...))) } else { J <- (marx == to) if(!any(J)) stop(paste("No points have mark =", to)) Jexplain <- paste("points having mark =", to) Jkey <- make.parseable(paste(to)) result <-localKmultiEngine(X, I, J, ..., Ikey=Ikey, Jkey=Jkey, Iexplain=Iexplain, Jexplain=Jexplain, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } return(result) } "localKdot" <- function(X, from, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") 
if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] I <- (marx == from) J <- rep.int(TRUE, X$n) # i.e. all points Iexplain <- paste("points having mark =", from) Jexplain <- "points of any type" Ikey <- make.parseable(paste(from)) Jkey <- "." if(!any(I)) stop(paste("No points have mark =", from)) result <- localKmultiEngine(X, I, J, ..., Iexplain=Iexplain, Jexplain=Jexplain, Ikey=Ikey, Jkey=Jkey, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) attr(result, "indices") <- list(from=from) return(result) } "localKcross.inhom" <- function(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, ..., rmax = NULL, correction = "Ripley", sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(from)) from <- levels(marx)[1] if(missing(to)) to <- levels(marx)[2] I <- (marx == from) J <- (marx == to) Iexplain <- paste("points having mark =", from) Jexplain <- paste("points having mark =", to) Ikey <- make.parseable(paste(from)) Jkey <- make.parseable(paste(to)) K <- localKmultiEngine(X, I, J, lambdaFrom, lambdaTo, ..., rmax = rmax, Iexplain=Iexplain, Jexplain=Jexplain, Ikey=Ikey, Jkey=Jkey, correction=correction, sigma=sigma, varcov=varcov, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) attr(K, "indices") <- list(from=from, to=to) return(K) } localLcross.inhom <- function(X, from, to, lambdaFrom = NULL, lambdaTo = NULL, ..., rmax = NULL) { localKcross.inhom(X, from, to, lambdaFrom, lambdaTo, ..., rmax = rmax, wantL = TRUE) } "localKmultiEngine" <- function(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, ..., rmax = NULL, wantL=FALSE, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE, Iexplain="points satisfying condition I", Jexplain="points satisfying condition J", Ikey="I", Jkey="J", miss.update=missing(update), miss.leave=missing(leaveoneout)) { npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda.ave <- npts/areaW from <- ppsubset(X, from) to <- ppsubset(X, to) if(is.null(from) || is.null(to)) stop("from and to must be valid subset indices") if(!any(from)) stop("no points belong to subset from") if(!any(to)) stop("no points belong to subset to") X_from <- X[from] X_to <- X[to] n_from <- sum(from) n_to <- sum(to) lambdaFrom.ave <- n_from/areaW lambdaTo.ave <- n_to/areaW weighted <- !is.null(lambdaFrom) || !is.null(lambdaTo) || !is.null(lambdaX) if(weighted){ lambdas <- resolve.lambda.cross(X, from, to, lambdaFrom, lambdaTo, ..., lambdaX = lambdaX, sigma = sigma, varcov = varcov, leaveoneout = leaveoneout, update = update, Iexplain=Iexplain, Jexplain=Jexplain, miss.update=miss.update, miss.leave=miss.leave, caller = "localKcrossEngine") lambdaFrom <- lambdas$lambdaI lambdaTo <- lambdas$lambdaJ } if(is.null(rvalue)) rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", 
translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- crosspairs(X_from, X_to, rmax) # close$i and close$j are serial numbers in X_from and X_to respectively; # map them to original serial numbers in X orig <- seq_len(npts) imap <- orig[from] jmap <- orig[to] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(from & to)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] close$xi <- close$xi[ok] close$xj <- close$xj[ok] close$yi <- close$yi[ok] close$yj <- close$yj[ok] } } # extract information for these pairs (relative to orderings of X_from, X_to) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i J <- close$j if(weighted) { ## lambdaI <- lambdaFrom[I] ## not used lambdaJ <- lambdaTo[J] ## weightI <- 1/lambdaI ## not used weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), n_from)) labl <- desc <- character(n_from) if(verbose) state <- list() switch(correction, none={ # uncorrected! For demonstration purposes only! for(i in 1:n_from) { ii <- (I == i) ## Below wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights Knone <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. if(weighted) Knone <- Knone * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Knone icode <- numalign(i, n_from) names(df)[i] <- paste("un", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:n_from) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. if(weighted) Ktrans <- Ktrans * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Ktrans icode <- numalign(i, n_from) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:n_from) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. 
if(weighted) Kiso <- Kiso * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Kiso icode <- numalign(i, n_from) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(df[nr,])) } ## function value table required ## add r and theo df <- cbind(df, data.frame(r=r, theo=if(wantL) r else (pi * r^2))) desc <- c(desc, c("distance argument r", "theoretical Poisson %s")) labl <- c(labl, c("r", "{%s[%s]^{pois}}(r)")) ## Handle 'dot' symbol if(identical(Jkey, ".")) { Jkeyname <- "symbol(\"\\267\")" Jkeylab <- quote(dot) Jkeyexpr <- quote(symbol("\267")) } else Jkeyname <- Jkeylab <- Jkeyexpr <- Jkey ## Determine fv labels if(!wantL) { if(!weighted) { fnam <- c("K", paste0("list(loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(K[loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(K[list(loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } else { fnam <- c("K", paste0("list(inhom,loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(K[inhom,loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(K[list(inhom,loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } } else { if(!weighted) { fnam <- c("L", paste0("list(loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(L[loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(L[list(loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } else { fnam <- c("L", paste0("list(inhom,loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(L[inhom,loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(L[list(inhom,loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } } # create fv object K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam, yexp=yexp) # default is to display them all formula(K) <- . 
~ r unitname(K) <- unitname(X) attr(K, "correction") <- correction if(weighted && lambdas$danger) attr(K, "dangerous") <- lambdas$dangerous ### TEMPORARY HACK to save info about the "from" points attr(K, "Xfrom") <- X_from return(K) } resolve.lambda.cross <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., lambdaX=NULL, sigma=NULL, varcov=NULL, leaveoneout=TRUE, update=TRUE, lambdaIJ=NULL, Iexplain="points satisfying condition I", Jexplain="points satisfying condition J", miss.update=missing(update), miss.leave=missing(leaveoneout), caller="direct"){ dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE XI <- X[I] XJ <- X[J] nI <- npoints(XI) nJ <- npoints(XJ) lamIname <- short.deparse(substitute(lambdaI)) lamJname <- short.deparse(substitute(lambdaJ)) bothnames <- c(lamIname, lamJname) givenI <- !is.null(lambdaI) givenJ <- !is.null(lambdaJ) givenX <- !is.null(lambdaX) if(givenI != givenJ) { givenone <- bothnames[c(givenI, givenJ)] missedone <- setdiff(bothnames, givenone) stop(paste("If", givenone, "is given, then", missedone, "should also be given"), call.=FALSE) } if(givenX && givenI && givenJ) warning(paste(paste(sQuote(bothnames), collapse=" and "), "were ignored because", sQuote("lambdaX"), "was given"), call.=FALSE) if(givenX) { ## Intensity values for all points of X if(is.im(lambdaX)) { ## Look up intensity values lambdaI <- safelookup(lambdaX, XI) lambdaJ <- safelookup(lambdaX, XJ) } else if(is.imlist(lambdaX) && is.multitype(X) && length(lambdaX) == length(levels(marks(X)))) { ## Look up intensity values Y <- split(X) lamY <- mapply("[", x=lambdaX, i=Y, SIMPLIFY=FALSE) lamX <- unsplit(lamY, marks(X)) lambdaI <- lamX[I] lambdaJ <- lamX[J] } else if(is.function(lambdaX)) { ## evaluate function at locations if(!is.marked(X) || length(formals(lambdaX)) == 2) { lambdaI <- lambdaX(XI$x, XI$y) lambdaJ <- lambdaX(XJ$x, XJ$y) } else { lambdaI <- lambdaX(XI$x, XI$y, marks(XI)) lambdaJ <- lambdaX(XJ$x, XJ$y, marks(XJ)) } } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) { ## vector of intensity values if(length(lambdaX) != npoints(X)) stop(paste("The length of", sQuote("lambdaX"), "should equal the number of points of X")) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } else if(is.ppm(lambdaX) || is.kppm(lambdaX) || is.dppm(lambdaX)) { ## point process model provides intensity model <- lambdaX if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] dangerI <- dangerJ <- FALSE dangerous <- "lambdaIJ" if(miss.update & caller == "Kmulti.inhom") warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste("Argument lambdaX is not understood:", "it should be a numeric vector,", "an image, a function(x,y)", "or a fitted point process model (ppm, kppm or dppm)")) } else { ## lambdaI, lambdaJ expected if(!givenI) { ## estimate intensity dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaI)) { ## look up intensity values lambdaI <- 
safelookup(lambdaI, XI) } else if(is.function(lambdaI)) { ## evaluate function at locations lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { ## validate intensity vector check.nvector(lambdaI, nI, things=Iexplain) } else if(is.ppm(lambdaI) || is.kppm(lambdaI) || is.dppm(lambdaI)) { ## point process model provides intensity model <- lambdaI if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaI <- lambdaX[I] dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") if(miss.update && caller == "Kmulti.inhom") warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaI"), "should be a vector or an image")) if(!givenJ) { ## estimate intensity dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaJ)) { ## look up intensity values lambdaJ <- safelookup(lambdaJ, XJ) } else if(is.function(lambdaJ)) { ## evaluate function at locations lambdaJ <- lambdaJ(XJ$x, XJ$y) } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) { ## validate intensity vector check.nvector(lambdaJ, nJ, things=Jexplain) } else if(is.ppm(lambdaJ) || is.kppm(lambdaJ) || is.dppm(lambdaJ)) { ## point process model provides intensity model <- lambdaJ if(!update) { ## just use intensity of fitted model lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaJ <- lambdaX[J] dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") if(miss.update & caller == "Kmulti.inhom") warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) } ## Weight for each pair if(!is.null(lambdaIJ)) { dangerIJ <- TRUE dangerous <- union(dangerous, "lambdaIJ") if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iexplain)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jexplain)) } else { dangerIJ <- FALSE } danger <- dangerI || dangerJ || dangerIJ return(list(lambdaI = lambdaI, lambdaJ = lambdaJ, lambdaIJ=lambdaIJ, danger = danger, dangerous = dangerous)) } spatstat.core/R/Gmulti.R0000644000176200001440000001527114144333461014642 0ustar liggesusers# Gmulti.S # # Compute estimates of nearest neighbour distance distribution functions # for multitype point patterns # # S functions: # Gcross G_{ij} # Gdot G_{i\bullet} # Gmulti (generic) # # $Revision: 4.45 $ $Date: 2020/10/30 03:59:45 $ # ################################################################################ "Gcross" <- function(X, i, j, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han")) { # computes G_{ij} estimates # # X marked point pattern (of class 'ppp') # i,j the two mark values to be compared # # r: (optional) values of argument r # breaks: (optional) 
breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X, dfok=FALSE)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") if(i == j){ result <- Gest(X[I], r=r, breaks=breaks, ...) } else { J <- (marx == j) if(sum(J) == 0) stop("No points are of type j") result <- Gmulti(X, I, J, r=r, breaks=breaks, disjoint=FALSE, ..., correction=correction) } result <- rebadge.as.crossfun(result, "G", NULL, i, j) return(result) } "Gdot" <- function(X, i, r=NULL, breaks=NULL, ..., correction=c("km","rs","han")) { # Computes estimate of # G_{i\bullet}(t) = # P( a further point of pattern in B(0,t)| a type i point at 0 ) # # X marked point pattern (of class ppp) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") J <- rep.int(TRUE, X$n) # i.e. all points # result <- Gmulti(X, I, J, r, breaks, disjoint=FALSE, ..., correction=correction) result <- rebadge.as.dotfun(result, "G", NULL, i) return(result) } ########## "Gmulti" <- function(X, I, J, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=c("rs", "km", "han")) { # # engine for computing the estimate of G_{ij} or G_{i\bullet} # depending on selection of I, J # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # verifyclass(X, "ppp") W <- X$window npts <- npoints(X) areaW <- area(W) # check I and J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop("No points satisfy condition I") if(nJ == 0) stop("No points satisfy condition J") if(is.null(disjoint)) disjoint <- !any(I & J) # choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("rs", "km", "han") correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) # determine breakpoints for r values lamJ <- nJ/areaW rmaxdefault <- rmax.rule("G", W, lamJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) # brks <- breaks$val rmax <- breaks$max rvals <- breaks$r zeroes <- numeric(length(rvals)) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lamJ * pi * rvals^2)) fname <- c("G", "list(I,J)") Z <- fv(df, "r", quote(G[I,J](r)), "theo", . 
~ r, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(G[list(I,J)](r))) # "type I to type J" nearest neighbour distances XI <- X[I] XJ <- X[J] if(disjoint) nnd <- nncross(XI, XJ, what="dist") else { seqnp <- seq_len(npts) iX <- seqnp[I] iY <- seqnp[J] nnd <- nncross(XI, XJ, iX, iY, what="dist") } # distance to boundary from each type i point bdry <- bdist.points(XI) # observations o <- pmin.int(nnd,bdry) # censoring indicators d <- (nnd <= bdry) # # calculate estimates if("none" %in% correction) { # UNCORRECTED e.d.f. of nearest neighbour distances: use with care if(npts == 0) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), makefvlabel(NULL, "hat", fname, "raw"), "uncorrected estimate of %s", "raw") } if("han" %in% correction) { # Hanisch style estimator if(npts == 0) G <- zeroes else { # uncensored distances x <- nnd[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } # add to fv object Z <- bind.fv(Z, data.frame(han=G), makefvlabel(NULL, "hat", fname, "han"), "Hanisch estimate of %s", "han") # modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes) else { result <- km.rs(o, bdry, d, breaks) result <- as.data.frame(result[c("rs", "km", "hazard")]) } # add to fv object Z <- bind.fv(Z, result, c(makefvlabel(NULL, "hat", fname, "bord"), makefvlabel(NULL, "hat", fname, "km"), "hazard(r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)"), "km") # modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) unitname(Z) <- unitname(X) return(Z) } spatstat.core/R/parameters.R0000644000176200001440000000121314144333463015535 0ustar liggesusers## ## parameters.R ## ## $Revision: 1.2 $ $Date: 2015/05/08 04:27:15 $ ## parameters <- function(model, ...) { UseMethod("parameters") } parameters.ppm <- function(model, ...) { ss <- summary(model, quick="no variances") out <- c(list(trend=ss$trend$value), ss$covfunargs, ss$interaction$interaction$par, ss$interaction$sensible$param) return(out) } parameters.kppm <- function(model, ...) 
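## Illustration (sketch): parameters() flattens the fitted components
## into one named list, e.g.
##   fit <- kppm(redwood ~ 1, "Thomas")
##   parameters(fit)   # trend, cluster parameters (kappa, scale) and mu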
{ ss <- summary(model, quick="no variances") out <- c(list(trend=ss$trend$trend$value), ss$covfunargs, ss$clustpar, ss$clustargs, list(mu=ss$mu)) return(out) } spatstat.core/R/covariates.R0000644000176200001440000000306714144333462015542 0ustar liggesusers# # covariates.R # # evaluate covariates # # $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $ # evalCovariate <- function(covariate, locations) { # evaluate covariate of any kind at specified locations covvalues <- if(is.im(covariate)) safelookup(covariate, locations) else if(is.function(covariate)) covariate(locations$x, locations$y) else if(is.numeric(covariate) || is.factor(covariate)) { if(length(covariate) == 1L) rep.int(covariate, length(locations$x)) else if(length(covariate) == length(locations$x)) covariate else stop("Inappropriate length for covariate vector") } else stop("Covariate should be an image, a function or a factor/numeric vector") return(covvalues) } ppmCovariates <- function(model) { # generate list of all covariates in ppm (excluding marks) stopifnot(is.ppm(model)) co <- as.list(model$covariates) xy <- list(x=function(x,y){x}, y=function(x,y){y}) coplus <- append(co, xy) return(as.anylist(coplus)) } findCovariate <- function(covname, scope, scopename=NULL) { # find the named covariate in the given ppm object or list if(is.ppm(scope)) { covlist <- ppmCovariates(scope) if(missing(scopename)) scopename <- "covariates in model" } else if(is.list(scope)) { covlist <- scope } else stop("scope should be a named list of covariates, or a ppm object") if(!(covname %in% names(covlist))) stop(paste("covariate", dQuote(covname), "not found", if(!is.null(scopename)) paste("amongst", scopename) else NULL)) covlist[[covname]] } spatstat.core/R/logistic.R0000644000176200001440000002313114144333462015211 0ustar liggesusers## ## logistic.R ## ## $Revision: 1.28 $ $Date: 2020/11/27 03:04:30 $ ## ## Logistic composite likelihood method ## ## (quadscheme construction is now in 'logiquad.R') logi.engine <- function(Q, trend = ~1, interaction, ..., covariates=NULL, subsetexpr=NULL, clipwin=NULL, correction="border", rbord=reach(interaction), covfunargs=list(), allcovar=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, justQ = FALSE, savecomputed = FALSE, precomputed = NULL, VB=FALSE ){ if(is.null(trend)) trend <- ~1 if(is.null(interaction)) interaction <- Poisson() want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.poisson(interaction) want.subset <- !is.null(subsetexpr) # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # rbord applies only to border correction if(correction == "border") { check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") } else rbord <- 0 # backdoor stuff if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } # create dummy points if(inherits(Q, "ppp")){ Xplus <- Q Q <- quadscheme.logi(Xplus, ...) 
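## (sketch of intended use: this engine is reached via
##    ppm(X ~ trend, interaction, method="logi")
##  in which case a point pattern X is augmented here with logistic
##  dummy points generated by quadscheme.logi)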
D <- Q$dummy Dinfo <- Q$param } else if(checkfields(Q, c("data", "dummy"))) { Xplus <- Q$data D <- Q$dummy Dinfo <- Q$param if(is.null(Dinfo)){ Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D))) } Q <- quadscheme.logi(Xplus, D) } else stop("Format of object Q is not understood") ## clip to subset? if(!is.null(clipwin)) { if(is.data.frame(covariates)) { ok <- inside.owin(union.quad(Q), w=clipwin) covariates <- covariates[ok, , drop=FALSE] } Q <- Q[clipwin] Xplus <- Q$data D <- Q$dummy } if (justQ) return(Q) ### Dirty way of recording arguments so that the model can be refitted later (should probably be done using call, eval, envir, etc.): extraargs <- list(covfunargs = covfunargs, allcovar = allcovar, vnamebase = vnamebase, vnameprefix = vnameprefix) extraargs <- append(extraargs, list(...)) ## Dummy intensity if(correction == "border" && Dinfo$how=="grid"){ Dbord <- D[bdist.points(D)>=rbord] Dinfo$rho <- npoints(Dbord)/(eroded.areas(as.owin(Dbord), rbord)*markspace.integral(Dbord)) } rho <- Dinfo$rho ##Setting the B from Barker dynamics (relative to dummy intensity) B <- list(...)$Barker if(is.null(B)) B <- 1 B <- B*rho Dinfo <- append(Dinfo, list(B=B)) Dinfo <- append(Dinfo, list(extraargs=extraargs)) # Wplus <- as.owin(Xplus) nXplus <- npoints(Xplus) U <- superimpose(Xplus, D, W=Wplus, check=FALSE) # E <- equalpairs(U, Xplus, marked = is.marked(Xplus)) E <- cbind(1:nXplus, 1:nXplus) # computed <- if (savecomputed) list(X = Xplus, Q = Q, U = U) else list() # assemble covariate data frame if(want.trend || want.subset) { tvars <- variablesinformula(trend) if(want.subset) tvars <- union(tvars, all.vars(subsetexpr)) if(!is.data.frame(covariates)) { ## resolve 'external' covariates externalvars <- setdiff(tvars, c("x", "y", "marks")) tenv <- environment(trend) covariates <- getdataobjects(externalvars, tenv, covariates, fatal=TRUE) } wantxy <- c("x", "y") %in% tvars wantxy <- wantxy | rep.int(allcovar, 2) cvdf <- data.frame(x=U$x, y=U$y)[, wantxy, drop=FALSE] if(!is.null(covariates)) { df <- mpl.get.covariates(covariates, U, "quadrature points", covfunargs) cvdf <- cbind(cvdf, df) } wantmarks <- "marks" %in% tvars if(wantmarks) cvdf <- cbind(cvdf, marks = marks(U)) } else cvdf <- NULL # evaluate interaction sufficient statistics if (!is.null(ss <- interaction$selfstart)) interaction <- ss(Xplus, interaction) V <- evalInteraction(Xplus, U, E, interaction, correction, precomputed = precomputed, savecomputed = savecomputed) if(!is.matrix(V)) stop("evalInteraction did not return a matrix") if (savecomputed) computed <- append(computed, attr(V, "computed")) IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, ncol(V)) # determine names if(ncol(V) > 0) { Vnames <- colnames(V) if(is.null(Vnames)) { nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1L] else paste(vnamebase[2L], 1:nc, sep="") colnames(V) <- Vnames } else if(!is.null(vnameprefix)) { Vnames <- paste(vnameprefix, Vnames, sep="") colnames(V) <- Vnames } } else Vnames <- character(0) # combine all data glmdata <- as.data.frame(V) if(!is.null(cvdf)) glmdata <- cbind(glmdata, cvdf) # construct response and weights ok <- if(correction == "border") (bdist.points(U) >= rbord) else rep.int(TRUE, npoints(U)) # Keep only those quadrature points for which the # conditional intensity is nonzero. 
KEEP <- if(ncol(V)>0) matrowall(V != -Inf) else rep.int(TRUE, npoints(U)) ok <- ok & KEEP wei <- c(rep.int(1,npoints(Xplus)),rep.int(B/rho,npoints(D))) resp <- c(rep.int(1,npoints(Xplus)),rep.int(0,npoints(D))) ## User-defined subset: if(!is.null(subsetexpr)) { USERSUBSET <- eval(subsetexpr, glmdata, environment(trend)) ok <- ok & USERSUBSET } # add offset, subset and weights to data frame # using reserved names beginning with ".logi." glmdata <- cbind(glmdata, .logi.Y = resp, .logi.B = B, .logi.w = wei, .logi.ok =ok) # build glm formula # (reserved names begin with ".logi.") trendpart <- paste(as.character(trend), collapse=" ") fmla <- paste(".logi.Y ", trendpart) # Interaction terms if(want.inter) { VN <- Vnames # enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") fmla <- paste(c(fmla, VN), collapse="+") } # add offset intrinsic to logistic technique fmla <- paste(fmla, "offset(-log(.logi.B))", sep="+") fmla <- as.formula(fmla) # to satisfy package checker: .logi.B <- B .logi.w <- wei .logi.ok <- ok .logi.Y <- resp # suppress warnings from code checkers dont.complain.about(.logi.B, .logi.w, .logi.ok, .logi.Y) # go ##fit <- glm(fmla, data=glmdata, ## family=binomial(), subset = .logi.ok, weights = .logi.w) fit <- if(VB) vblogit.fmla(fmla, data = glmdata, subset = .logi.ok, weights = .logi.w, ...) else glm(fmla, data = glmdata, family = binomial(), subset = .logi.ok, weights = .logi.w) environment(fit$terms) <- sys.frame(sys.nframe()) ## Fitted coeffs co <- coef(fit) fitin <- fii(interaction, co, Vnames, IsOffset) ## Saturated log-likelihood: satlogpl <- sum(ok*resp*log(B)) ## Max. value of log-likelihood: maxlogpl <- logLik(fit) + satlogpl # Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2020/11/27 03:04:30 $") ## Compile results fit <- list(method = "logi", fitter = "glm", projected = FALSE, coef = co, trend = trend, interaction = interaction, fitin = fitin, Q = Q, maxlogpl = maxlogpl, satlogpl = satlogpl, internal = list(Vnames = Vnames, IsOffset=IsOffset, glmdata = glmdata, glmfit = fit, logistic = Dinfo, computed = computed, vnamebase=vnamebase, vnameprefix=vnameprefix, VB = if(VB) TRUE else NULL, priors = if(VB) fit$priors else NULL ), covariates = mpl.usable(covariates), covfunargs= covfunargs, subsetexpr = subsetexpr, correction = correction, rbord = rbord, fisher = NULL, varcov = NULL, # if(VB) fit$S else NULL, terms = terms(trend), version = the.version, problems = list() ) class(fit) <- "ppm" return(fit) } forbid.logi <- function(object) { if(object$method == "logi") stop("Sorry, this is not implemented for method=\'logi\'") return(invisible(NULL)) } spatstat.core/R/hybrid.R0000644000176200001440000002707414144333462014667 0ustar liggesusers# # # hybrid.R # # $Revision: 1.9 $ $Date: 2018/03/15 07:37:41 $ # # Hybrid of several interactions # # Hybrid() create a hybrid of several interactions # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hybrid <- local({ Hybrid <- function(...) { interlist <- list(...) 
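    #' A hybrid interaction multiplies the conditional intensities
    #' (equivalently, adds the log-potentials) of its components.
    #' Illustrative call only, using two standard interactions:
    #'   Hybrid(Strauss(0.05), Geyer(0.07, sat=2))
    #' combines Strauss inhibition at short range with Geyer saturation.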
n <- length(interlist) if(n == 0) stop("No arguments given") #' arguments may be interaction objects or ppm objects isinter <- unlist(lapply(interlist, is.interact)) isppm <- unlist(lapply(interlist, is.ppm)) if(any(nbg <- !(isinter | isppm))) stop(paste(ngettext(sum(nbg), "Argument", "Arguments"), paste(which(nbg), collapse=", "), ngettext(sum(nbg), "is not an interaction", "are not interactions"))) #' ensure the list contains only interaction objects if(any(isppm)) interlist[isppm] <- lapply(interlist[isppm], as.interact) #' recursively expand any components that are themselves hybrids while(any(ishybrid <- unlist(lapply(interlist, is.hybrid)))) { i <- min(which(ishybrid)) n <- length(interlist) expandi <- interlist[[i]]$par interlist <- c(if(i > 1) interlist[1:(i-1L)] else NULL, expandi, if(i < n) interlist[(i+1L):n] else NULL) } #' ncomponents <- length(interlist) if(ncomponents == 1) { #' single interaction - return it return(interlist[[1L]]) } #' ensure all components have names names(interlist) <- good.names(names(interlist), "HybridComponent", 1:ncomponents) #' check for infinite potential values haveInf <- lapply(interlist, getElement, name="hasInf") haveInf <- !sapply(haveInf, identical, y=FALSE) hasInf <- any(haveInf) #' build object out <- list( name = "Hybrid interaction", creator = "Hybrid", family = hybrid.family, pot = NULL, par = interlist, parnames = names(interlist), hasInf = hasInf, selfstart = function(X, self) { ilist <- self$par sslist <- lapply(ilist, getElement, name="selfstart") has.ss <- sapply(sslist, is.function) if(any(has.ss)) { ilist[has.ss] <- lapply(ilist[has.ss], invokeSelfStart, Y=X) self$par <- ilist } return(self) }, init = NULL, update = NULL, # default OK print = function(self, ..., family=FALSE, brief=FALSE) { if(family) print.isf(self$family) ncomponents <- length(self$par) clabs <- self$parnames splat("Hybrid of", ncomponents, "components:", commasep(sQuote(clabs))) for(i in 1:ncomponents) { splat(paste0(clabs[i], ":")) print(self$par[[i]], ..., family=family, brief=brief) } parbreak() return(invisible(NULL)) }, interpret = function(coeffs, self) { interlist <- self$par result <- list(param=list(), inames=character(0), printable=list()) for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
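          #' (Coefficient names carry the component tag as a prefix
          #' separated by ".", so a component tagged "A" owns coefficients
          #' such as "A.Interaction"; these names are illustrative, and the
          #' actual names depend on the component interaction.)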
#' extract them if(any(relevant)) { Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) #' invoke the self-interpretation of interI interpretI <- interI$interpret if(is.function(interpretI)) { resultI <- interpretI(Crelevant, interI) paramI <- resultI$param prinI <- resultI$printable inamesI <- resultI$inames inamesI <- paste(nameI, inamesI) if(length(prinI) > 0) { result$param <- append(result$param, paramI) result$printable <- append(result$printable, list(prinI)) result$inames <- c(result$inames, inamesI) } } } } return(result) }, valid = function(coeffs, self) { #' check validity via mechanism used for 'rmhmodel' siminfo <- .Spatstat.Rmhinfo[["Hybrid interaction"]] Z <- siminfo(coeffs, self) cifs <- Z$cif pars <- Z$par ntypes <- Z$ntypes if((Ncif <- length(cifs)) == 1) { #' single cif pars <- append(pars, list(beta=rep.int(1, ntypes))) } else { for(i in 1:Ncif) pars[[i]] <- append(pars[[i]], list(beta=rep.int(1, ntypes[i]))) } RM <- rmhmodel(cif=cifs, par=pars, types=1:max(ntypes), stopinvalid=FALSE) return(RM$integrable) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) #' separate into components spl <- splitHybridInteraction(coeffs, self) interlist <- spl$interlist coeflist <- spl$coeflist #' compute projection for each component interaction Ncif <- length(interlist) projlist <- vector(mode="list", length=Ncif) nproj <- integer(Ncif) for(i in 1:Ncif) { coefsI <- coeflist[[i]] interI <- interlist[[i]] if(!is.interact(interI)) stop("Internal error: interlist entry is not an interaction") projI <- interI$project if(is.null(projI)) stop(paste("Projection is not yet implemented for a", interI$name)) p <- projI(coefsI, interI) #' p can be NULL (indicating no projection required for interI) #' or a single interaction or a list of interactions. if(is.null(p)) { if(Ncif == 1) return(NULL) # no projection required p <- list(NULL) nproj[i] <- 0 } else if(is.interact(p)) { p <- list(p) nproj[i] <- 1L } else if(is.list(p) && all(unlist(lapply(p, is.interact)))) { nproj[i] <- length(p) } else stop("Internal error: result of projection had wrong format") projlist[[i]] <- p } #' for interaction i there are nproj[i] **new** interactions to try. if(all(nproj == 0)) return(NULL) if(spatstat.options("project.fast")) { #' Single interaction required. #' Extract first entry from each list #' (there should be only one entry, but...) qlist <- lapply(projlist, "[[", i=1L) #' replace NULL entries by corresponding original interactions isnul <- unlist(lapply(qlist, is.null)) if(all(isnul)) return(NULL) if(any(isnul)) qlist[isnul] <- interlist[isnul] names(qlist) <- names(interlist) #' build hybrid and return result <- do.call(Hybrid, qlist) return(result) } #' Full case result <- list() for(i in which(nproj > 0)) { ntry <- nproj[i] tries <- projlist[[i]] for(j in 1:ntry) { #' assemble list of component interactions for hybrid qlist <- interlist qlist[[i]] <- tries[[j]] #' eliminate Poisson ispois <- unlist(lapply(qlist, is.poisson)) if(all(ispois)) { #' collapse to single Poisson h <- Poisson() } else { if(any(ispois)) qlist <- qlist[!ispois] h <- do.call(Hybrid, qlist) } result <- append(result, list(h)) } } #' 'result' is a list of interactions, each a hybrid if(length(result) == 1) result <- result[[1L]] return(result) }, irange = function(self, coeffs=NA, epsilon=0, ...) { interlist <- self$par answer <- 0 for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. 
<- paste(nameI, ".", sep="")
         #' find coefficients with prefix that exactly matches nameI.
         if(all(is.na(coeffs))) Crelevant <- NA else {
           Cname <- names(coeffs)
           prefixlength <- nchar(nameI.)
           Cprefix <- substr(Cname, 1, prefixlength)
           relevant <- (Cprefix == nameI.)
           #' extract them
           Crelevant <- coeffs[relevant]
           names(Crelevant) <-
             substr(Cname[relevant], prefixlength+1, max(nchar(Cname)))
         }
         #' compute reach
         reachI <- interI$irange
         if(is.function(reachI)) {
           resultI <- reachI(interI,
                             coeffs=Crelevant, epsilon=epsilon, ...)
           answer <- max(answer, resultI)
         }
       }
       return(answer)
     },
     version=versionstring.spatstat()
  )
  class(out) <- "interact"
  return(out)
}

invokeSelfStart <- function(inte, Y) {
  ss <- inte$selfstart
  if(!is.function(ss)) return(inte)
  return(ss(Y, inte))
}

Hybrid
})

is.hybrid <- function(x) { UseMethod("is.hybrid") }

is.hybrid.interact <- function(x) {
  return(is.interact(x) && (x$name == "Hybrid interaction"))
}

is.hybrid.ppm <- function(x) {
  return(is.hybrid(as.interact(x)))
}

splitHybridInteraction <- function(coeffs, inte) {
  # For hybrids, $par is a list of the component interactions,
  # but coeffs is a numeric vector.
  # Split the coefficient vector into the relevant coeffs for each interaction
  interlist <- inte$par
  N <- length(interlist)
  coeflist <- vector(mode="list", length=N)
  for(i in 1:N) {
    interI <- interlist[[i]]
    # forbid hybrids-of-hybrids - these should not occur anyway
    if(interI$name == "Hybrid interaction")
      stop("Hybrids of hybrid interactions are not implemented")
    # nameI is the tag that identifies I-th component in hybrid
    nameI <- names(interlist)[[i]]
    nameI. <- paste(nameI, ".", sep="")
    # find coefficients with prefix that exactly matches nameI.
    Cname <- names(coeffs)
    prefixlength <- nchar(nameI.)
    Cprefix <- substr(Cname, 1, prefixlength)
    relevant <- (Cprefix == nameI.)
# extract coefficients # (there may be none, if this interaction is Poisson or an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to interaction if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # store coeflist[[i]] <- coeffsI } names(coeflist) <- names(interlist) return(list(coeflist=coeflist, interlist=interlist)) } Hybrid <- intermaker(Hybrid, list(creator="Hybrid", name="general hybrid Gibbs process", par=list("..."=42), parnames=list("any list of interactions"))) spatstat.core/R/subfits.R0000644000176200001440000004713414144333464015066 0ustar liggesusers# # # $Revision: 1.54 $ $Date: 2021/08/31 06:40:35 $ # # subfits.new <- local({ subfits.new <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models", "interactions", "basicmodels")) if(what == "interactions") return(subfits.old(object, what, verbose)) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") fitter <- object$Fit$fitter FIT <- object$Fit$FIT trend <- object$trend random <- object$random info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist Isoffsetlist <- object$Fit$Isoffsetlist has.design <- info$has.design # has.random <- info$has.random announce("done.\n") ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up lists for results models <- rep(list(NULL), npat) interactions <- rep(list(NULL), npat) ## interactions announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names 
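      ## impliedcoefficients() has already translated the coefficients of
      ## the grand mppm fit into the canonical coefficients that a
      ## single-pattern model for row i would have; these values are
      ## copied into coefs.avail below, overwriting any existing entries.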
if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) iso <- if(nactive > 0) Isoffsetlist[[tagi]] else logical(0) interactions[[i]] <- fii(interi, coefs.avail, vni, iso) } announce("Done!\n") names(interactions) <- rownames ## if(what=="interactions") return(interactions) ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname moadf <- object$Fit$moadf fmla <- object$Fit$fmla ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Construct template for fake ppm object spv <- package_version(versionstring.spatstat()) fake.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2021/08/31 06:40:35 $") fake.call <- call("cannot.update", Q=NULL, trend=trend, interaction=NULL, covariates=NULL, correction=object$Info$correction, rbord = object$Info$rbord) fakemodel <- list( method = "mpl", fitter = fitter, coef = coef(object), trend = object$trend, interaction = NULL, fitin = NULL, Q = NULL, maxlogpl = NA, internal = list(glmfit = FIT, glmdata = NULL, Vnames = NULL, IsOffset = NULL, fmla = fmla, computed = list()), covariates = NULL, correction = object$Info$correction, rbord = object$Info$rbord, version = fake.version, problems = list(), fisher = fisher, varcov = varcov, call = fake.call, callstring = "cannot.update()", fake = TRUE) class(fakemodel) <- "ppm" ## Loop through point patterns announce("Generating models for each row...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images covariates <- if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.covar && has.design) ## Convert each data frame covariate value to an image covariates[dfvar] <- lapply(covariates[dfvar], as.im, W=Wi) ## Extract relevant interaction finte <- interactions[[i]] inte <- finte$interaction if(is.poisson.interact(inte)) inte <- NULL Vnames <- finte$Vnames if(length(Vnames) == 0) Vnames <- NULL IsOffset <- finte$IsOffset if(length(IsOffset) == 0) IsOffset <- NULL ## Construct fake ppm object fakemodel$interaction <- inte fakemodel$fitin <- finte fakemodel$Q <- Yi fakemodel$covariates <- covariates fakemodel$internal$glmdata <- moadf[moadf$id == i, ] fakemodel$internal$Vnames <- Vnames fakemodel$internal$IsOffset <- IsOffset fake.call$Q <- Yi fake.call$covariates <- covariates fakemodel$call <- fake.call fakemodel$callstring <- short.deparse(fake.call) ## store in list models[[i]] <- fakemodel } announce("done.\n") names(models) <- rownames models <- as.anylist(models) return(models) } Announce <- function(...) cat(...) Ignore <- function(...) 
{ NULL } subfits.new }) ## ///////////////////////////////////////////////////// subfits <- subfits.old <- local({ subfits.old <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models","interactions", "basicmodels")) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") trend <- object$trend random <- object$random use.gam <- object$Fit$use.gam info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist Isoffsetlist <- object$Fit$Isoffsetlist has.design <- info$has.design has.random <- info$has.random moadf <- object$Fit$moadf announce("done.\n") ## levels of any factors levelslist <- lapply(as.list(moadf), levelsAsFactor) isfactor <- !sapply(levelslist, is.null) ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## This code is currently not usable because the mapping is wrong reconcile <- FALSE if(reconcile) { ## determine which coefficients of main model are interaction terms announce("Identifying interaction coefficients...") md <- model.depends(object$Fit$FIT) usetags <- unlist(lapply(implcoef, colnames)) isVname <- apply(md[, usetags, drop=FALSE], 1, any) mainVnames <- row.names(md)[isVname] announce("done.\n") } ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up list for results results <- rep(list(NULL), npat) if(what == "interactions") { announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. 
coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) iso <- if(nactive > 0) Isoffsetlist[[tagi]] else logical(0) results[[i]] <- fii(interi, coefs.avail, vni, iso) } announce("Done!\n") names(results) <- rownames return(results) } ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Loop through point patterns announce("Looping through rows...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images scrambled <- FALSE if(!has.covar) { covariates <- NULL } else { covariates <- covariates.hf[i, , drop=TRUE, strip=FALSE] if(has.design) { ## Convert each data frame covariate value to an image imrowi <- lapply(covariates[dfvar], as.im, W=Wi) ## Problem: constant covariate leads to singular fit ## --------------- Hack: --------------------------- scrambled <- TRUE ## Construct fake data by resampling from possible values covar.vals <- lapply(as.list(covariates[dfvar, drop=FALSE]), possible) fake.imrowi <- lapply(covar.vals, scramble, W=Wi, Y=Yi$data) ## insert fake data into covariates covariates[dfvar] <- fake.imrowi ## ------------------ end hack ---------------------------- } ## identify factor-valued spatial covariates spatialfactors <- !dfvar & isfactor[names(covariates)] if(any(spatialfactors)) { ## problem: factor levels may be dropped ## more fakery... scrambled <- TRUE spfnames <- names(spatialfactors)[spatialfactors] covariates[spatialfactors] <- lapply(levelslist[spfnames], scramble, W=Wi, Y=Yi$data) } } ## Fit ppm to data for case i only ## using relevant interaction acti <- active[i,] nactive <- sum(acti) if(nactive == 1){ interi <- interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] fiti <- PiPiM(Yi, trend, interi, covariates=covariates, allcovar=has.random, use.gam=use.gam, vnamebase=tagi, vnameprefix=tagi) } else { fiti <- PiPiM(Yi, trend, Poisson(), covariates=covariates, allcovar=has.random, use.gam=use.gam) } fiti$scrambled <- scrambled ## fiti determines which coefficients are required coefi.fitted <- fiti$coef coefnames.wanted <- coefnames.fitted <- names(coefi.fitted) ## reconcile interaction coefficient names if(reconcile) { coefnames.translated <- coefnames.wanted ma <- match(coefnames.fitted, fiti$internal$Vnames) hit <- !is.na(ma) if(any(hit)) coefnames.translated[hit] <- mainVnames[ ma[hit] ] } ## take the required coefficients from the full mppm fit coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. 
coefs.avail[names(coefs.implied)] <- coefs.implied } ## check if(!all(coefnames.wanted %in% names(coefs.avail))) stop("Internal error: some fitted coefficients not accessible") ## hack entries in ppm object fiti$method <- "mppm" ## reset coefficients coefi.new <- coefs.avail[coefnames.wanted] fiti$coef.orig <- coefi.fitted ## (detected by summary.ppm, predict.ppm) fiti$theta <- fiti$coef <- coefi.new ## reset interaction coefficients in 'fii' object coef(fiti$fitin)[] <- coefi.new[names(coef(fiti$fitin))] ## ... and replace fake data by true data if(has.design) { fiti$internal$glmdata.scrambled <- gd <- fiti$internal$glmdata fixnames <- intersect(names(imrowi), colnames(gd)) for(nam in fixnames) { fiti$covariates[[nam]] <- imrowi[[nam]] fiti$internal$glmdata[[nam]] <- data[i, nam, drop=TRUE] } } ## Adjust rank of glm fit object # fiti$internal$glmfit$rank <- FIT$rank fiti$internal$glmfit$rank <- sum(is.finite(fiti$coef)) ## Fisher information and variance-covariance if known ## Extract submatrices for relevant parameters if(reconcile) { #' currently disabled because mapping is wrong if(!is.null(fisher)) { if(!reconcile) { fiti$fisher <- fisher[coefnames.wanted, coefnames.wanted, drop=FALSE] } else { fush <- fisher[coefnames.translated, coefnames.translated, drop=FALSE] dimnames(fush) <- list(coefnames.wanted, coefnames.wanted) fiti$fisher <- fush } } if(!is.null(varcov)) { if(!reconcile) { fiti$varcov <- varcov[coefnames.wanted, coefnames.wanted, drop=FALSE] } else { vc <- varcov[coefnames.translated, coefnames.translated, drop=FALSE] dimnames(vc) <- list(coefnames.wanted, coefnames.wanted) fiti$varcov <- vc } } } ## store in list results[[i]] <- fiti } announce("done.\n") names(results) <- rownames results <- as.anylist(results) return(results) } PiPiM <- function(Y, trend, inter, covariates, ..., allcovar=FALSE, use.gam=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL) { # This ensures that the model is fitted in a unique environment # so that it can be updated later. force(Y) force(trend) force(inter) force(covariates) force(allcovar) force(use.gam) force(vnamebase) force(vnameprefix) feet <- ppm(Y, trend, inter, covariates=covariates, allcovar=allcovar, use.gam=use.gam, forcefit=TRUE, vnamebase=vnamebase, vnameprefix=vnameprefix) return(feet) } possible <- function(z) { if(!is.factor(z)) unique(z) else factor(levels(z), levels=levels(z)) } scramble <- function(vals, W, Y) { W <- as.mask(W) npixels <- prod(W$dim) nvalues <- length(vals) npts <- npoints(Y) ## sample the possible values randomly at the non-data pixels sampled <- sample(vals, npixels, replace=TRUE) Z <- im(sampled, xcol=W$xcol, yrow=W$yrow) ## repeat the possible values cyclically at the data points if(npts >= 1) Z[Y] <- vals[1 + ((1:npts) %% nvalues)] return(Z) } Announce <- function(...) cat(...) Ignore <- function(...) { NULL } subfits.old }) cannot.update <- function(...) { stop("This model cannot be updated") } spatstat.core/R/zclustermodel.R0000644000176200001440000000654014144333464016277 0ustar liggesusers#' #' zclustermodel.R #' #' Experimental #' zclustermodel <- function(name="Thomas", ..., mu, kappa, scale) { if(missing(kappa)) stop("The parent intensity kappa must be given") if(missing(mu)) stop("The mean cluster size mu must be given") if(missing(scale)) stop("The cluster scale must be given") rules <- spatstatClusterModelInfo(name) par <- c(kappa=kappa, scale=scale) other <- rules$resolvedots(...) 
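  #' A 'zclustermodel' is a bare specification of a cluster process,
  #' with no fitting involved.  Hedged sketch of usage:
  #'   m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1)
  #'   pcfmodel(m)   # theoretical pair correlation function
  #' (pcfmodel and Kmodel methods for this class are defined below.)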
clustargs <- rules$checkclustargs(other$margs, old=FALSE) out <- list(name=name, rules=rules, par=par, mu=mu, clustargs=clustargs, other=other) class(out) <- "zclustermodel" return(out) } print.zclustermodel <- local({ print.zclustermodel <- function(x, ...) { with(x, { splat(rules$printmodelname(list(clustargs=clustargs))) newpar <- rules$checkpar(par, old=FALSE) splat("Parent intensity kappa =", blurb("kappa", newpar["kappa"])) splat("Cluster scale = ", newpar["scale"]) splat("Mean cluster size mu =", blurb("mu", mu)) if(length(clustargs) > 0) { hdr <- paste("Cluster shape", ngettext(length(clustargs), "parameter:", "parameters:")) if(is.list(clustargs) && all(sapply(clustargs, is.numeric)) && all(lengths(clustargs) == 1)) { splat(hdr, paste(names(clustargs), as.numeric(clustargs), sep="=", collapse=", ")) } else { splat(hdr) print(clustargs) } } }) return(invisible(NULL)) } blurb <- function(name, value) { if(is.numeric(value)) as.character(value) else if(is.im(value)) "[image]" else "[unrecognized format]" } print.zclustermodel }) pcfmodel.zclustermodel <- function(model, ...) { p <- model$rules$pcf mpar <- model$par other <- model$other f <- function(r) { do.call(p, c(list(par=mpar, rvals=r), other, model$rules["funaux"])) } return(f) } Kmodel.zclustermodel <- function(model, ...) { K <- model$rules$K mpar <- model$par other <- model$other f <- function(r) { as.numeric(do.call(K, c(list(par=mpar, rvals=r), other, model$rules["funaux"]))) } return(f) } intensity.zclustermodel <- function(X, ...) { X$par[["kappa"]] * X$mu } predict.zclustermodel <- function(object, ..., locations, type="intensity", ngrid=NULL) { ## limited use!!! if(!identical(type, "intensity")) stop("Sorry, only type='intensity' is implemented") lambda <- object$par[["kappa"]] * object$mu if(is.numeric(lambda)) { if(is.ppp(locations)) return(rep(lambda, npoints(locations))) W <- as.owin(locations) if(!is.mask(W)) W <- as.mask(W, dimyx=ngrid, ...) return(as.im(lambda, W=W)) } return(lambda[locations]) } clusterradius.zclustermodel <- function(model, ..., thresh = NULL, precision = FALSE) { do.call(clusterradius.character, resolve.defaults( list(model = model$name, thresh = thresh, precision = precision), list(...), as.list(model$par), model$clustargs) ) } reach.zclustermodel <- function(x, ..., epsilon) { thresh <- if(missing(epsilon)) NULL else epsilon 2 * clusterradius(x, ..., thresh=thresh) } spatstat.core/R/fii.R0000644000176200001440000001542514144333462014152 0ustar liggesusers# # fii.R # # Class of fitted interpoint interactions # # fii <- function(interaction=NULL, coefs=numeric(0), Vnames=character(0), IsOffset=NULL, vnameprefix="") { if(is.null(interaction)) interaction <- Poisson() stopifnot(is.interact(interaction)) if(is.poisson.interact(interaction)) { if(length(Vnames) > 0) stop("Coefficients inappropriate for Poisson process") } if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, length(Vnames)) else { stopifnot(is.logical(IsOffset)) stopifnot(length(IsOffset) == length(Vnames)) } hasInf <- !identical(interaction$hasInf, FALSE) out <- list(interaction=interaction, coefs=coefs, Vnames=Vnames, IsOffset=IsOffset, hasInf=hasInf, vnameprefix=vnameprefix) class(out) <- c("fii", class(out)) return(out) } summary.fii <- function(object, ...) 
{ y <- unclass(object) INTERACT <- object$interaction coefs <- object$coefs Vnames <- object$Vnames IsOffset <- object$IsOffset vnprefix <- object$vnameprefix y$poisson <- is.poisson.interact(INTERACT) thumbnail <- NULL if(y$poisson) { thumbnail <- "Poisson()" } else { if(!is.null(interpret <- INTERACT$interpret)) { ## invoke auto-interpretation feature newstyle <- newstyle.coeff.handling(INTERACT) Icoefs <- if(newstyle) coefs[Vnames[!IsOffset]] else coefs ## strip off vname prefix used by mppm if(npre <- sum(nchar(vnprefix))) names(Icoefs) <- substring(names(Icoefs), npre+1L) ## auto-interpret sensible <- interpret(Icoefs, INTERACT) if(!is.null(sensible) && sum(lengths(sensible))) { header <- paste("Fitted", sensible$inames) printable <- sensible$printable # Try to make a thumbnail description param <- sensible$param ipar <- INTERACT$par if(all(lengths(param) == 1) && all(lengths(ipar) == 1)) { allargs <- append(ipar, param) allargs <- lapply(allargs, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, allargs) } } else { # no fitted interaction parameters (e.g. Hard Core) header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } else { # fallback sensible <- NULL VN <- Vnames[!IsOffset] if(length(VN) > 0) { header <- "Fitted interaction terms" icoef <- coefs[VN] printable <- exp(unlist(icoef)) ricoef <- lapply(icoef, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, ricoef) } else { header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } y <- append(y, list(sensible=sensible, header=header, printable=printable, thumbnail=thumbnail)) } class(y) <- c("summary.fii", class(y)) return(y) } print.fii <- function(x, ...) { sx <- summary(x) do.call(print.summary.fii, resolve.defaults(list(x=sx, brief=TRUE), list(...))) return(invisible(NULL)) } print.summary.fii <- local({ #' hide internal arguments print.summary.fii <- function(x, ...) { PrintIt(x, ...) } PrintIt <- function(x, ..., prefix="Interaction: ", banner=TRUE, family = waxlyrical('extras'), brief = !family, tiny = !waxlyrical('errors')) { if(tiny) { #' use thumbnail if available thumbnail <- x$thumbnail if(!is.null(thumbnail)) { splat(thumbnail) return(invisible(NULL)) } } terselevel <- spatstat.options('terse') if(banner && !brief) cat(prefix) if(x$poisson) { splat("Poisson process") parbreak(terselevel) } else { print(x$interaction, family=family, brief=TRUE, banner=banner) if(!is.null(x$printable)) { nvalues <- length(x$printable) nheader <- length(x$header) if(nvalues == 1 || nvalues == nheader) { for(i in 1:nvalues) { hdi <- x$header[i] xpi <- x$printable[[i]] if(!is.list(xpi) && length(xpi) == 1) { splat(paste0(hdi, ":\t", xpi)) } else { splat(paste0(hdi, ":")) print(xpi) } } } else { splat(x$header) print(x$printable) } } } if(!brief) { co <- x$coefs[x$Vnames[!x$IsOffset]] if(length(co) > 0) { parbreak(terselevel) splat("Relevant coefficients:") print(co) } } return(invisible(NULL)) } print.summary.fii }) parameters.fii <- function(model, ...) { ss <- summary(model) out <- append(ss$interaction$par, ss$sensible$param) return(out) } coef.summary.fii <- function(object, ...) 
{ object$printable } reach.fii <- function(x, ..., epsilon=0) { inte <- x$interaction coeffs <- x$coefs Vnames <- x$Vnames if(is.poisson.interact(inte)) return(0) # get 'irange' function from interaction object irange <- inte$irange if(is.null(irange)) return(Inf) # apply 'irange' function using fitted coefficients if(newstyle.coeff.handling(inte)) ir <- irange(inte, coeffs[Vnames], epsilon=epsilon) else ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } plot.fii <- function(x, ...) { inte <- x$interaction if(is.poisson.interact(inte)) { message("Poisson interaction; nothing plotted") return(invisible(NULL)) } plfun <- inte$plot %orifnull% inte$family$plot if(is.null(plfun)) stop("Plotting not implemented for this type of interaction") plfun(x, ...) } fitin <- function(object) { UseMethod("fitin") } fitin.ppm <- function(object) { f <- object$fitin if(!is.null(f)) return(f) ## For compatibility with older versions inte <- object$interaction if(is.null(inte)) { f <- fii() # Poisson } else { coefs <- coef(object) Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset if(npre <- sum(nchar(object$internal$vnameprefix))) names(coefs) <- substring(names(coefs), npre+1L) # Internal names of regressor variables f <- fii(inte, coefs, Vnames, IsOffset) } unitname(f) <- unitname(data.ppm(object)) return(f) } as.interact.fii <- function(object) { verifyclass(object, "fii") return(object$interaction) } coef.fii <- function(object, ...) { verifyclass(object, "fii") return(object$coefs) } "coef<-.fii" <- function(object, ..., value) { verifyclass(object, "fii") check.nvector(value, length(object$coefs), things="coefficients", naok=TRUE) object$coefs <- value %orifnull% numeric(0) return(object) } spatstat.core/R/hackglmm.R0000644000176200001440000000665314144333464015173 0ustar liggesusers# hackglmm.R # $Revision: 1.8 $ $Date: 2020/11/04 00:27:07 $ hackglmmPQL <- function (fixed, random, family, data, correlation, weights, control, niter = 10, verbose = TRUE, subset, ..., reltol=1e-3) { if (is.character(family)) family <- get(family) if (is.function(family)) family <- family() if (is.null(family$family)) { print(family) stop("'family' not recognized") } m <- mcall <- Call <- match.call() nm <- names(m)[-1L] keep <- is.element(nm, c("weights", "data", "subset", "na.action")) for (i in nm[!keep]) m[[i]] <- NULL allvars <- if (is.list(random)) allvars <- c(all.vars(fixed), names(random), unlist(lapply(random, function(x) all.vars(formula(x))))) else c(all.vars(fixed), all.vars(random)) Terms <- if (missing(data)) terms(fixed) else terms(fixed, data = data) off <- attr(Terms, "offset") if (length(off <- attr(Terms, "offset"))) allvars <- c(allvars, as.character(attr(Terms, "variables"))[off + 1]) Call$fixed <- eval(fixed) Call$random <- eval(random) m$formula <- as.formula(paste("~", paste(allvars, collapse = "+"))) environment(m$formula) <- environment(fixed) m$drop.unused.levels <- TRUE m[[1L]] <- as.name("model.frame") mf <- eval.parent(m) off <- model.offset(mf) if (is.null(off)) off <- 0 w <- model.weights(mf) if (is.null(w)) w <- rep(1, nrow(mf)) wts <- mf$wts <- w if(missing(subset)) fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, ...) 
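  # fit0 is the initial GLM fit for the PQL iteration: its linear
  # predictor eta and working residuals define the working response
  # zz = eta + (y - mu)/mu'(eta) - off and working weights wz below,
  # which are updated until eta converges to relative tolerance 'reltol'.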
else { # hack to get around peculiar problem with `subset' argument glmmsubset <- eval(expression(subset), data) if(length(glmmsubset) != nrow(mf)) { if(sum(glmmsubset) != nrow(mf)) stop("Internal error: subset vector is wrong length") message("(Fixing subset index..)") glmmsubset <- glmmsubset[glmmsubset] } mf$glmmsubset <- glmmsubset fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, subset=glmmsubset, ...) } w <- fit0$prior.weights eta <- fit0$linear.predictor zz <- eta + fit0$residuals - off wz <- fit0$weights fam <- family nm <- names(mcall)[-1L] keep <- is.element(nm, c("fixed", "random", "data", "subset", "na.action", "control")) for (i in nm[!keep]) mcall[[i]] <- NULL fixed[[2L]] <- quote(zz) mcall[["fixed"]] <- fixed mcall[[1L]] <- as.name("lme") mcall$random <- random mcall$method <- "ML" if (!missing(correlation)) mcall$correlation <- correlation mcall$weights <- quote(varFixed(~invwt)) mf$zz <- zz mf$invwt <- 1/wz mcall$data <- mf for (i in 1:niter) { if (verbose) cat("iteration", i, "\n") fit <- eval(mcall) etaold <- eta eta <- fitted(fit) + off if (sum((eta - etaold)^2) < (reltol^2) * sum(eta^2)) break mu <- fam$linkinv(eta) mu.eta.val <- fam$mu.eta(eta) mf$zz <- eta + (fit0$y - mu)/mu.eta.val - off wz <- w * mu.eta.val^2/fam$variance(mu) mf$invwt <- 1/wz mcall$data <- mf } fit$call <- Call fit$family <- family # if(!spatstat.options("developer")) { # attributes(fit$logLik) <- NULL # fit$logLik <- as.numeric(NA) # } oldClass(fit) <- c("glmmPQL", oldClass(fit)) fit } spatstat.core/R/randommk.R0000644000176200001440000003672614144333463015223 0ustar liggesusers# # # randommk.R # # Random generators for MULTITYPE point processes # # $Revision: 1.39 $ $Date: 2018/05/07 04:34:35 $ # # rmpoispp() random marked Poisson pp # rmpoint() n independent random marked points # rmpoint.I.allim() ... internal # rpoint.multi() temporary wrapper # rmpoispp <- local({ ## Argument checking is.numvector <- function(x) {is.numeric(x) && is.vector(x)} is.constant <- function(x) {is.numvector(x) && length(x) == 1} checkone <- function(x) { if(is.constant(x)) { if(x >= 0) return(TRUE) else stop("Intensity is negative!") } return(is.function(x) || is.im(x)) } ## Ensure that m can be passed as a single value to function(x,y,m,...) slice.fun <- function(x,y,fun,mvalue, ...) { m <- if(length(mvalue) == 1) rep.int(mvalue, length(x)) else mvalue result <- fun(x,y,m, ...) return(result) } ## Main function rmpoispp <- function(lambda, lmax=NULL, win = owin(c(0,1),c(0,1)), types, ..., nsim=1, drop=TRUE, warnwin=!missing(win)) { ## arguments: ## lambda intensity: ## constant, function(x,y,m,...), image, ## vector, list of function(x,y,...) or list of images ## ## lmax maximum possible value of lambda ## constant, vector, or list ## ## win default observation window (of class 'owin') ## ## types possible types for multitype pattern ## ## ... extra arguments passed to lambda() ## if(missing(types)) types <- NULL force(warnwin) if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rmpoispp(lambda, lmax, win, types, ..., warnwin=warnwin) return(simulationresult(result, nsim, drop)) } ## Validate arguments single.arg <- checkone(lambda) vector.arg <- !single.arg && is.numvector(lambda) list.arg <- !single.arg && is.list(lambda) if(! 
(single.arg || vector.arg || list.arg)) stop(paste("argument", sQuote("lambda"), "not understood")) if(list.arg && !all(unlist(lapply(lambda, checkone)))) stop(paste("Each entry in the list", sQuote("lambda"), "must be either a constant, a function or an image")) if(vector.arg && any(lambda < 0)) stop(paste("Some entries in the vector", sQuote("lambda"), "are negative")) ## Determine & validate the set of possible types if(is.null(types)) { if(single.arg) { stop(paste(sQuote("types"), "must be given explicitly when", sQuote("lambda"), "is a constant, a function or an image")) } else if(!is.null(nama <- names(lambda)) && sum(nzchar(nama)) == length(lambda)) { types <- nama } else { types <- seq_along(lambda) } } ntypes <- length(types) if(!single.arg && (length(lambda) != ntypes)) stop(paste("The lengths of", sQuote("lambda"), "and", sQuote("types"), "do not match")) factortype <- factor(types, levels=types) ## Validate `lmax' if(! (is.null(lmax) || is.numvector(lmax) || is.list(lmax) )) stop(paste(sQuote("lmax"), "should be a constant, a vector, a list or NULL")) ## coerce lmax to a vector, to save confusion if(is.null(lmax)) maxes <- rep(NULL, ntypes) else if(is.numvector(lmax) && length(lmax) == 1) maxes <- rep.int(lmax, ntypes) else if(length(lmax) != ntypes) stop(paste("The length of", sQuote("lmax"), "does not match the number of possible types")) else if(is.list(lmax)) maxes <- unlist(lmax) else maxes <- lmax ## coerce lambda to a list, to save confusion lam <- if(single.arg) rep(list(lambda), ntypes) else if(vector.arg) as.list(lambda) else lambda ## Simulate for(i in 1:ntypes) { if(single.arg && is.function(lambda)) { ## call f(x,y,m, ...) Y <- rpoispp(slice.fun, lmax=maxes[i], win=win, fun=lambda, mvalue=types[i], ..., warnwin=warnwin) } else { ## call f(x,y, ...) or use other formats Y <- rpoispp(lam[[i]], lmax=maxes[i], win=win, ..., warnwin=warnwin) } Y <- Y %mark% factortype[i] X <- if(i == 1) Y else superimpose(X, Y, W=X$window, check=FALSE) } ## Randomly permute, just in case the order is important permu <- sample(X$n) X <- X[permu] return(simulationresult(list(X), 1, drop)) } rmpoispp }) ## ------------------------------------------------------------------------ rmpoint <- local({ ## argument validation is.numvector <- function(x) {is.numeric(x) && is.vector(x)} is.constant <- function(x) {is.numvector(x) && length(x) == 1} checkone <- function(x) { if(is.constant(x)) { if(x >= 0) return(TRUE) else stop("Intensity is negative!") } return(is.function(x) || is.im(x)) } # integration.. integratexy <- function(f, win, ...) { imag <- as.im(f, W=win, ...) integral.im(imag) } ## create a counterpart of f(x,y,m) that works when m is a single value funwithfixedmark <- function(xx, yy, ..., m, fun) { mm <- rep.int(m, length(xx)) fun(xx, yy, mm, ...) } integratewithfixedmark <- function(m, fun, win, ...) { integratexy(funwithfixedmark, win=win, m=m, fun=fun, ...) 
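    ## integrates f(x, y, m) over the window for one fixed mark value m;
    ## used in Model I below to compute the marginal probability of
    ## each type when f is a function of location and mark.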
} # Main function rmpoint <- function(n, f=1, fmax=NULL, win = unit.square(), types, ptypes, ..., giveup = 1000, verbose = FALSE, nsim = 1, drop=TRUE) { if(!is.numeric(n)) stop("n must be a scalar or vector") if(any(ceiling(n) != floor(n))) stop("n must be an integer or integers") if(any(n < 0)) stop("n must be non-negative") if(missing(types)) types <- NULL if(missing(ptypes)) ptypes <- NULL if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rmpoint(n, f, fmax, win, types, ptypes, ..., giveup=giveup, verbose=verbose) return(simulationresult(result, nsim, drop)) } if(sum(n) == 0) { nopoints <- ppp(x=numeric(0), y=numeric(0), window=win, check=FALSE) if(!is.null(types)) { nomarks <- factor(types[numeric(0)], levels=types) nopoints <- nopoints %mark% nomarks } return(simulationresult(list(nopoints), 1, drop)) } ############# Model <- if(length(n) == 1) { if(is.null(ptypes)) "I" else "II" } else "III" ############## Validate f argument single.arg <- checkone(f) vector.arg <- !single.arg && is.numvector(f) list.arg <- !single.arg && is.list(f) if(! (single.arg || vector.arg || list.arg)) stop(paste("argument", sQuote("f"), "not understood")) if(list.arg && !all(unlist(lapply(f, checkone)))) stop(paste("Each entry in the list", sQuote("f"), "must be either a constant, a function or an image")) if(vector.arg && any(f < 0)) stop(paste("Some entries in the vector", sQuote("f"), "are negative")) ## cases where it's known that all types of points ## have the same conditional density of location (x,y) const.density <- vector.arg || (list.arg && all(unlist(lapply(f, is.constant)))) same.density <- const.density || (single.arg && !is.function(f)) ################ Determine & validate the set of possible types if(is.null(types)) { if(single.arg && length(n) == 1) stop(paste(sQuote("types"), "must be given explicitly when", sQuote("f"), "is a single number, a function or an image and", sQuote("n"), "is a single number")) else { basis <- if(single.arg) n else f if(!is.null(nama <- names(basis)) && sum(nzchar(nama)) == length(basis)) { types <- nama } else { types <- seq_along(basis) } } } ntypes <- length(types) if(!single.arg && (length(f) != ntypes)) stop(paste("The lengths of", sQuote("f"), "and", sQuote("types"), "do not match")) if(length(n) > 1 && ntypes != length(n)) stop(paste("The lengths of", sQuote("n"), "and", sQuote("types"), "do not match")) factortype <- factor(types, levels=types) ####################### Validate `fmax' if(! 
(is.null(fmax) || is.numvector(fmax) || is.list(fmax) )) stop(paste(sQuote("fmax"), "should be a constant, a vector, a list or NULL")) ## coerce fmax to a vector, to save confusion if(is.null(fmax)) maxes <- rep(NULL, ntypes) else if(is.constant(fmax)) maxes <- rep.int(fmax, ntypes) else if(length(fmax) != ntypes) stop(paste("The length of", sQuote("fmax"), "does not match the number of possible types")) else if(is.list(fmax)) maxes <- unlist(fmax) else maxes <- fmax ## coerce f to a list, to save confusion flist <- if(single.arg) rep(list(f), ntypes) else if(vector.arg) as.list(f) else f #################### START ################################## ## special algorithm for Model I when all f[[i]] are images if(Model == "I" && !same.density && all(unlist(lapply(flist, is.im)))) { X <- rmpoint.I.allim(n, flist, types) return(simulationresult(list(X), 1, drop)) } ## otherwise, first select types, then locations given types if(Model == "I") { ## Compute approximate marginal distribution of type if(vector.arg) ptypes <- f/sum(f) else if(list.arg) { fintegrals <- unlist(lapply(flist, integratexy, win=win, ...)) ptypes <- fintegrals/sum(fintegrals) } else { ## single argument if(is.constant(f)) { ptypes <- rep.int(1/ntypes, ntypes) } else { ## f is a function (x,y,m) ## convert to images and integrate fintegrals <- unlist(lapply(types, integratewithfixedmark, win=win, fun=f, ...)) ## normalise ptypes <- fintegrals/sum(fintegrals) } } } ## Generate marks if(Model == "I" || Model == "II") { ## i.i.d.: n marks with distribution 'ptypes' marques <- sample(factortype, n, prob=ptypes, replace=TRUE) nn <- table(marques) } else { ## multinomial: fixed number n[i] of types[i] repmarks <- factor(rep.int(types, n), levels=types) marques <- sample(repmarks) nn <- n } ntot <- sum(nn) ############## SIMULATE !!! ######################### ## If all types have the same conditional density of location, ## generate the locations using rpoint, and return. if(same.density) { X <- rpoint(ntot, flist[[1]], maxes[[1]], win=win, ..., giveup=giveup, verbose=verbose) X <- X %mark% marques return(simulationresult(list(X), 1, drop)) } ## Otherwise invoke rpoint() for each type separately X <- ppp(numeric(ntot), numeric(ntot), window=win, marks=marques, check=FALSE) for(i in 1:ntypes) { if(verbose) cat(paste("Type", i, "\n")) if(single.arg && is.function(f)) { ## want to call f(x,y,m, ...) Y <- rpoint(nn[i], funwithfixedmark, fmax=maxes[i], win=win, ..., m=factortype[i], fun=f, giveup=giveup, verbose=verbose) } else { ## call f(x,y, ...) or use other formats Y <- rpoint(nn[i], flist[[i]], fmax=maxes[i], win=win, ..., giveup=giveup, verbose=verbose) } Y <- Y %mark% factortype[i] X[marques == factortype[i]] <- Y } return(simulationresult(list(X), 1, drop)) } rmpoint }) rmpoint.I.allim <- local({ ## Extract pixel coordinates and probabilities get.stuff <- function(imag) { w <- as.mask(as.owin(imag)) dx <- w$xstep dy <- w$ystep rxy <- rasterxy.mask(w, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ppix <- as.vector(imag$v[w$m]) ## not normalised - OK npix <- length(xpix) return(list(xpix=xpix, ypix=ypix, ppix=ppix, dx=rep.int(dx,npix), dy=rep.int(dy, npix), npix=npix)) } rmpoint.I.allim <- function(n, f, types) { ## Internal use only! ## Generates random marked points (Model I *only*) ## when all f[[i]] are pixel images. 
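    ## Strategy (as implemented below): pool the pixels of all the
    ## type-specific images; sample pixel indices with probability
    ## proportional to the pixel values; jitter each sampled point
    ## uniformly within its pixel; and assign the mark according to
    ## the image from which the pixel came.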
## stuff <- lapply(f, get.stuff) ## Concatenate into loooong vectors xpix <- unlist(lapply(stuff, getElement, name="xpix")) ypix <- unlist(lapply(stuff, getElement, name="ypix")) ppix <- unlist(lapply(stuff, getElement, name="ppix")) dx <- unlist(lapply(stuff, getElement, name="dx")) dy <- unlist(lapply(stuff, getElement, name="dy")) ## replicate types numpix <- unlist(lapply(stuff, getElement, name="npix")) tpix <- rep.int(seq_along(types), numpix) ## ## sample pixels from union of all images ## npix <- sum(numpix) id <- sample(npix, n, replace=TRUE, prob=ppix) ## get pixel centre coordinates and randomise within pixel x <- xpix[id] + (runif(n) - 1/2) * dx[id] y <- ypix[id] + (runif(n) - 1/2) * dy[id] ## compute types marx <- factor(types[tpix[id]],levels=types) ## et voila! return(ppp(x, y, window=as.owin(f[[1]]), marks=marx, check=FALSE)) } rmpoint.I.allim }) ## ## wrapper for Rolf's function ## rpoint.multi <- function (n, f, fmax=NULL, marks = NULL, win = unit.square(), giveup = 1000, verbose = FALSE, warn=TRUE, nsim=1, drop=TRUE) { if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rpoint.multi(n, f, fmax, marks, win, giveup, verbose) return(simulationresult(result, nsim, drop)) } no.marks <- is.null(marks) || (is.factor(marks) && length(levels(marks)) == 1) if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) warning(paste("Attempting to generate", n, "random points")) } ## unmarked case if (no.marks) { X <- if(is.function(f)) { rpoint(n, f, fmax, win, giveup=giveup, verbose=verbose) } else { rpoint(n, f, fmax, giveup=giveup, verbose=verbose) } return(simulationresult(list(X), 1, drop)) } ## multitype case if(length(marks) != n) stop("length of marks vector != n") if(!is.factor(marks)) stop("marks should be a factor") types <- levels(marks) types <- factor(types, levels=types) ## generate required number of points of each type nums <- table(marks) X <- rmpoint(nums, f, fmax, win=win, types=types, giveup=giveup, verbose=verbose) if(any(table(marks(X)) != nums)) stop("Internal error: output of rmpoint illegal") ## reorder them to correspond to the desired 'marks' vector Y <- X Xmarks <- marks(X) for(ty in types) { to <- (marks == ty) from <- (Xmarks == ty) if(sum(to) != sum(from)) stop(paste("Internal error: mismatch for mark =", ty)) if(any(to)) { Y$x[to] <- X$x[from] Y$y[to] <- X$y[from] Y$marks[to] <- ty } } return(simulationresult(list(Y), 1, drop)) } spatstat.core/R/densityfun.R0000644000176200001440000000512314144333462015565 0ustar liggesusers## ## densityfun.R ## ## Exact 'funxy' counterpart of density.ppp ## ## $Revision: 1.12 $ $Date: 2021/03/31 01:25:18 $ densityfun <- function(X, ...) { UseMethod("densityfun") } densityfun.ppp <- function(X, sigma=NULL, ..., weights=NULL, edge=TRUE, diggle=FALSE) { verifyclass(X, "ppp") ## handle weights now weightsgiven <- !missing(weights) && !is.null(weights) if(weightsgiven) { # convert to numeric if(is.im(weights)) { weights <- safelookup(weights, X) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) if(length(weights) == 0) weightsgiven <- FALSE } if(weightsgiven) { check.nvector(weights, npoints(X)) } else weights <- NULL ## stuff <- list(Xdata=X, weights=weights, edge=edge, diggle=diggle, ...) 
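  ## 'stuff' is captured in the closure 'g' defined below, so the kernel
  ## estimate is evaluated exactly (via densitycrossEngine) at arbitrary
  ## query locations, rather than being interpolated from a pixel grid.
  ## Hedged sketch of usage (assumes the 'cells' dataset):
  ##   f <- densityfun(cells, sigma=0.05)
  ##   f(0.5, 0.5)   # intensity estimate at the location (0.5, 0.5)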
## ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.diggle, allow.zero=TRUE) stuff[c("sigma", "varcov")] <- ker[c("sigma", "varcov")] ## g <- function(x, y=NULL, drop=TRUE) { Y <- xy.coords(x, y)[c("x", "y")] W <- Window(stuff$Xdata) ok <- inside.owin(Y, w=W) allgood <- all(ok) if(!allgood) Y <- lapply(Y, "[", i=ok) Xquery <- as.ppp(Y, W) vals <- do.call(densitycrossEngine, append(list(Xquery=Xquery), stuff)) if(allgood || drop) return(vals) ans <- numeric(length(ok)) ans[ok] <- vals ans[!ok] <- NA attr(ans, "sigma") <- attr(vals, "sigma") return(ans) } g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("densityfun", class(g)) return(g) } print.densityfun <- function(x, ...) { cat("function(x,y)", "which returns", "kernel estimate of intensity for", fill=TRUE) X <- get("X", envir=environment(x)) print(X, ...) cat("Optional argument:", "drop=TRUE", fill=TRUE) return(invisible(NULL)) } ## Method for as.im ## (enables plot.funxy, persp.funxy, contour.funxy to work for this class) as.im.densityfun <- function(X, W=Window(X), ..., approx=TRUE) { if(!approx) { #' evaluate exactly at grid points using as.im.funxy -> as.im.function result <- as.im.function(X, W=W, ...) } else { #' faster, approximate evaluation using FFT stuff <- get("stuff", envir=environment(X)) Xdata <- stuff[["Xdata"]] otherstuff <- stuff[names(stuff) != "Xdata"] if(!missing(W)) Xdata <- Xdata[W] result <- do.call(density, resolve.defaults(list(x=quote(Xdata)), list(...), otherstuff)) } return(result) } spatstat.core/R/distcdf.R0000644000176200001440000001364014144333462015020 0ustar liggesusers#' #' distcdf.R #' #' cdf of |X1-X2| when X1,X2 are iid uniform in W, etc #' #' $Revision: 1.18 $ $Date: 2021/08/12 06:21:43 $ #' distcdf <- local({ distcdf <- function(W, V=W, ..., dW=1, dV=dW, nr=1024, regularise=TRUE, savedenom=FALSE, delta=NULL) { reflexive <- (missing(V) || is.null(V)) && (missing(dV) || is.null(dV)) diffuse <- is.owin(W) && is.owin(V) uniformW <- is.null(dW) || identical(dW, 1) uniformV <- is.null(dV) || identical(dV, 1) uniform <- uniformW && uniformV if(is.owin(W)) { W <- as.mask(as.owin(W), ...) dW <- as.im(dW, W=W) } else if(is.ppp(W)) { if(uniformW) { #' discrete uniform distribution on W dW <- pixellate(W, ...) } else { #' dW should be a weight or vector of weights if(!is.vector(dW) || !is.numeric(dW)) stop("If W is a point pattern, dW should be a vector of weights") if(length(dW) == 1L) { dW <- rep(dW, npoints(W)) } else stopifnot(length(dW) == npoints(W)) dW <- pixellate(W, weights=dW, ...) } } else stop("W should be a point pattern or a window") if(!reflexive) { if(is.owin(V)) { V <- as.mask(as.owin(V), ...) dV <- as.im(dV, W=V) } else if(is.ppp(V)) { if(uniformV) { #' discrete uniform distribution on V dV <- pixellate(V, ...) } else { #' dV should be a weight or vector of weights if(!is.vector(dV) || !is.numeric(dV)) stop("If V is a point pattern, dV should be a vector of weights") if(length(dV) == 1L) { dV <- rep(dV, npoints(V)) } else stopifnot(length(dV) == npoints(V)) dV <- pixellate(V, weights=dV, ...) } } else stop("V should be a point pattern or a window") if(!uniformV && min(dV) < 0) stop("Negative values encountered in dV") } #' compute if(diffuse && uniform) { #' uniform distributions on windows g <- if(reflexive) setcov(W, ...) else setcov(W, V, ...) 
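      ## For X1, X2 independent uniform in W, the CDF of D = |X1 - X2| is
      ##   P(D <= r) = |W|^(-2) * integral of g(v) over {|v| <= r},
      ## where g(v) = area(W intersect (W + v)) is the set covariance.
      ## Below, the values |v| are histogrammed with weights g(v) to
      ## accumulate this integral numerically.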
} else {
    g <- if(reflexive) imcov(dW) else imcov(dW, dV)
  }
  r <- as.im(function(x,y) { sqrt(x^2 + y^2) }, g)
  pix <- with(r, max(xstep, ystep))
  #' extract
  rvals <- as.vector(as.matrix(r))
  gvals <- as.vector(as.matrix(g))
  rmax <- max(rvals)
  #' histogram
  if(is.null(nr)) nr <- max(1024, 512 * ceiling(rmax/(pix*512)))
  rgrid <- seq(0, rmax, length=nr)
  dr <- rmax/(nr-1)
  h <- whist(rvals, breaks=rgrid, weights=gvals/sum(gvals))
  ch <- c(0,cumsum(h))
  #' regularise at very short distances
  if(regularise) {
    pix <- with(r, max(xstep, ystep))
    suspect <- which(rgrid <= 10 * pix)
    reference <- which(rgrid <= 20 * pix)
    weigh <- pmin(seq_along(ch), min(reference))^2
    fit <- lm(ch ~ I(rgrid^2) + I(rgrid^3) - 1,
              subset=reference, weights=weigh)
    ch[suspect] <- predict(fit)[suspect]
    ## enforce cdf properties
    ch[1] <- 0
    ch <- cummax(ch)
  }
  #' ok
  result <- fv(data.frame(r=rgrid, f=ch),
               "r", quote(CDF(r)), "f", , range(rvals),
               c("r","%s(r)"),
               c("Interpoint distance","Cumulative probability"),
               fname="CDF")
  #' refine spacing, if required
  if(!is.null(delta)) result <- refine(result, delta)
  #'
  if(savedenom) {
    denomW <- integral(dW)
    denomV <- if(reflexive) denomW else integral(dV)
    attr(result, "denom") <- denomW * denomV
  }
  return(result)
}

refine <- function(H, delta=NULL, verbose=FALSE, force=FALSE) {
  ## H is CDF of pairwise distances
  ## Ensure H has spacing at most 'delta'
  check.1.real(delta)
  stopifnot(is.finite(delta) && (delta > 0))
  Rmax <- max(H$r)  ## upper limit of the distance values, for plot ranges
  rstep <- mean(diff(H$r))
  inflate <- rstep/delta
  if(verbose)
    splat("delta=", delta, "rstep=", rstep, "inflate=", inflate)
  if(inflate > 1) {
    ## interpolate H
    if(verbose) {
      plot(H, xlim=c(0, Rmax/2))
    }
    H <- interpCDF(H, n=ceiling(inflate))
    if(verbose) {
      plot(H, add=TRUE, xlim=c(0, Rmax), col=2)
      splat("New rstep=", mean(diff(H$r)))
    }
    if(force) {
      ## force CDF to be nondecreasing and to start from 0
      Hr <- H[["f"]]
      Hr[1] <- 0
      Hr <- cummax(Hr)
      H[["f"]] <- Hr
    }
  }
  return(H)
}

interpCDF <- function(H, ..., method=c("smooth.spline", "loess"),
                      delta=NULL, n=NULL) {
  ## H is CDF of pairwise distance
  ## Interpolate H by smoothing H(r)/r^2
  method <- match.arg(method)
  rname <- fvnames(H, ".x")
  rold <- H[[rname]]
  rpos <- (rold > 0)
  if(is.null(delta) == is.null(n))
    stop("Exactly one of the arguments 'delta' or 'n' should be given")
  if(!is.null(n)) {
    delta <- mean(diff(rold))/n
  } else {
    check.1.real(delta)
    stopifnot(delta > 0)
  }
  rnew <- seq(min(rold), max(rold), by=delta)
  ## initialise result
  newvalues <- vector(mode="list", length=ncol(H))
  names(newvalues) <- colnames(H)
  newvalues[[rname]] <- rnew
  ## process each column of function values
  nama <- fvnames(H, ".a")
  for(ynam in nama) {
    yy <- H[[ynam]]
    ok <- is.finite(yy) & rpos
    yok <- yy[ok]
    rok <- rold[ok]
    switch(method,
           smooth.spline = {
             ss <- smooth.spline(x=rok, y=yok/rok^2, ...)
             yhat <- predict(ss, rnew)$y * rnew^2
           },
           loess = {
             df <- data.frame(x=rok, y=yok/rok^2)
             lo <- loess(y ~ x, df, ...)
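             ## (Why smooth y/r^2 rather than y itself: for small r the
             ## CDF of interpoint distance grows roughly like a constant
             ## times r^2, so the ratio is nearly flat near 0 and is
             ## safer to smooth; the factor rnew^2 is restored next.)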
yhat <- predict(lo, data.frame(x=rnew)) * rnew^2 }) newvalues[[ynam]] <- yhat } newH <- as.data.frame(newvalues) ## copy attributes anames <- setdiff(names(attributes(H)), c("row.names", "dim", "dimnames", "names", "tsp")) for(e in anames) attr(newH, e) <- attr(H, e) return(newH) } distcdf }) spatstat.core/R/density.ppp.R0000644000176200001440000010425214144333462015655 0ustar liggesusers# # density.ppp.R # # Method for 'density' for point patterns # # $Revision: 1.113 $ $Date: 2021/01/07 03:08:41 $ # # ksmooth.ppp <- function(x, sigma, ..., edge=TRUE) { # .Deprecated("density.ppp", package="spatstat") # density.ppp(x, sigma, ..., edge=edge) # } density.ppp <- local({ density.ppp <- function(x, sigma=NULL, ..., weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE) { verifyclass(x, "ppp") output <- pickoption("output location type", at, c(pixels="pixels", points="points")) if(!identical(kernel, "gaussian")) { validate2Dkernel(kernel) ## kernel is only partly implemented! if(se) stop("Standard errors are not implemented for non-Gaussian kernel") if(verbose && scalekernel && (is.function(sigma) || (is.null(sigma) && is.null(varcov)))) warning("Bandwidth selection will be based on Gaussian kernel") } ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, x=x, adjust=adjust) sigma <- ker$sigma varcov <- ker$varcov ## sigma.is.infinite <- ker$infinite if(is.im(weights)) { weights <- safelookup(weights, x) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(x), enclos=parent.frame()) if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL if(se) { # compute standard error SE <- denspppSEcalc(x, sigma=sigma, varcov=varcov, ..., weights=weights, edge=edge, at=output, leaveoneout=leaveoneout, adjust=adjust, diggle=diggle) if(positive) SE <- posify(SE) } ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate nx <- npoints(x) single <- is.null(dim(weights)) totwt <- if(is.null(weights)) nx else if(single) sum(weights) else colSums(weights) if(!edge) totwt <- 0 * totwt W <- Window(x) A <- area.owin(W) switch(output, pixels = { E <- solapply(totwt/A, as.im, W=W, ...) names(E) <- colnames(weights) if(single) E <- E[[1L]] }, points = { numerator <- rep(totwt, each=nx) if(!single) numerator <- matrix(numerator, nrow=nx) if(leaveoneout && edge) numerator <- numerator - (weights %orifnull% 1) E <- numerator/A if(!single) colnames(E) <- colnames(weights) }) result <- if(se) list(estimate=E, SE=SE) else E return(result) } if(output == "points") { # VALUES AT DATA POINTS ONLY result <- densitypointsEngine(x, sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, weights=weights, edge=edge, leaveoneout=leaveoneout, diggle=diggle, ...) 
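    ## 'result' now holds the kernel estimate evaluated at the data points
    ## themselves; with the default leaveoneout=TRUE, each point's own
    ## kernel mass is excluded from its estimate.
    ## Illustrative sketch only (assumes spatstat is attached and uses
    ## the standard 'cells' dataset):
    ##   lam <- density(cells, sigma=0.05, at="points")
    ##   length(lam) == npoints(cells)   # one intensity value per point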
if(verbose && !is.null(uhoh <- attr(result, "warnings"))) { switch(uhoh, underflow=warning("underflow due to very small bandwidth"), warning(uhoh)) } ## constrain values to be positive if(positive) result <- posify(result) if(se) result <- list(estimate=result, SE=SE) return(result) } # VALUES AT PIXELS if(!edge) { # no edge correction edg <- NULL raw <- second.moment.calc(x, sigma, what="smooth", ..., kernel=kernel, scalekernel=scalekernel, weights=weights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } else if(!diggle) { # edge correction e(u) both <- second.moment.calc(x, sigma, what="smoothedge", ..., kernel=kernel, scalekernel=scalekernel, weights=weights, varcov=varcov) raw <- divide.by.pixelarea(both$smooth) edg <- both$edge ## Math.im / Math.imlist not yet working smo <- imagelistOp(raw, edg, "/") } else { # edge correction e(x_i) edg <- second.moment.calc(x, sigma, what="edge", ..., scalekernel=scalekernel, kernel=kernel, varcov=varcov) wi <- 1/safelookup(edg, x, warn=FALSE) wi[!is.finite(wi)] <- 0 # edge correction becomes weight attached to points if(is.null(weights)) { newweights <- wi } else if(is.matrix(weights) || is.data.frame(weights)) { stopifnot(nrow(weights) == npoints(x)) newweights <- weights * wi } else { stopifnot(length(weights) == npoints(x)) newweights <- weights * wi } raw <- second.moment.calc(x, sigma, what="smooth", ..., kernel=kernel, scalekernel=scalekernel, weights=newweights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } result <- if(is.im(smo)) smo[x$window, drop=FALSE] else solapply(smo, "[", i=x$window, drop=FALSE) # internal use only spill <- resolve.1.default(list(spill=FALSE), list(...)) if(spill) return(list(result=result, sigma=sigma, varcov=varcov, raw = raw, edg=edg)) # constrain values to be positive if(positive) result <- posify(result) # normal return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "kernel") <- kernel if(se) result <- list(estimate=result, SE=SE) return(result) } divideimage <- function(numer, denom) eval.im(numer/denom) posify <- function(x, eps=.Machine$double.xmin) { force(eps) # scalpel if(is.im(x)) return(eval.im(pmax(eps, x))) if(inherits(x, "solist")) return(solapply(x, posify, eps=eps)) if(is.numeric(x)) return(pmax(eps, x)) # data frame or list if(is.list(x) && all(sapply(x, is.numeric))) return(lapply(x, posify, eps=eps)) warning("Internal error: posify did not recognise data format") return(x) } divide.by.pixelarea <- function(x) { if(is.im(x)) { x$v <- x$v/(x$xstep * x$ystep) } else { for(i in seq_along(x)) x[[i]]$v <- with(x[[i]], v/(xstep * ystep)) } return(x) } denspppSEcalc <- function(x, sigma, varcov, ..., weights, edge, diggle, at) { ## Calculate standard error, rather than estimate nx <- npoints(x) single <- is.null(dim(weights)) if(bandwidth.is.infinite(sigma)) { #' special case - uniform totwt2 <- if(is.null(weights)) nx else if(single) sum(weights^2) else colSums(weights^2) if(!edge) totwt2 <- 0 * totwt2 W <- Window(x) A <- area.owin(W) switch(at, pixels = { V <- solapply(totwt2/A, as.im, W=W, ...) 
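                 ## With infinite bandwidth the estimate is the constant
                 ## (total weight)/area, so its variance is estimated by
                 ## (sum of squared weights)/area; the square root is
                 ## taken when this function returns.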
names(V) <- colnames(weights) if(single) V <- V[[1L]] }, points = { numerator <- rep(totwt2, each=nx) if(!single) numerator <- matrix(numerator, nrow=nx) leaveoneout <- resolve.1.default(list(leaveoneout=TRUE), list(...)) if(edge && leaveoneout) numerator <- numerator - (weights %orifnull% 1)^2 V <- numerator/A if(!single) colnames(V) <- colnames(weights) }) return(sqrt(V)) } ## Usual case tau <- taumat <- NULL if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(ensure2vector(sigma))) tau <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) taumat <- varcov/2 } ## Calculate edge correction weights if(edge) { edgeim <- second.moment.calc(x, sigma, what="edge", ..., varcov=varcov) if(diggle || at == "points") { edgeX <- safelookup(edgeim, x, warn=FALSE) diggleX <- 1/edgeX diggleX[!is.finite(diggleX)] <- 0 } edgeim <- edgeim[Window(x), drop=FALSE] } ## Perform smoothing if(!edge) { ## no edge correction V <- density(x, sigma=tau, varcov=taumat, ..., weights=weights, edge=edge, diggle=diggle, at=at) } else if(!diggle) { ## edge correction e(u) V <- density(x, sigma=tau, varcov=taumat, ..., weights=weights, edge=edge, diggle=diggle, at=at) V <- if(at == "points") V * diggleX else imagelistOp(V, edgeim, "/") } else { ## Diggle edge correction e(x_i) wts <- if(is.null(weights)) diggleX else (diggleX * weights) V <- density(x, sigma=tau, varcov=taumat, ..., weights=wts, edge=edge, diggle=diggle, at=at) } V <- V * varconst return(sqrt(V)) } density.ppp }) densitypointsEngine <- function(x, sigma=NULL, ..., kernel="gaussian", scalekernel=is.character(kernel), weights=NULL, edge=TRUE, varcov=NULL, leaveoneout=TRUE, diggle=FALSE, sorted=FALSE, spill=FALSE, cutoff=NULL, debug=FALSE) { stopifnot(is.logical(leaveoneout)) validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") if(isgauss) { ## constant factor in Gaussian density if(is.null(varcov)) { gaussconst <- 1/(2 * pi * sigma^2) } else { detSigma <- det(varcov) Sinv <- solve(varcov) gaussconst <- 1/(2 * pi * sqrt(detSigma)) } } if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate nx <- npoints(x) single <- is.null(dim(weights)) totwt <- if(is.null(weights)) nx else if(single) sum(weights) else colSums(weights) if(!edge) totwt <- 0 * totwt W <- Window(x) A <- area.owin(W) numerator <- rep(totwt, each=nx) if(!single) numerator <- matrix(numerator, nrow=nx) if(leaveoneout && edge) numerator <- numerator - (weights %orifnull% 1) result <- numerator/A if(!single) colnames(result) <- colnames(weights) return(result) } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance if(debug) cat(paste("cutoff=", cutoff, "\n")) if(leaveoneout && npoints(x) > 1) { ## ensure each point has its closest neighbours within the cutoff nndmax <- maxnndist(x) cutoff <- max(2 * nndmax, cutoff) if(debug) cat(paste("adjusted cutoff=", cutoff, "\n")) } # validate weights if(is.null(weights)) { k <- 1L } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(x)) weights <- as.data.frame(weights) weightnames <- 
colnames(weights) } else { k <- 1L stopifnot(length(weights) == npoints(x) || length(weights) == 1L) } # evaluate edge correction weights at points if(edge) { win <- x$window if(isgauss && is.null(varcov) && win$type == "rectangle") { # evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- x$x yy <- x$y xprob <- pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma) yprob <- pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { edg <- second.moment.calc(x, sigma=sigma, kernel=kernel, scalekernel=scalekernel, what="edge", varcov=varcov, ...) edgeweight <- safelookup(edg, x, warn=FALSE) } if(diggle) { # Diggle edge correction # edgeweight is attached to each point if(is.null(weights)) { k <- 1L weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } if(isgauss && spatstat.options("densityTransform") && spatstat.options("densityC")) { ## .................. experimental C code ..................... if(debug) cat('Using experimental code!\n') npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) xx <- x$x yy <- x$y ## transform to standard coordinates if(is.null(varcov)) { xx <- xx/(sqrt(2) * sigma) yy <- yy/(sqrt(2) * sigma) } else { xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2) xx <- xy[,1L] yy <- xy[,2L] sorted <- FALSE } ## cutoff in standard coordinates sd <- sigma %orifnull% sqrt(min(eigen(varcov)$values)) cutoff <- cutoff/(sqrt(2) * sd) ## sort into increasing order of x coordinate (required by C code) if(!sorted) { oo <- fave.order(xx) xx <- xx[oo] yy <- yy[oo] } if(is.null(weights)) { zz <- .C(SC_Gdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result result <- result * gaussconst } else if(k == 1L) { wtsort <- if(sorted) weights else weights[oo] zz <- .C(SC_Gwtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result result <- result * gaussconst } else { ## matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C(SC_Gwtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } result <- result * gaussconst } } else if(isgauss && spatstat.options("densityC")) { # .................. C code ........................... 
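    ## The C routines assume the points are sorted by x coordinate:
    ## each point then only needs to be compared with neighbours whose
    ## x coordinate lies within 'cutoff', giving roughly O(n * m) work,
    ## where m is the typical number of close neighbours, rather than
    ## the O(n^2) cost of comparing all pairs.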
if(debug) cat('Using standard code.\n') npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] } if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C(SC_denspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sigma), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1L) { wtsort <- if(sorted) weights else weights[oo] zz <- .C(SC_wtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sigma), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C(SC_wtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sigma), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } else { # anisotropic kernel flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SC_adenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1L) { # vector of weights wtsort <- if(sorted) weights else weights[oo] zz <- .C(SC_awtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C(SC_awtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE="spatstat.core") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } } else { # ..... interpreted code ......................................... close <- closepairs(x, cutoff) i <- close$i j <- close$j d <- close$d npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) # evaluate contribution from each close pair (i,j) if(isgauss) { if(is.null(varcov)) { contrib <- gaussconst * exp(-d^2/(2 * sigma^2)) } else { ## anisotropic kernel dx <- close$dx dy <- close$dy contrib <- gaussconst * exp(-(dx * (dx * Sinv[1L,1L] + dy * Sinv[1L,2L]) + dy * (dx * Sinv[2L,1L] + dy * Sinv[2L,2L]))/2) } } else { contrib <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) 
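      ## 'contrib' holds the kernel value k(x_i - x_j) for every close
      ## pair (i,j); pairs separated by more than 'cutoff' contribute
      ## (effectively) zero and are never generated by closepairs().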
} ## sum (weighted) contributions ## query point i, data point j ifac <- factor(i, levels=1:npts) if(is.null(weights)) { result <- tapplysum(contrib, list(ifac)) } else if(k == 1L) { wcontrib <- contrib * weights[j] result <- tapplysum(wcontrib, list(ifac)) } else { for(kk in 1:k) { wcontribkk <- contrib * weights[j, kk] result[,kk] <- tapplysum(wcontribkk, list(ifac)) } } # } # ----- contribution from point itself ---------------- if(!leaveoneout) { #' add contribution from point itself if(isgauss) { self <- gaussconst } else { self <- evaluate2Dkernel(kernel, 0, 0, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) } if(!is.null(weights)) self <- self * weights result <- result + self } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # ............. validate ................................. npts <- npoints(x) if(k == 1L) { result <- as.numeric(result) if(length(result) != npts) stop(paste("Internal error: incorrect number of lambda values", "in leave-one-out method:", "length(lambda) = ", length(result), "!=", npts, "= npoints")) if(anyNA(result)) { nwrong <- sum(is.na(result)) stop(paste("Internal error:", nwrong, "NA or NaN", ngettext(nwrong, "value", "values"), "generated in leave-one-out method")) } } else { if(ncol(result) != k) stop(paste("Internal error: incorrect number of columns returned:", ncol(result), "!=", k)) colnames(result) <- weightnames if(nrow(result) != npts) stop(paste("Internal error: incorrect number of rows of lambda values", "in leave-one-out method:", "nrow(lambda) = ", nrow(result), "!=", npts, "= npoints")) if(anyNA(result)) { nwrong <- sum(!complete.cases(result)) stop(paste("Internal error:", nwrong, ngettext(nwrong, "row", "rows"), "of NA values generated in leave-one-out method")) } } if(spill) return(list(result=result, sigma=sigma, varcov=varcov, edg=edgeweight)) # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov # return(result) } resolve.2D.kernel <- function(..., sigma=NULL, varcov=NULL, x, mindist=NULL, adjust=1, bwfun=NULL, allow.zero=FALSE) { if(is.function(sigma)) { bwfun <- sigma sigma <- NULL } if(is.null(sigma) && is.null(varcov) && !is.null(bwfun)) { #' call bandwidth selection function force(x) bw <- do.call.matched(bwfun, resolve.defaults(list(X=quote(x)), list(...))) #' interpret the result as either sigma or varcov if(!is.numeric(bw)) stop("bandwidth selector returned a non-numeric result") if(length(bw) %in% c(1L,2L)) { sigma <- as.numeric(bw) if(!all(sigma > 0)) { gripe <- "bandwidth selector returned negative value(s)" if(allow.zero) warning(gripe) else stop(gripe) } } else if(is.matrix(bw) && nrow(bw) == 2 && ncol(bw) == 2) { varcov <- bw if(!all(eigen(varcov)$values > 0)) stop("bandwidth selector returned matrix with negative eigenvalues") } else stop("bandwidth selector did not return a matrix or numeric value") } sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if(sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1L,2L)) if(!allow.zero) stopifnot(all(sigma > 0)) } if(varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) # reconcile ngiven <- varcov.given + sigma.given switch(ngiven+1L, { # default w <- x$window sigma <- (1/8) * shortside(as.rectangle(w)) }, { if(sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if(!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # apply 
adjustments if(!is.null(sigma)) sigma <- adjust * sigma if(!is.null(varcov)) varcov <- (adjust^2) * varcov # sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd uhoh <- if(!is.null(mindist) && cutoff < mindist) "underflow" else NULL result <- list(sigma=sigma, varcov=varcov, cutoff=cutoff, warnings=uhoh) return(result) } densitycrossEngine <- function(Xdata, Xquery, sigma=NULL, ..., kernel="gaussian", scalekernel=is.character(kernel), weights=NULL, edge=TRUE, varcov=NULL, diggle=FALSE, sorted=FALSE, cutoff=NULL) { validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") && scalekernel if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL # validate weights if(is.null(weights)) { k <- 1L } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(Xdata)) weights <- as.data.frame(weights) weightnames <- colnames(weights) } else { k <- 1L stopifnot(length(weights) == npoints(Xdata) || length(weights) == 1L) } #' infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate single <- is.null(dim(weights)) totwt <- if(is.null(weights)) npoints(Xdata) else if(single) sum(weights) else colSums(weights) if(!edge) totwt <- 0 * totwt lam <- totwt/area.owin(Window(Xdata)) result <- if(single) rep(lam, npoints(Xquery)) else matrix(lam, npoints(Xquery), length(lam), byrow=TRUE, dimnames=list(NULL, colnames(weights))) return(result) } # evaluate edge correction weights at points if(edge) { win <- Xdata$window if(diggle) { ## edge correction weights are attached to data points xedge <- Xdata } else { ## edge correction weights are applied at query points xedge <- Xquery if(!all(inside.owin(Xquery, , win))) stop(paste("Edge correction is not possible:", "some query points lie outside the data window"), call.=FALSE) } if(isgauss && is.null(varcov) && win$type == "rectangle") { ## evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- xedge$x yy <- xedge$y xprob <- pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma) yprob <- pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { ## evaluate edge correction weights edg <- second.moment.calc(Xdata, what="edge", kernel=kernel, scalekernel=scalekernel, sigma=sigma, varcov=varcov) edgeweight <- safelookup(edg, xedge, warn=FALSE) } if(diggle) { ## Diggle edge correction ## edgeweight is attached to each data point if(is.null(weights)) { k <- 1L weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance ndata <- npoints(Xdata) nquery <- npoints(Xquery) if(!isgauss) { ## .................. non-Gaussian kernel ........................ close <- crosspairs(Xdata, Xquery, cutoff) contrib <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) 
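    ## As in densitypointsEngine, only pairs within 'cutoff' contribute;
    ## here the contributions are accumulated by the index of the query
    ## point j, rather than the data point i.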
## sum the (weighted) contributions i <- close$i j <- close$j jfac <- factor(j, levels=seq_len(nquery)) if(is.null(weights)) { result <- tapplysum(contrib, list(jfac)) } else if(k == 1L) { wcontrib <- contrib * weights[i] result <- tapplysum(wcontrib, list(jfac)) } else { result <- matrix(, nquery, k) for(kk in 1:k) { wcontribkk <- contrib * weights[i, kk] result[,kk] <- tapplysum(wcontribkk, list(jfac)) } } } else { ## ................. Gaussian kernel ................... result <- if(k == 1L) numeric(nquery) else matrix(, nquery, k) ## coordinates xq <- Xquery$x yq <- Xquery$y xd <- Xdata$x yd <- Xdata$y if(!sorted) { ## sort into increasing order of x coordinate (required by C code) ooq <- fave.order(Xquery$x) xq <- xq[ooq] yq <- yq[ooq] ood <- fave.order(Xdata$x) xd <- xd[ood] yd <- yd[ood] } if(is.null(varcov)) { ## isotropic kernel if(is.null(weights)) { zz <- .C(SC_crdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), rmaxi = as.double(cutoff), sig = as.double(sigma), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } else if(k == 1L) { wtsort <- if(sorted) weights else weights[ood] zz <- .C(SC_wtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sig = as.double(sigma), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { ## matrix of weights wtsort <- if(sorted) weights else weights[ood, ] for(j in 1:k) { zz <- .C(SC_wtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort[,j]), rmaxi = as.double(cutoff), sig = as.double(sigma), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result } colnames(result) <- weightnames } } else { ## anisotropic kernel detSigma <- det(varcov) Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SC_acrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } else if(k == 1L) { ## vector of weights wtsort <- if(sorted) weights else weights[ood] zz <- .C(SC_awtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { ## matrix of weights wtsort <- if(sorted) weights else weights[ood, ] for(j in 1:k) { zz <- .C(SC_awtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort[,j]), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.core") if(sorted) 
result[,j] <- zz$result else result[ooq,j] <- zz$result } colnames(result) <- weightnames } } } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov # return(result) } bandwidth.is.infinite <- function(sigma) { sigma <- as.numeric(sigma) return((length(sigma) > 0) && all(sigma == Inf)) } density.ppplist <- density.splitppp <- function(x, ..., weights=NULL, se=FALSE) { if(is.null(weights) || is.im(weights) || is.expression(weights)) weights <- rep(list(weights), length(x)) y <- mapply(density.ppp, x=x, weights=weights, MoreArgs=list(se=se, ...), SIMPLIFY=FALSE) if(!se) return(as.solist(y, demote=TRUE)) y.est <- lapply(y, getElement, name="estimate") y.se <- lapply(y, getElement, name="SE") z <- list(estimate = as.solist(y.est, demote=TRUE), SE = as.solist(y.se, demote=TRUE)) return(z) } spatstat.core/R/pcfinhom.R0000644000176200001440000001604514144333463015206 0ustar liggesusers# # pcfinhom.R # # $Revision: 1.24 $ $Date: 2021/10/26 07:12:24 $ # # inhomogeneous pair correlation function of point pattern # # pcfinhom <- function(X, lambda=NULL, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, reciplambda=NULL, sigma=NULL, varcov=NULL, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- X$window areaW <- area(win) npts <- npoints(X) kernel <- match.kernel(kernel) correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/areaW) } ########## intensity values ######################### dangerous <- c("lambda", "reciplambda") danger <- TRUE if(npts == 0) { lambda <- reciplambda <- numeric(0) danger <- FALSE } else if(missing(lambda) && is.null(reciplambda)) { # No intensity data provided danger <- FALSE # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) reciplambda <- 1/lambda } else if(!is.null(reciplambda)) { # 1/lambda values provided if(is.im(reciplambda)) reciplambda <- safelookup(reciplambda, X) else if(is.function(reciplambda)) reciplambda <- reciplambda(X$x, X$y) else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) check.nvector(reciplambda, npts) else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) } else { # lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambda <- predict(model, locations=X, type="trend") } else { 
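      ## With update=TRUE the model is refitted to X, so the fitted
      ## intensities at the data points can be computed by the
      ## leave-one-out method; in this case the result is not flagged
      ## as 'dangerous'.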
if(is.ppm(model)) { model <- update(model, Q=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } danger <- FALSE } } else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, a function, or a fitted model")) # evaluate reciprocal reciplambda <- 1/lambda } # renormalise if(renormalise && npts > 0) { check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (areaW/sum(reciplambda))^normpower } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, lambda) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances if(npts > 1) { if(is.null(close)) { #' find close pairs close <- closepairs(X, rmax+hmax) } else { #' check 'close' has correct format needed <- c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=win, check=FALSE) wIJ <- reciplambda[I] * reciplambda[J] } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- fv(df, "r", quote(g[inhom](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), c("distance argument r", "theoretical Poisson %s"), fname=c("g", "inhom")) ###### compute ####### if(any(correction=="translate")) { # translation correction if(npts > 1) { XJ <- ppp(close$xj, close$yj, window=win, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) gT <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g if(renormalise) gT <- gT * renorm.factor } else gT <- undefined out <- bind.fv(out, data.frame(trans=gT), "{hat(%s)[%s]^{Trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) gR <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g if(renormalise) gR <- gR * renorm.factor } else gR <- undefined out <- bind.fv(out, data.frame(iso=gR), "{hat(%s)[%s]^{Ripley}}(r)", "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . 
~ r fvnames(out, ".") <- corrxns unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat.core/R/laslett.R0000644000176200001440000002676614144333462015065 0ustar liggesusers#' Calculating Laslett's transform #' Original by Kassel Hingee #' Adapted by Adrian Baddeley #' Copyright (C) 2016 Kassel Hingee and Adrian Baddeley # $Revision: 1.10 $ $Date: 2020/12/19 05:25:06 $ laslett <- function(X, ..., verbose=FALSE, plotit=TRUE, discretise=FALSE, type = c("lower", "upper", "left", "right")){ #' validate X and convert to a logical matrix type <- match.arg(type) oldX <- X if(is.im(X)) { X <- solutionset(X != 0) } else if(!is.owin(X)) stop("X should be an image or a window", call.=FALSE) if(type != "lower") { nrot <- match(type, c("right", "upper", "left")) theta <- nrot * pi/2 X <- rotate(X, angle=-theta) } if(!discretise && (is.polygonal(X) || is.rectangle(X))) { result <- polyLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE) } else { result <- maskLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE) } if(type != "lower") { #' rotate back prods <- c("TanOld", "TanNew", "Rect") result[prods] <- lapply(result[prods], rotate, angle=theta) } if(plotit) plot(result, ...) result$type <- type return(result) } maskLaslett <- local({ sumtoright <- function(x) { rev(cumsum(rev(x))) - x } maskLaslett <- function(X, ..., eps=NULL, dimyx=NULL, xy=NULL, oldX=X, verbose=FALSE, plotit=TRUE) { if(is.null(oldX)) oldX <- X X <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy) unitX <- unitname(X) if(is.empty(X)) stop("Empty window!") M <- as.matrix(X) #' ....... Compute transformed set ................... #' Total width of transformed set on each row TotFalse <- rowSums(!M) ## compute transformed set Laz <- (col(M) <= TotFalse[row(M)]) Laz <- owin(mask=Laz, xrange=X$xrange, yrange=X$yrange, unitname=unitX) #' Largest sub-rectangle of transformed set width <- min(TotFalse) * X$xstep Rect <- owin(X$xrange[1L] + c(0, width), X$yrange, unitname=unitX) #' Along each horizontal line (row), #' compute a running count of FALSE pixels. #' This is the mapping for the set transform #' (the value at any pixel gives the new column number #' for the transformed pixel) CumulFalse <- t(apply(!M, 1L, cumsum)) #' discard one column for consistency with other matrices below CumulFalse <- CumulFalse[,-1L,drop=FALSE] #' ....... Find lower tangent points ................. #' compute discrete gradient in x direction G <- t(apply(M, 1, diff)) #' detect entries, exits, changes Exit <- (G == -1) Enter <- (G == 1) Change <- Exit | Enter #' form a running total of the number of pixels inside X #' to the **right** of the current pixel FutureInside <- t(apply(M, 1, sumtoright))[,-1L,drop=FALSE] #' find locations of changes loc <- which(Change, arr.ind=TRUE) #' don't consider entries/exits in the bottom row ok <- (loc[,"row"] > 1) loc <- loc[ok, , drop=FALSE] #' corresponding locations on horizontal line below current line below <- cbind(loc[,"row"]-1L, loc[,"col"]) #' look up data at these locations df <- data.frame(row=loc[,"row"], col=loc[,"col"], newcol=CumulFalse[loc], Exit=Exit[loc], Enter=Enter[loc], InsideBelow=M[below], FutureInsideBelow=FutureInside[below]) #' identify candidates for tangents df$IsCandidate <- with(df, Enter & !InsideBelow & (newcol < TotFalse[row])) #' collect data for each horizontal line (row) #' then sort by increasing x (column) within each line. 
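    #' A pixel is a candidate lower tangent if the set is entered there
    #' (Enter is TRUE), the pixel immediately below lies outside X, and
    #' the transformed location still falls inside the transformed set.
    #' Candidates are confirmed below by scanning rightwards along the row.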
oo <- with(df, order(row, col)) df <- df[oo, , drop=FALSE] #' divide data into one piece for each hztal line g <- split(df, df$row) #' Initialise empty list of tangent points tangents <- data.frame(row=integer(0), col=integer(0), newcol=integer(0)) #' process each hztal line for(p in g) { tangents <- with(p, { candidates <- which(IsCandidate) # indices are row numbers in 'p' if(verbose) print(p) exits <- which(Exit) for(i in candidates) { if(verbose) cat(paste("candidate", i, "\n")) if(any(found <- (exits > i))) { j <- exits[min(which(found))] if(verbose) cat(paste("next exit:", j, "\n")) #' check no pixels inside X in row below between i and j if(FutureInsideBelow[i] == FutureInsideBelow[j]) { if(verbose) cat(paste("Tangent (1) at row=", row[i], "col=", col[i], "\n")) tangents <- rbind(tangents, data.frame(row=row[i], col=col[i], newcol=newcol[i])) } } else { #' no exits on this row if(verbose) cat("no subsequent exit\n") if(FutureInsideBelow[i] == 0) { if(verbose) cat(paste("Tangent (2) at row=", row[i], "col=", col[i], "\n")) tangents <- rbind(tangents, data.frame(row=row[i], col=col[i], newcol=newcol[i])) } } } if(verbose) cat("====\n") tangents }) } tangents$oldx <- X$xcol[tangents$col] tangents$newx <- X$xcol[tangents$newcol] tangents$y <- X$yrow[tangents$row] TanOld <- with(tangents, ppp(oldx, y, window=Frame(X), unitname=unitX)) TanNew <- with(tangents, ppp(newx, y, window=Laz), unitname=unitX) result <- list(oldX=oldX, TanOld=TanOld, TanNew=TanNew, Rect=Rect, df=tangents) class(result) <- c("laslett", class(result)) if(plotit) plot(result, ...) return(result) } maskLaslett }) print.laslett <- function(x, ...) { cat("Laslett Transform\n") cat("\nOriginal object:\n") print(x$oldX) cat("\nTransformed set:\n") W <- Window(x$TanNew) print(W) unitinfo <- summary(unitname(W)) cat("\nTransformed area:", area.owin(W), "square", unitinfo$plural, unitinfo$explain, fill=TRUE) cat("\n") type <- x$type %orifnull% "lower" cat(npoints(x$TanNew), type, "tangent points found.", fill=TRUE) return(invisible(NULL)) } plot.laslett <- function(x, ..., Xpars=list(box=TRUE, col="grey"), pointpars=list(pch=3, cols="blue"), rectpars=list(lty=3, border="green")) { Display <- with(x, solist(Original= layered(oldX, TanOld, plotargs=list(Xpars, pointpars)), Transformed= layered(TanNew, Rect, plotargs=list(pointpars, rectpars)))) #' ignore arguments intended for as.mask argh <- list(...) if(any(bad <- names(argh) %in% c("eps", "dimyx", "xy"))) argh <- argh[!bad] dont.complain.about(Display) do.call(plot, resolve.defaults(list(x=quote(Display)), argh, list(main="", mar.panel=0, hsep=1, equal.scales=TRUE))) return(invisible(NULL)) } polyLaslett <- function(X, ..., oldX=X, verbose=FALSE, plotit=TRUE) { X <- as.polygonal(X) if(is.empty(X)) stop("Empty window!") unitX <- unitname(X) # expand frame slightly B <- Frame(X) B <- grow.rectangle(B, max(sidelengths(B))/8) x0 <- B$xrange[1L] x1 <- B$xrange[2L] # extract vertices v <- vertices(X) nv <- length(v$x) # .......... compute transformed set ..................... # make horizontal segments from each vertex to sides of box left <- with(v, psp(rep(x0,nv), y, x, y, window=B, marks=1:nv, check=FALSE)) right <- with(v, psp(x, y, rep(x1,nv), y, window=B, marks=1:nv, check=FALSE)) # intersect each horizontal segment with the window if(verbose) cat("Processing", nv, "polygon vertices... ") clipleft <- clip.psp(left, X) clipright <- clip.psp(right, X) if(verbose) cat("Done.\n") # calculate lengths of clipped segments, and group by vertex. 
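  # (Illustrative note: the Laslett transform essentially maps each point
  #  (x,y) to (e(x,y), y), where e(x,y) is the total uncovered length of
  #  the horizontal segment from the left frame edge to (x,y); the empty
  #  lengths computed below therefore give the transformed x coordinates.)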
# marks indicate which hztal segment was the parent of each piece. lenleft <- tapply(lengths_psp(clipleft), factor(marks(clipleft), levels=1:nv), sum) lenright <- tapply(lengths_psp(clipright), factor(marks(clipright), levels=1:nv), sum) lenleft[is.na(lenleft)] <- 0 lenright[is.na(lenright)] <- 0 emptylenleft <- lengths_psp(left) - lenleft emptylenright <- lengths_psp(right) - lenright # The transformed polygon isrightmost <- (lenright == 0) yright <- v$y[isrightmost] xright <- x0 + (emptylenleft+emptylenright)[isrightmost] minxright <- min(xright) # right margin of largest rectangle ord <- order(yright) Ty <- yright[ord] Tx <- xright[ord] nT <- length(Ty) if(Tx[nT] > x0) { Ty <- c(Ty, Ty[nT]) Tx <- c(Tx, x0) } if(Tx[1L] > x0) { Ty <- c(Ty[1L], Ty) Tx <- c(x0, Tx) } TX <- owin(B$xrange, B$yrange, poly=list(x=Tx, y=Ty), check=FALSE) TX <- TX[Frame(X)] # .......... identify lower tangents ..................... V <- as.ppp(v, W=Frame(X), unitname=unitX) is.candidate <- is.tangent <- logical(nv) # apply simple criteria for ruling in or out Plist <- X$bdry cumnv <- 0 for(i in seq_along(Plist)) { P <- Plist[[i]] xx <- P$x yy <- P$y nn <- length(xx) # xnext <- c(xx[-1L], xx[1L]) ynext <- c(yy[-1L], yy[1L]) # xprev <- c(xx[nn], xx[-nn]) yprev <- c(yy[nn], yy[-nn]) is.candidate[cumnv + seq_len(nn)] <- if(!is.hole.xypolygon(P)) { (yprev > yy & ynext >= yy) } else { (yprev >= yy & ynext > yy) } cumnv <- cumnv + nn } ## was.candidate <- is.candidate #' reject candidates lying too close to boundary tooclose <- (bdist.points(V[is.candidate]) < diameter(Frame(V))/1000) is.candidate[is.candidate][tooclose] <- FALSE #' evaluate candidate points #' make tiny boxes around vertex candidates <- which(is.candidate) nc <- length(candidates) nnd <- nndist(V) if(verbose) { cat(paste("Processing", nc, "tangent candidates ... ")) pstate <- list() } tiny <- .Machine$double.eps for(j in 1:nc) { i <- candidates[j] eps <- nnd[i]/16 xi <- v$x[i] yi <- v$y[i] Below <- owin(xi + c(-eps,eps), yi + c(-eps, 0)) # Above <- owin(xi + c(-eps, eps), yi + c(0, eps)) UpLeft <- owin(xi + c(-eps, 0), yi + c(0, eps)) is.tangent[i] <- (overlap.owin(X, Below) <= tiny) && (overlap.owin(X, UpLeft) < eps^2) if(verbose) pstate <- progressreport(j, nc, state=pstate) } if(verbose) cat(paste("Found", sum(is.tangent), "tangents\n")) TanOld <- V[is.tangent] ynew <- TanOld$y xnew <- x0 + emptylenleft[is.tangent] TanNew <- ppp(xnew, ynew, window=TX, check=FALSE, unitname=unitX) # maximal rectangle Rect <- owin(c(X$xrange[1L], minxright), X$yrange, unitname=unitX) # df <- data.frame(xold=TanOld$x, xnew=TanNew$x, y=TanNew$y) # result <- list(oldX=oldX, TanOld=TanOld, TanNew=TanNew, Rect=Rect, df=df) class(result) <- c("laslett", class(result)) if(plotit) plot(result, ...) return(result) } spatstat.core/R/percy.R0000644000176200001440000000542514144333463014525 0ustar liggesusers## percus.R ## ## Percus-Yevick style approximations to pcf and K ## ## $Revision: 1.4 $ $Date: 2014/01/31 10:10:19 $ pcfmodel.ppm <- local({ pcfmodel.ppm <- function(model, ...) 
{ if(is.multitype(model)) stop("Not yet implemented for multitype models") if(!is.stationary(model)) stop("Model must be stationary") if(is.poisson(model)) return(function(r) rep(1, length(r))) inte <- as.interact(model) if(inte$family$name != "pairwise") stop("Only implemented for pairwise-interaction models") lambda <- intensity(model) beta <- exp(coef(model)[1]) par <- inte$par pot <- inte$pot f <- fitin(model) Vcoefs <- f$coefs[f$Vnames] Mayer <- inte$Mayer G <- Mayer(Vcoefs, inte) irange <- reach(inte, epsilon=1e-6) G2fun <- inte$Percy testit <- resolve.1.default(list(testit=FALSE), list(...)) if(testit || is.null(G2fun)) G2fun <- pairwisePercy fun <- function(r) { pcfapprox(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) } return(fun) } pcfapprox <- function(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) { as.numeric((beta/lambda)^2 * exp(logpairpot(r, pot, par, Vcoefs) - lambda * G2fun(r, Vcoefs, par, pot=pot, irange=irange, G=G))) } logpairpot <- function(r, pot, par, Vcoefs) { as.numeric(pot(matrix(r, ncol=1), par) %*% Vcoefs) } negpair <- function(x,y, pot, par, Vcoefs) { ## evaluate 1 - g(x,y) ## where g(x,y) is pair interaction between (0,0) and (x,y) 1 - exp(logpairpot(sqrt(x^2+y^2), pot, par, Vcoefs)) } pairwisePercy <- function(r, Vcoefs, par, ..., G, pot, irange, dimyx=256) { S <- max(max(r), irange) ng <- as.im(negpair, square(c(-S,S)), pot=pot, par=par, Vcoefs=Vcoefs, dimyx=dimyx) ng2 <- convolve.im(ng) rr <- seq(min(r), max(r), length=dimyx[1]) yy <- ng2[list(x=rr, y=rep.int(0, dimyx[1]))] zz <- 2 * G - yy z <- approx(rr, zz, r)$y return(z) } pcfmodel.ppm }) Kmodel.ppm <- local({ Kmodel.ppm <- function(model, ...) { if(is.poisson(model)) return(function(r) { pi * r^2 }) pc <- pcfmodel(model, ...) K <- function(r) pcf2K(r, pc) return(K) } pcf2K <- function(r, pc) { ## integrate the pair correlation function to obtain the K-function if(length(r) == 1) { ## definite integral spcfs <- function(s) { s * pc(s) } y <- 2 * pi * integrate(spcfs, lower=0, upper=r)$value } else { ## indefinite integral rr <- seq(0, max(r), length=1025) dr <- max(r)/(length(rr) - 1) ff <- 2 * pi * rr * pc(rr) yy <- dr * cumsum(ff) y <- approx(rr, yy, r)$y } return(y) } Kmodel.ppm }) spatstat.core/R/ssf.R0000644000176200001440000001624314144333464014177 0ustar liggesusers# # ssf.R # # spatially sampled functions # # $Revision: 1.21 $ $Date: 2020/12/19 05:33:45 $ # ssf <- function(loc, val) { stopifnot(is.ppp(loc)) if(is.function(val)) val <- val(loc$x, loc$y) if(is.data.frame(val)) val <- as.matrix(val) if(!is.matrix(val)) val <- matrix(val, ncol=1, dimnames=list(NULL, "value")) if(nrow(val) != npoints(loc)) stop("Incompatible lengths") result <- loc %mark% val class(result) <- c("ssf", class(result)) attr(result, "ok") <- complete.cases(val) return(result) } print.ssf <- function(x, ..., brief=FALSE) { if(brief) { splat("Spatial function sampled at", npoints(x), "locations") } else { splat("Spatially sampled function") cat("Locations:\n\t") print(unmark(x)) } val <- marks(x) if(!is.matrix(val)) { d <- 1 warning("Internal format error: val is not a matrix") } else d <- ncol(val) if(!brief) { type <- if(d == 1) "Scalar" else paste(d, "-vector", sep="") splat(type, "valued function") } if(d > 1 && !is.null(nama <- colnames(val))) splat("Component names:", commasep(sQuote(nama))) return(invisible(NULL)) } summary.ssf <- function(object, ...) { z <- NextMethod("summary") class(z) <- c("summary.ssf", class(z)) return(z) } print.summary.ssf <- function(x, ...) 
{ splat("Spatially sampled function") cat("Locations:\n\t") NextMethod("print") } image.ssf <- function(x, ...) { do.call("plot", resolve.defaults(list(quote(x), how="smoothed"), list(...))) } as.im.ssf <- function(X, ...) nnmark(X, ...) as.function.ssf <- function(x, ...) { X <- x mX <- marks(X) switch(markformat(X), vector = { g <- function(x, y=NULL) { Y <- xy.coords(x,y)[c("x","y")] J <- nncross(Y, X, what="which") result <- mX[J] return(unname(result)) } }, dataframe = { g <- function(x, y=NULL) { Y <- xy.coords(x,y)[c("x","y")] J <- nncross(Y, X, what="which") result <- mX[J,,drop=FALSE] row.names(result) <- NULL return(result) } }, stop("Marks must be a vector or data.frame")) h <- funxy(g, Frame(X)) return(h) } plot.ssf <- function(x, ..., how=c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma=NULL, contourargs=list()) { xname <- short.deparse(substitute(x)) how <- match.arg(how) style <- match.arg(style) otherargs <- list(...) # convert to images y <- switch(how, points = as.ppp(x), nearest = nnmark(x), smoothed = Smooth(x, sigma=sigma) ) dont.complain.about(y) # points plot if(how == "points") { out <- do.call("plot", resolve.defaults(list(quote(y)), otherargs, list(main=xname))) if(is.null(out)) return(invisible(NULL)) return(out) } # image plot switch(style, image = { out <- do.call("plot", resolve.defaults(list(quote(y)), otherargs, list(main=xname))) }, contour = { xwin <- as.owin(x) dont.complain.about(xwin) do.call("plot", resolve.defaults(list(quote(xwin)), otherargs, list(main=xname))) do.call("contour", resolve.defaults(list(quote(y), add=TRUE), contourargs)) out <- NULL }, imagecontour = { out <- do.call("plot", resolve.defaults(list(quote(y)), otherargs, list(main=xname))) do.call("contour", resolve.defaults(list(quote(y), add=TRUE), contourargs)) }) return(invisible(out)) } contour.ssf <- function(x, ..., main, sigma=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) y <- Smooth(x, sigma=sigma) contour(y, ..., main=main) return(invisible(NULL)) } Smooth.ssf <- function(X, ...) { stopifnot(inherits(X, "ssf")) ok <- attr(X, "ok") Y <- as.ppp(X)[ok] argh <- list(...) isnul <- as.logical(unlist(lapply(argh, is.null))) nonnularg <- argh[!isnul] sigma0 <- if(any(c("sigma", "varcov") %in% names(nonnularg))) NULL else 1.4 * max(nndist(X)) dont.complain.about(Y) Z <- do.call("Smooth.ppp", resolve.defaults(list(X = quote(Y)), list(...), list(sigma=sigma0), .MatchNull=FALSE)) # don't take NULL for an answer! return(Z) } "[.ssf" <- function(x, i, j, ..., drop) { loc <- unmark(x) val <- marks(x) ok <- attr(x, "ok") # if(!missing(j)) val <- val[, j, drop=FALSE] if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i] loc <- unmark(loci) id <- marks(loci) # extract val <- val[id, , drop=FALSE] ok <- ok[id] } out <- loc %mark% val class(out) <- c("ssf", class(out)) attr(out, "ok") <- ok return(out) } as.ppp.ssf <- function(X, ...) { class(X) <- "ppp" attr(X, "ok") <- NULL return(X) } marks.ssf <- function(x, ...) { val <- x$marks if(is.null(dim(val))) val <- matrix(val, ncol=1) if(is.data.frame(val)) val <- as.matrix(val) return(val) } "marks<-.ssf" <- function(x, ..., value) { ssf(unmark(x), value) } unmark.ssf <- function(X) { unmark(as.ppp(X)) } with.ssf <- function(data, ...) { loc <- as.ppp(data) val <- marks(data) newval <- with(as.data.frame(val), ...) 
  if(length(newval) == npoints(loc) ||
     (is.matrix(newval) && nrow(newval) == npoints(loc)))
    return(ssf(loc, newval))
  return(newval)
}

## the following is NOT a method for 'apply' !!

apply.ssf <- function(X, ...) {
  loc <- as.ppp(X)
  val <- marks(X)
  newval <- apply(val, ...)
  if(length(newval) == npoints(loc) ||
     (is.matrix(newval) && nrow(newval) == npoints(loc)))
    return(ssf(loc, newval))
  return(newval)
}

range.ssf <- function(x, ...) range(marks(x), ...)
min.ssf <- function(x, ...) min(marks(x), ...)
max.ssf <- function(x, ...) max(marks(x), ...)

integral.ssf <- function(f, domain=NULL, ..., weights=attr(f, "weights")) {
  if(!is.null(weights)) {
    check.nvector(weights, npoints(f), oneok=TRUE)
    if(length(weights) == 1) weights <- rep(weights, npoints(f))
  }
  if(is.tess(domain)) {
    result <- sapply(tiles(domain), integral.ssf, f=f, weights=weights)
    if(length(dim(result)) > 1) result <- t(result)
    return(result)
  }
  if(!is.null(domain)) {
    ok <- inside.owin(f, w=domain)
    f <- f[ok,]
    if(!is.null(weights)) weights <- weights[ok]
  }
  y <- marks(f)
  if(is.null(weights)) {
    z <- if(!is.matrix(y)) mean(y, na.rm=TRUE) else colMeans(y, na.rm=TRUE)
    a <- area(Window(f))
  } else {
    z <- if(!is.matrix(y)) weighted.mean(y, w=weights, na.rm=TRUE) else
         apply(y, 2, weighted.mean, w=weights, na.rm=TRUE)
    a <- sum(weights)
  }
  z[!is.finite(z)] <- 0
  return(z * a)
}
spatstat.core/R/kernel2d.R0000644000176200001440000001055614144333462015111 0ustar liggesusers#'
#' kernel2d.R
#'
#' Two-dimensional smoothing kernels
#'
#' $Revision: 1.14 $ $Date: 2018/09/01 09:09:34 $
#'
.Spatstat.2D.KernelTable <- list(
  #' table entries:
  #' d = density of standardised kernel
  #' sd = standard deviation of x coordinate, for standardised kernel
  #' hw = halfwidth of support of standardised kernel
  gaussian=list(
    d = function(x,y, ...) { dnorm(x) * dnorm(y) },
    sd = 1,
    hw = 8,
    symmetric = TRUE),
  epanechnikov=list(
    d = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) },
    sd = 1/sqrt(6),
    hw = 1,
    symmetric = TRUE),
  quartic=list(
    d = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 },
    sd = 1/sqrt(8),
    hw = 1,
    symmetric = TRUE),
  disc=list(
    d = function(x,y,...)
{ (1/pi) * as.numeric(x^2 + y^2 <= 1) }, sd = 1/2, hw = 1, symmetric = TRUE) ) validate2Dkernel <- function(kernel, fatal=TRUE) { if(is.character(match2DkernelName(kernel))) return(TRUE) if(is.im(kernel) || is.function(kernel)) return(TRUE) if(!fatal) return(FALSE) if(is.character(kernel)) stop(paste("Unrecognised choice of kernel", sQuote(kernel), paren(paste("options are", commasep(sQuote(names(.Spatstat.2D.KernelTable)))))), call.=FALSE) stop(paste("kernel should be a character string,", "a pixel image, or a function (x,y)"), call.=FALSE) } match2DkernelName <- function(kernel) { if(!is.character(kernel) || length(kernel) != 1) return(NULL) nama <- names(.Spatstat.2D.KernelTable) m <- pmatch(kernel, nama) if(is.na(m)) return(NULL) return(nama[m]) } lookup2DkernelInfo <- function(kernel) { validate2Dkernel(kernel) kernel <- match2DkernelName(kernel) if(is.null(kernel)) return(NULL) return(.Spatstat.2D.KernelTable[[kernel]]) } evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel)) { info <- lookup2DkernelInfo(kernel) if(scalekernel) { ## kernel adjustment factor sdK <- if(is.character(kernel)) info$sd else 1 ## transform coordinates to x',y' such that kerfun(x', y') ## yields density k(x,y) at desired bandwidth if(is.null(varcov)) { rr <- sdK/sigma x <- x * rr y <- y * rr scalefactor <- rr^2 } else { SinvH <- matrixinvsqrt(varcov) rSinvH <- sdK * SinvH XY <- cbind(x, y) %*% rSinvH x <- XY[,1L] y <- XY[,2L] scalefactor <- det(rSinvH) } } ## now evaluate kernel if(is.character(kernel)) { kerfun <- info$d result <- kerfun(x, y) } else if(is.function(kernel)) { argh <- list(...) if(length(argh) > 0) argh <- argh[names(argh) %in% names(formals(kernel))] result <- do.call(kernel, append(list(x, y), argh)) if(anyNA(result)) stop("NA values returned from kernel function") if(length(result) != length(x)) stop("Kernel function returned the wrong number of values") } else if(is.im(kernel)) { result <- kernel[list(x=x, y=y)] if(anyNA(result) || length(result) != length(x)) stop("Domain of kernel image is not large enough") } else stop("Unrecognised format for kernel") if(scalekernel) result <- scalefactor * result return(result) } cutoff2Dkernel <- function(kernel, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel), cutoff=NULL, fatal=FALSE) { info <- lookup2DkernelInfo(kernel) ## if scalekernel = FALSE, 'cutoff' is an absolute distance ## if scalekernel = TRUE, 'cutoff' is expressed in number of s.d. if(scalekernel) { if(is.null(cutoff)) { ## template kernel's standard deviation sdK <- info$sd %orifnull% 1 ## template kernel's halfwidth hwK <- info$hw %orifnull% 8 ## cutoff for kernel with sd=1 cutoff <- hwK/sdK } ## required standard deviation if(!is.null(sigma)) { sig <- sigma } else if(!is.null(varcov)) { lam <- eigen(varcov)$values sig <- sqrt(max(lam)) } else stop("Cannot determine standard deviation") ## cutoff <- cutoff * sig } if(fatal && is.null(cutoff)) stop(paste("The argument", sQuote("cutoff"), "is required", "when a non-Gaussian kernel is specified", "and scalekernel=FALSE"), call.=FALSE) return(cutoff) } spatstat.core/R/hierhard.R0000644000176200001440000001477614144333462015201 0ustar liggesusers## ## hierhard.R ## ## $Revision: 1.4 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical hard core process ## ## ------------------------------------------------------------------- ## HierHard <- local({ # ......... 
define interaction potential HHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(h) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] ## corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { # apply relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- TRUE return(z) } #### end of 'pot' function #### # Set up basic object except for family and parameters BlankHHobject <- list( name = "Hierarchical hard core process", creator = "HierHard", family = "hierpair.family", # evaluated later pot = HHpotential, par = list(types=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of hardcore distances", "hierarchical order"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1L, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) h[!(archy$relation)] <- NA } HierHard(types=types,hradii=hradii,archy=archy) }, init = function(self) { types <- self$par$types hradii <- self$par$hradii ## hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, 
sQuote("hradii"), asymmok=TRUE) } }, update = NULL, # default OK print = function(self) { hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(hradii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") if(!is.null(hradii)) { splat("Hardcore radii:") print(hiermat(dround(hradii), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, version=NULL # to be added ) class(BlankHHobject) <- "interact" # finally create main function HierHard <- function(hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } out <- instantiate.interact(BlankHHobject, list(types=types, hradii=hradii, archy=archy)) if(!is.null(types) && !is.null(out$par$hradii)) dimnames(out$par$hradii) <- list(types,types) return(out) } HierHard <- intermaker(HierHard, BlankHHobject) HierHard }) spatstat.core/R/pairwise.R0000644000176200001440000000505714144333463015227 0ustar liggesusers# # # pairwise.S # # $Revision: 1.12 $ $Date: 2019/02/20 03:32:22 $ # # Pairwise() create a user-defined pairwise interaction process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Pairwise <- function(pot, name = "user-defined pairwise interaction process", par = NULL, parnames=NULL, printfun) { fop <- names(formals(pot)) if(!isTRUE(all.equal(fop, c("d", "par"))) && !isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) if(!is.null(parnames)) { stopifnot(is.character(parnames)) if(is.null(par) || length(par) != length(parnames)) stop("par does not match parnames") } if(missing(printfun)) printfun <- function(self) { cat("Potential function:\n") print(self$pot) if(!is.null(parnames <- self$parnames)) { for(i in 1:length(parnames)) { cat(paste(parnames[i], ":\t")) pari <- self$par[[i]] if(is.numeric(pari) && length(pari) == 1) cat(pari, "\n") else print(pari) } } } out <- list( name = name, creator = "Pairwise", family = pairwise.family, pot = pot, par = par, parnames = parnames, hasInf = NA, init = NULL, update = function(self, ...){ do.call(Pairwise, resolve.defaults(list(...), list(pot=self$pot, name=self$name, par=self$par, parnames=self$parnames, printfun=self$print))) } , print = printfun, version = versionstring.spatstat() ) class(out) <- "interact" return(out) } Pairwise <- intermaker(Pairwise, list(creator="Pairwise", name="user-defined pairwise interaction process", par=formals(Pairwise), parnames=list("the potential", "the name of the interaction", "the list of parameters", "a description of each parameter", "an optional print function"))) spatstat.core/R/eem.R0000644000176200001440000000132214144333462014140 0ustar liggesusers# eem.R # # Computes the Stoyan-Grabarnik "exponential energy weights" # # 
$Revision: 1.6 $ $Date: 2021/10/30 05:19:06 $ # eem <- function(fit, ...) { UseMethod("eem") } eem.ppm <- function(fit, check=TRUE, ...) { verifyclass(fit, "ppm") lambda <- fitted.ppm(fit, dataonly=TRUE, check=check) eemarks <- 1/lambda attr(eemarks, "type") <- "eem" attr(eemarks, "typename") <- "exponential energy marks" return(eemarks) } eem.slrm <- function(fit, check=TRUE, ...) { verifyclass(fit, "slrm") Y <- response(fit) lambdaY <- predict(fit, type="intensity")[Y, drop=FALSE] eemarks <- 1/lambdaY attr(eemarks, "type") <- "eem" attr(eemarks, "typename") <- "exponential energy marks" return(eemarks) } spatstat.core/R/Kmulti.R0000644000176200001440000003107514144333461014646 0ustar liggesusers# # Kmulti.S # # Compute estimates of cross-type K functions # for multitype point patterns # # $Revision: 5.55 $ $Date: 2021/10/09 02:04:59 $ # # # -------- functions ---------------------------------------- # Kcross() cross-type K function K_{ij} # between types i and j # # Kdot() K_{i\bullet} # between type i and all points regardless of type # # Kmulti() (generic) # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # including 'marks' vector # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lcross" <- function(X, i, j, ..., from, to, correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1] if(missing(j)) j <- if(!missing(to)) to else levels(marks(X))[2] if(missing(correction)) correction <- NULL K <- Kcross(X, i, j, ..., correction=correction) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) L <- rebadge.fv(L, substitute(L[i,j](r), list(i=iname,j=jname)), c("L", paste0("list(", iname, ",", jname, ")")), new.yexp=substitute(L[list(i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Ldot" <- function(X, i, ..., from, correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1] if(missing(correction)) correction <- NULL K <- Kdot(X, i, ..., correction=correction) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[i ~ dot](r), list(i=iname)), c("L", paste(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(L[i ~ symbol("\267")](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Kcross" <- function(X, i, j, r=NULL, breaks=NULL, correction =c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE, from, to) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1] if(missing(j)) j <- if(!missing(to)) to else levels(marx)[2] I <- (marx == i) 
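  ## When i == j the cross-type K function coincides with the
  ## ordinary K function of the points of type i, so the code below
  ## delegates to Kest() in that case.
  ## Usage sketch (illustrative; 'amacrine' is a standard multitype
  ## dataset in spatstat, with marks "on" and "off"):
  ##   K <- Kcross(amacrine, "on", "off", correction="isotropic")
  ##   L <- Lcross(amacrine, "on", "off")
  ##   plot(K)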
if(!any(I)) stop(paste("No points have mark i =", i)) if(i == j) { ## use Kest XI <- X[I] dont.complain.about(XI) result <- do.call(Kest, resolve.defaults(list(X=quote(XI), r=quote(r), breaks=quote(breaks), correction=correction, ratio=ratio), list(rmax=NULL), ## forbidden list(...))) } else { J <- (marx == j) if(!any(J)) stop(paste("No points have mark j =", j)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ratio=ratio, ...) } result <- rebadge.as.crossfun(result, "K", NULL, i, j) } "Kdot" <- function(X, i, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE, from) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points if(!any(I)) stop(paste("No points have mark i =", i)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ..., ratio=ratio) result <- rebadge.as.dotfun(result, "K", NULL, i) return(result) } "Kmulti"<- function(X, I, J, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., rmax=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- X$window areaW <- area(W) dotargs <- list(...) domainI <- resolve.1.default("domainI", dotargs) %orifnull% W domainJ <- resolve.1.default("domainJ", dotargs) %orifnull% W areaI <- area(domainI) areaJ <- area(domainJ) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") if(!any(I)) stop("no points belong to subset I") if(!any(J)) stop("no points belong to subset J") nI <- sum(I) nJ <- sum(J) lambdaI <- nI/areaI lambdaJ <- nJ/areaJ # r values rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambdaJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", quote(K[IJ](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "list(I,J)"), yexp=quote(K[list(I,J)](r))) # save numerator and denominator? 
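  ## When ratio=TRUE, the numerator and denominator of each
  ## edge-corrected estimator are retained as separate 'fv' objects,
  ## so that estimates from several point patterns can later be
  ## combined by pooling numerators and denominators (class 'rat').
  ## Usage sketch (illustrative):
  ##   K <- Kmulti(amacrine, marks(amacrine) == "on",
  ##               marks(amacrine) == "off", ratio=TRUE)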
if(ratio) { denom <- lambdaI * lambdaJ * areaI numK <- eval.fv(denom * K) denK <- eval.fv(denom + K * 0) attributes(numK) <- attributes(denK) <- attributes(K) attr(numK, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denK, "desc")[2] <- "denominator for theoretical Poisson %s" } # find close pairs of points XI <- X[I] XJ <- X[J] close <- crosspairs(XI, XJ, max(r), what="ijd") # close$i and close$j are serial numbers in XI and XJ respectively; # map them to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dcloseIJ <- close$d icloseI <- close$i jcloseJ <- close$j # Compute estimates by each of the selected edge corrections. if(any(correction == "none")) { # uncorrected! wh <- whist(dcloseIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambdaI * lambdaJ * areaI Kun <- numKun/denKun K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(un=numKun), "{hat(%s)[%s]^{un}}(r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "{hat(%s)[%s]^{un}}(r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # distance to boundary from each point of type I bI <- bdist.points(XI) # distance to boundary from first element of each (i, j) pair bcloseI <- bI[icloseI] # apply reduced sample algorithm RS <- Kount(dcloseIJ, bcloseI, bI, breaks) if(any(correction == "bord.modif")) { denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- denom.area * nI * nJ Kbm <- numKbm/denKbm K <- bind.fv(K, data.frame(bord.modif=Kbm), "{hat(%s)[%s]^{bordm}}(r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "{hat(%s)[%s]^{bordm}}(r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "{hat(%s)[%s]^{bordm}}(r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambdaJ * RS$denom.count Kb <- numKb/denKb K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "{hat(%s)[%s]^{bord}}(r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "{hat(%s)[%s]^{bord}}(r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) wh <- whist(dcloseIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambdaI * lambdaJ * areaI Ktrans <- numKtrans/denKtrans rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") if(ratio) { numK <- bind.fv(numK, data.frame(trans=numKtrans), "{hat(%s)[%s]^{trans}}(r)", "numerator of translation-corrected estimate of %s", "trans") denK <- bind.fv(denK, 
data.frame(trans=denKtrans), "{hat(%s)[%s]^{trans}}(r)", "denominator of translation-corrected estimate of %s", "trans") } } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dcloseIJ, ncol=1)) wh <- whist(dcloseIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambdaI * lambdaJ * areaI Kiso <- numKiso/denKiso rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") if(ratio) { numK <- bind.fv(numK, data.frame(iso=numKiso), "{hat(%s)[%s]^{iso}}(r)", "numerator of Ripley isotropic correction estimate of %s", "iso") denK <- bind.fv(denK, data.frame(iso=denKiso), "{hat(%s)[%s]^{iso}}(r)", "denominator of Ripley isotropic correction estimate of %s", "iso") } } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(ratio) { # finish up numerator & denominator formula(numK) <- formula(denK) <- . ~ r unitname(numK) <- unitname(denK) <- unitname(K) # tack on to result K <- rat(K, numK, denK, check=FALSE) } return(K) } spatstat.core/R/summary.dppm.R0000644000176200001440000000533414144333464016037 0ustar liggesusers#' #' summary.dppm.R #' #' $Revision: 1.5 $ $Date: 2021/07/05 08:48:33 $ summary.dppm <- function(object, ..., quick=FALSE) { nama <- names(object) result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))] ## Fitting information result$has.subset <- "subset" %in% names(object$call) ## Summarise trend component result$trend <- summary(as.ppm(object), ..., quick=quick) ## repulsion strength result$repul <- mean(repul(object)) #' pack up class(result) <- "summary.dppm" return(result) } print.summary.dppm <- function(x, ...) { terselevel <- spatstat.options('terse') digits <- getOption('digits') splat(if(x$stationary) "Stationary" else "Inhomogeneous", "determinantal point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) Fit <- x$Fit if(waxlyrical('gory', terselevel)) { switch(Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", Fit$StatName) print(Fit$mcfit) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(optimStatus(Fit$clfit)) }, adapcl = { splat("Fitted by adaptive second order composite likelihood") splat("\tepsilon =", x$Fit$epsilon) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(optimStatus(Fit$clfit)) }, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) } # ............... trend ......................... parbreak() splat("----------- TREND MODEL -----") print(x$trend, ...) # ..................... determinantal part ................ 
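  ## Usage sketch (illustrative; fits a Gaussian determinantal point
  ## process model to a standard dataset and prints this summary,
  ## including the average repulsion strength):
  ##   fit <- dppm(swedishpines ~ 1, dppGauss)
  ##   summary(fit)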
parbreak() splat("---------- DETERMINANTAL STRUCTURE -----------------") print(x$fitted) parbreak() splat(if(x$stationary) "Strength" else "(Average) strength", "of repulsion:", signif(x$repul, digits)) return(invisible(NULL)) } spatstat.core/R/idw.R0000644000176200001440000001232714144333462014164 0ustar liggesusers# # idw.R # # Inverse-distance weighted smoothing # # $Revision: 1.13 $ $Date: 2021/01/07 03:08:41 $ idw <- function(X, power=2, at=c("pixels", "points"), ..., se=FALSE) { stopifnot(is.ppp(X) && is.marked(X)) at <- match.arg(at) marx <- marks(X) if(is.data.frame(marx)) { if((nc <- ncol(marx)) > 1) { ## multiple columns of marks - process one-by-one each <- vector(mode="list", length=nc) for(j in 1:nc) each[[j]] <- idw(X %mark% marx[,j], power=power, at=at, ..., se=se) names(each) <- colnames(marx) ## if(!se) { ## estimates only switch(at, pixels = { out <- as.solist(each) }, points = { out <- as.data.frame(each) } ) } else { ## estimates and standard errors est <- lapply(each, getElement, name="estimate") SE <- lapply(each, getElement, name="SE") switch(at, pixels = { out <- list(estimate = as.solist(est), SE = as.solist(SE)) }, points = { out <- list(estimate = as.data.frame(est), SE = as.data.frame(SE)) }) } return(out) } else marx <- marx[,1L] } if(!is.numeric(marx)) stop("Marks must be numeric") check.1.real(power) switch(at, pixels = { ## create grid W <- as.mask(as.owin(X), ...) dim <- W$dim npixels <- prod(dim) ## call C if(!se) { z <- .C(SC_Cidw, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npoints(X)), xstart = as.double(W$xcol[1L]), xstep = as.double(W$xstep), nx = as.integer(dim[2L]), ystart = as.double(W$yrow[1L]), ystep = as.double(W$ystep), ny = as.integer(dim[1L]), power = as.double(power), num = as.double(numeric(npixels)), den = as.double(numeric(npixels)), rat = as.double(numeric(npixels)), PACKAGE="spatstat.core") out <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W) out <- out[W, drop=FALSE] } else { z <- .C(SC_Cidw2, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npoints(X)), xstart = as.double(W$xcol[1L]), xstep = as.double(W$xstep), nx = as.integer(dim[2L]), ystart = as.double(W$yrow[1L]), ystep = as.double(W$ystep), ny = as.integer(dim[1L]), power = as.double(power), num = as.double(numeric(npixels)), den = as.double(numeric(npixels)), rat = as.double(numeric(npixels)), mtwo = as.double(numeric(npixels)), wtwo = as.double(numeric(npixels)), PACKAGE="spatstat.core") est <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W) est <- est[W, drop=FALSE] sumw <- z$den sumw2 <- z$wtwo m2 <- z$mtwo varden <- sumw - sumw2/sumw varden[varden <= 0] <- NA SE <- sqrt(m2/varden) SE <- as.im(matrix(SE, dim[1L], dim[2L]), W=W) SE <- SE[W, drop=FALSE] out <- solist(estimate=est, SE=SE) } }, points={ npts <- npoints(X) if(!se) { z <- .C(SC_idwloo, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npts), power = as.double(power), num = as.double(numeric(npts)), den = as.double(numeric(npts)), rat = as.double(numeric(npts)), PACKAGE="spatstat.core") out <- z$rat } else { z <- .C(SC_idwloo2, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npts), power = as.double(power), num = as.double(numeric(npts)), den = as.double(numeric(npts)), rat = as.double(numeric(npts)), mtwo = as.double(numeric(npts)), wtwo = as.double(numeric(npts)), PACKAGE="spatstat.core") est <- z$rat sumw <- z$den sumw2 <- z$wtwo m2 <- z$mtwo varden <- sumw - sumw2/sumw varden[varden <= 0] <- NA SE <- 
sqrt(m2/varden) out <- list(estimate=est, SE=SE) } }) return(out) } spatstat.core/R/bw.optim.R0000644000176200001440000001027114144333461015133 0ustar liggesusers# # bw.optim.R # # Class of optimised bandwidths # Plotting the object displays the optimisation criterion # # $Revision: 1.32 $ $Date: 2020/04/11 05:29:57 $ # bw.optim <- function(cv, h, iopt=if(optimum == "min") which.min(cv) else which.max(cv), ..., cvname, hname, criterion="cross-validation", optimum = c("min", "max"), warnextreme=TRUE, hargnames=NULL, unitname=NULL) { if(missing(cvname) || is.null(cvname)) cvname <- deparse(substitute(cv)) if(missing(hname) || is.null(hname)) hname <- deparse(substitute(h)) stopifnot(is.numeric(cv)) stopifnot(is.numeric(h)) stopifnot(length(h) == length(cv)) optimum <- match.arg(optimum) result <- h[iopt] if(warnextreme) { optimised <- switch(optimum, min="minimised", max="maximised") if(is.infinite(result)) { warning(paste(criterion, "criterion was", optimised, "at", hname, "=", as.numeric(result)), call.=FALSE) } else if((iopt == length(h) || iopt == 1)) { warning(paste(criterion, "criterion was", optimised, "at", if(iopt == 1) "left-hand" else "right-hand", "end of interval", paste0(prange(signif(range(h[is.finite(h)]), 3)), ";"), "use", ngettext(length(hargnames), "argument", "arguments"), paste(sQuote(hargnames), collapse=", "), "to specify a wider interval for bandwidth", sQuote(hname)), call.=FALSE) } } attr(result, "cv") <- cv attr(result, "h") <- h attr(result, "iopt") <- iopt attr(result, "labels") <- list(hname=hname, cvname=cvname) attr(result, "info") <- list(...) attr(result, "criterion") <- criterion attr(result, "units") <- unitname class(result) <- "bw.optim" return(result) } print.bw.optim <- function(x, ...) { y <- as.numeric(x) names(y) <- attr(x, "labels")$hname print(y, ...) return(invisible(NULL)) } as.data.frame.bw.optim <- function(x, ...) { h <- attr(x, "h") cv <- attr(x, "cv") df <- data.frame(h, cv) labels <- attr(x, "labels") colnames(df) <- labels[c("hname", "cvname")] info <- attr(x, "info") if(length(info) > 0) { lenfs <- lengths(info) if(any(ok <- (lenfs == nrow(df)))) { df <- cbind(df, as.data.frame(info[ok])) } } return(df) } as.fv.bw.optim <- function(x) { # convert to fv object df <- as.data.frame(x) dfnames <- colnames(df) hname <- dfnames[1L] cvname <- dfnames[2L] descrip <- c("smoothing parameter", paste(attr(x, "criterion"), "criterion")) if(ncol(df) > 2) descrip <- c(descrip, paste("Additional variable", sQuote(dfnames[-(1:2)]))) labl <- c(hname, paste0(dfnames[-1L], paren(hname))) yexp <- substitute(CV(h), list(CV=as.name(cvname), h=as.name(hname))) xfv <- fv(df, argu=hname, ylab=yexp, valu=cvname, labl=labl, desc=descrip, fname=cvname, yexp=yexp) fvnames(xfv, ".") <- cvname unitname(xfv) <- unitname(x) return(xfv) } plot.bw.optim <- function(x, ..., showopt=TRUE, optargs=list(lty=3, col="blue")) { xname <- short.deparse(substitute(x)) # convert to fv object xfv <- as.fv(x) # plot cross-validation criterion out <- do.call(plot.fv, resolve.defaults(list(x=quote(xfv)), list(...), list(main=xname))) # Turn off 'showopt' if the x-variable is not the bandwidth if(missing(showopt)) { argh <- list(...) isfmla <- unlist(lapply(argh, inherits, what="formula")) if(any(isfmla)) { fmla <- argh[[min(which(isfmla))]] xvar <- deparse(rhs.of.formula(fmla, tilde=FALSE)) if(!(identical(xvar, fvnames(xfv, ".x")) || identical(xvar, ".x"))) showopt <- FALSE } } # show optimal value? 
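  ## The optimised bandwidth is marked by a vertical line drawn with
  ## abline() using the graphics parameters in 'optargs'; under
  ## spatstat.options(monochrome=TRUE) the colour is converted to grey.
  ## Usage sketch (illustrative; 'cells' is a standard dataset):
  ##   b <- bw.diggle(cells)
  ##   plot(b)   # cross-validation curve with the optimum marked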
if(showopt) { hoptim <- as.numeric(x) if(spatstat.options('monochrome')) optargs <- col.args.to.grey(optargs) do.call(abline, append(list(v=hoptim), optargs)) } if(is.null(out)) return(invisible(NULL)) return(out) } spatstat.core/R/randomonlines.R0000644000176200001440000001425314144333463016252 0ustar liggesusers# # randomOnLines.R # # $Revision: 1.10 $ $Date: 2020/03/16 10:28:51 $ # # Generate random points on specified lines # runifpointOnLines <- function(n, L, nsim=1, drop=TRUE) { if(!is.numeric(n) || any(n < 0) || any(n %% 1 != 0)) stop("n should be a nonnegative integer or integers") if(!is.psp(L)) L <- as.psp(L) W <- as.owin(L) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { X <- datagen.runifpointOnLines(n, L) Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } datagen.runifpointOnLines <- function(n, L) { stopifnot(is.psp(L)) m <- length(n) ismarked <- (m > 1) if(m == 0 || (m == 1 && n == 0)) return(data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0))) # extract segment information len <- lengths_psp(L) sumlen <- sum(len) cumlen <- cumsum(len) cum0len <- c(0, cumlen) Ldf <- as.data.frame(L) x0 <- with(Ldf, x0) y0 <- with(Ldf, y0) dx <- with(Ldf, x1-x0) dy <- with(Ldf, y1-y0) # determine mark space if(ismarked) { markvalues <- names(n) if(sum(nzchar(markvalues)) < m) markvalues <- paste(1:m) } # initialise output data.frame out <- data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0)) if(ismarked) out <- cbind(out, data.frame(marks=character(0))) # generate points of each mark in turn for(j in 1:m) { if(n[[j]] > 0) { # generate random positions uu <- runif(n[[j]], min=0, max=sumlen) # identify segment for each point kk <- findInterval(uu, cum0len, rightmost.closed=TRUE, all.inside=TRUE) # parametric position along segment tt <- (uu - cum0len[kk])/len[kk] tt[!is.finite(tt)] <- 0 # convert to (x,y) x <- x0[kk] + tt * dx[kk] y <- y0[kk] + tt * dy[kk] # assemble result if(!ismarked) { out <- data.frame(x=x, y=y, seg=kk, tp=tt) } else { outj <- data.frame(x=x, y=y, seg=kk, tp=tt, marks=markvalues[j]) out <- rbind(out, outj) } } } if(ismarked) out$marks <- factor(out$marks, levels=markvalues) return(out) } runifpoisppOnLines <- function(lambda, L, nsim=1, drop=TRUE) { if(!is.numeric(lambda) || !all(is.finite(lambda) && (lambda >= 0))) stop("lambda should be a finite, nonnegative number or numbers") if(!is.psp(L)) L <- as.psp(L) W <- as.owin(L) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { X <- datagen.runifpoisppOnLines(lambda, L) Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } datagen.runifpoisppOnLines <- function(lambda, L) { stopifnot(is.psp(L)) mu <- lambda * sum(lengths_psp(L)) n <- rpois(rep.int(1, length(mu)), mu) if(length(n) > 1) names(n) <- names(lambda) df <- datagen.runifpointOnLines(n, L) return(df) } rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., nsim=1, drop=TRUE) { if(!is.psp(L)) L <- as.psp(L) W <- as.owin(L) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { X <- datagen.rpoisppOnLines(lambda, L, lmax=lmax, ...) 
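    ## Each realisation is converted to a 'ppp' object in the window
    ## containing the segment pattern L (see the next statement).
    ## Usage sketch (illustrative; a numeric lambda is an intensity
    ## per unit length of the segments):
    ##   L <- psp(runif(10), runif(10), runif(10), runif(10),
    ##            window=owin())
    ##   X <- rpoisppOnLines(5, L)
    ##   plot(L); plot(X, add=TRUE)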
Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } datagen.rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., check=TRUE) { stopifnot(is.psp(L)) if(is.numeric(lambda)) return(datagen.runifpoisppOnLines(lambda, L)) # ensure lambda is a list if(is.function(lambda) || is.im(lambda)) lambda <- list(lambda) m <- length(lambda) # determine type of argument argtype <- if(all(unlist(lapply(lambda, is.im)))) "im" else if(all(unlist(lapply(lambda, is.function)))) "function" else stop(paste(sQuote("lambda"), "must be a numeric vector, a function, an image,", "a list of functions, or a list of images")) # check values of lambda if(argtype == "im") { for(j in seq_len(m)) { lamj <- lambda[[j]] if(!(lamj$type %in% c("real", "integer"))) stop("lambda must be numeric-valued or integer-valued") lrange <- range(lamj) if(any(is.infinite(lrange))) stop("Infinite pixel values not permitted") if(lrange[1] < 0) stop("Negative pixel values not permitted") } } # determine uniform bound if(!is.null(lmax)) { stopifnot(is.numeric(lmax)) if(length(lmax) != m) { if(length(lmax) == 1) { lmax <- rep.int(lmax, m) } else stop("Length of lmax does not match length of lambda") } } else { # compute lmax lmax <- numeric(m) for(j in seq_len(m)) { lamj <- lambda[[j]] if(is.function(lamj)) { X <- pointsOnLines(L, np=10000) lambdaX <- lamj(X$x, X$y, ...) lmax[j] <- max(lambdaX, na.rm=TRUE) } else if(is.im(lamj)) lmax[j] <- max(lamj) } if(!all(is.finite(lmax))) stop("Infinite values of lambda obtained") if(any(lmax < 0)) stop("Negative upper bound for lambda obtained") names(lmax) <- names(lambda) } # Lewis-Shedler (rejection) method Y <- datagen.runifpoisppOnLines(lmax, L) n <- nrow(Y) if(n == 0) return(Y) # evaluate lambda at each simulated point if(m == 1) { lambda <- lambda[[1]] markindex <- 1 if(is.function(lambda)) lambdaY <- lambda(Y$x, Y$y, ...) else lambdaY <- safelookup(lambda, as.ppp(Y, W=as.owin(L))) } else { lambdaY <- numeric(n) markindex <- as.integer(Y$marks) for(j in seq_len(m)) { lamj <- lambda[[j]] jrows <- (markindex == j) Yj <- Y[jrows, , drop=FALSE] if(is.function(lamj)) lambdaY[jrows] <- lamj(Yj$x, Yj$y, ...) 
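        ## (for image-valued lambda, pixel values are looked up with
        ## safelookup(), which guards against points lying just
        ## outside the pixel raster)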
else lambdaY[jrows] <- safelookup(lamj, as.ppp(Yj, W=as.owin(L))) } } lambdaY[is.na(lambdaY)] <- 0 # accept/reject pY <- lambdaY/lmax[markindex] if(check) { if(any(pY < 0)) warning("Negative values of lambda obtained") if(any(pY > 1)) warning("lmax is not an upper bound for lambda") } retain <- (runif(n) < pY) Y <- Y[retain, , drop=FALSE] return(Y) } spatstat.core/R/fitted.ppm.R0000644000176200001440000001257314144333462015456 0ustar liggesusers# # fitted.ppm.R # # method for 'fitted' for ppm objects # # $Revision: 1.18 $ $Date: 2018/03/19 14:29:48 $ # fitted.ppm <- function(object, ..., type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, ignore.hardcore=FALSE, dropcoef=FALSE) { verifyclass(object, "ppm") if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(leaveoneout) { ## Leave-one-out calculation for data points only if(missing(dataonly)) dataonly <- TRUE if(!dataonly) stop("Leave-one-out calculation requires dataonly=TRUE") if(!is.null(new.coef)) stop("Leave-one-out calculation requires new.coef=NULL") } coeffs <- adaptcoef(new.coef, coef(object), drop=dropcoef) uniform <- is.poisson.ppm(object) && no.trend.ppm(object) typelist <- c("lambda", "cif", "trend", "link") typevalu <- c("lambda", "lambda", "trend", "link") if(is.na(m <- pmatch(type, typelist))) stop(paste("Unrecognised choice of ", sQuote("type"), ": ", sQuote(type), sep="")) type <- typevalu[m] if(uniform) { lambda <- exp(coeffs[[1L]]) Q <- quad.ppm(object, drop=drop) lambda <- rep.int(lambda, n.quad(Q)) } else { glmdata <- getglmdata(object, drop=drop) glmfit <- getglmfit(object) Vnames <- object$internal$Vnames interacting <- (length(Vnames) != 0) # Modification of `glmdata' may be required if(interacting) switch(type, trend={ ## zero the interaction statistics glmdata[ , Vnames] <- 0 }, link=, lambda={ if(!ignore.hardcore) { ## Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) ## Exclude these locations from predict.glm glmdata <- glmdata[!forbid, ] } else { ## Compute positive part of cif Q <- quad.ppm(object, drop=drop) X <- Q[["data"]] U <- union.quad(Q) E <- equalpairs.quad(Q) eva <- evalInteraction(X, U, E, object$interaction, object$correction, splitInf=TRUE) forbid <- attr(eva, "-Inf") %orifnull% logical(npoints(U)) ## Use positive part of interaction if(ncol(eva) != length(Vnames)) stop(paste("Internal error: evalInteraction yielded", ncol(eva), "variables instead of", length(Vnames)), call.=FALSE) glmdata[,Vnames] <- as.data.frame(eva) } }) # Compute predicted [conditional] intensity values changecoef <- !is.null(new.coef) || (object$method != "mpl") lambda <- GLMpredict(glmfit, glmdata, coeffs, changecoef=changecoef, type = ifelse(type == "link", "link", "response")) # Note: the `newdata' argument is necessary in order to obtain # predictions at all quadrature points. If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. Assuming drop=FALSE. 
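  ## For models with a hard core (and ignore.hardcore=FALSE), the
  ## quadrature points where the conditional intensity is zero were
  ## excluded from the GLM prediction above; zeroes are reinserted
  ## at those locations below.
  ## Usage sketch (illustrative):
  ##   fit <- ppm(cells ~ x, Strauss(0.1))
  ##   lam <- fitted(fit, dataonly=TRUE)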
if(interacting && type=="lambda" && !ignore.hardcore) { # reinsert zeroes lam <- numeric(length(forbid)) lam[forbid] <- 0 lam[!forbid] <- lambda lambda <- lam } } if(dataonly) lambda <- lambda[is.data(quad.ppm(object))] if(leaveoneout) { ## Perform leverage calculation dfb <- dfbetas(object, multitypeOK=TRUE) delta <- with(dfb, 'discrete')[with(dfb, 'is.atom'),,drop=FALSE] ## adjust fitted value mom <- model.matrix(object)[is.data(quad.ppm(object)),,drop=FALSE] if(type == "trend" && !uniform && interacting) mom[, Vnames] <- 0 lambda <- lambda * exp(- rowSums(delta * mom)) } lambda <- unname(as.vector(lambda)) return(lambda) } adaptcoef <- function(new.coef, fitcoef, drop=FALSE) { ## a replacement for 'fitcoef' will be extracted from 'new.coef' if(is.null(new.coef)) { coeffs <- fitcoef } else if(length(new.coef) == length(fitcoef)) { coeffs <- new.coef } else { fitnames <- names(fitcoef) newnames <- names(new.coef) if(is.null(newnames) || is.null(fitnames)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef)), call.=FALSE) absentnames <- setdiff(fitnames, newnames) excessnames <- setdiff(newnames, fitnames) if((nab <- length(absentnames)) > 0) stop(paste(ngettext(nab, "Coefficient", "Coefficients"), commasep(sQuote(absentnames)), ngettext(nab, "is", "are"), "missing from new.coef"), call.=FALSE) if(!drop && ((nex <- length(excessnames)) > 0)) stop(paste(ngettext(nex, "Coefficient", "Coefficients"), commasep(sQuote(excessnames)), ngettext(nab, "is", "are"), "present in new.coef but not in coef(object)"), call.=FALSE) #' extract only the relevant coefficients coeffs <- new.coef[fitnames] } return(coeffs) } spatstat.core/R/rPerfect.R0000644000176200001440000002161114144333463015150 0ustar liggesusers# # Perfect Simulation # # $Revision: 1.22 $ $Date: 2021/01/07 03:08:41 $ # # rStrauss # rHardcore # rStraussHard # rDiggleGratton # rDGS # rPenttinen rStrauss <- function(beta, gamma=1, R=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(gamma) check.1.real(R) check.finite(beta) check.finite(gamma) check.finite(R) stopifnot(beta > 0) stopifnot(gamma >= 0) stopifnot(gamma <= 1) stopifnot(R >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call(SC_PerfectStrauss, beta, gamma, R, xrange, yrange, PACKAGE="spatstat.core") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] times <- c(start=z[[4]], end=z[[5]]) if(nout<0) stop("internal error: copying failed in PerfectStrauss") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] attr(P, "times") <- times if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # Perfect Simulation of Hardcore process rHardcore <- function(beta, R=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(R) check.finite(beta) check.finite(R) stopifnot(beta > 0) stopifnot(R >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(R) <- "double" 
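    ## The coordinates are generated in compiled C code
    ## (PerfectHardcore), which performs perfect (exact) simulation;
    ## if the window was expanded, the result is clipped back to W.
    ## Usage sketch (illustrative):
    ##   X <- rHardcore(beta=100, R=0.05, W=square(1))
    ##   plot(X)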
storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call(SC_PerfectHardcore, beta, R, xrange, yrange, PACKAGE="spatstat.core") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectHardcore") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect simulation of hybrid Strauss-Hardcore # provided gamma <= 1 # rStraussHard <- function(beta, gamma=1, R=0, H=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(gamma) check.1.real(R) check.1.real(H) check.finite(beta) check.finite(gamma) check.finite(R) check.finite(H) stopifnot(beta > 0) stopifnot(gamma >= 0) if(gamma > 1) stop("Sorry, perfect simulation is only implemented for gamma <= 1") stopifnot(R >= 0) stopifnot(H >= 0) stopifnot(H <= R) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- storage.mode(H) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call(SC_PerfectStraussHard, beta, gamma, R, H, xrange, yrange, PACKAGE="spatstat.core") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectStraussHard") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect Simulation of Diggle-Gratton process # rDiggleGratton <- function(beta, delta, rho, kappa=1, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(delta) check.1.real(rho) check.1.real(kappa) check.finite(beta) check.finite(delta) check.finite(rho) check.finite(kappa) stopifnot(beta > 0) stopifnot(delta >= 0) stopifnot(rho >= 0) stopifnot(delta <= rho) stopifnot(kappa >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- "double" storage.mode(delta) <- storage.mode(rho) <- storage.mode(kappa) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call(SC_PerfectDiggleGratton, beta, delta, rho, kappa, xrange, yrange, PACKAGE="spatstat.core") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectDiggleGratton") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect Simulation of Diggle-Gates-Stibbard process # rDGS <- function(beta, rho, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(rho) check.finite(beta) check.finite(rho) stopifnot(beta > 0) stopifnot(rho >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- 
"double" storage.mode(rho) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call(SC_PerfectDGS, beta, rho, xrange, yrange, PACKAGE="spatstat.core") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectDGS") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect Simulation of Penttinen process # rPenttinen <- function(beta, gamma=1, R, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(gamma) check.1.real(R) check.finite(beta) check.finite(gamma) check.finite(R) stopifnot(beta > 0) stopifnot(gamma >= 0) stopifnot(gamma <= 1) stopifnot(R >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call(SC_PerfectPenttinen, beta, gamma, R, xrange, yrange, PACKAGE="spatstat.core") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectPenttinen") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } ## ....... utilities ................................. expandwinPerfect <- function(W, expand, amount) { ## expand 'W' if expand=TRUE according to default 'amount' ## or expand 'W' using rmhexpand(expand) if(!is.logical(expand)) { amount <- rmhexpand(expand) expand <- TRUE } changed <- FALSE if(expand) { W <- expand.owin(W, amount) changed <- TRUE } if(!is.rectangle(W)) { W <- as.rectangle(W) changed <- TRUE warning(paste("Simulation will be performed in the containing rectangle", "and clipped to the original window."), call.=FALSE) } attr(W, "changed") <- changed return(W) } spatstat.core/R/summary.ppm.R0000644000176200001440000004211514144333464015671 0ustar liggesusers# # summary.ppm.R # # summary() method for class "ppm" # # $Revision: 1.78 $ $Date: 2019/10/11 06:23:19 $ # # summary.ppm() # print.summary.ppm() # summary.ppm <- local({ covtype <- function(x) { if(is.im(x)) "im" else if(is.function(x)) "function" else if(is.owin(x)) "owin" else if(is.numeric(x) && length(x) == 1) "number" else if(is.factor(x)) "factor" else if(is.integer(x)) "integer" else if(is.numeric(x)) "numeric" else storage.mode(x) } xargs <- function(f) { ar <- names(formals(f))[-(1:2)] return(ar[ar != "..."]) } summary.ppm <- function(object, ..., quick=FALSE, fine=FALSE) { verifyclass(object, "ppm") x <- object y <- list() class(y) <- "summary.ppm" ####### Extract main data components ######################### QUAD <- object$Q DATA <- QUAD$data TREND <- x$trend INTERACT <- x$interaction if(is.null(INTERACT)) INTERACT <- Poisson() ####### Check version ######################### mpl.ver <- versionstring.ppm(object) int.ver <- versionstring.interact(INTERACT) current <- versionstring.spatstat() virgin <- min(package_version(c(mpl.ver, int.ver))) y$antiquated <- antiquated <- (virgin <= package_version("1.5")) y$old <- (virgin < majorminorversion(current)) y$version <- as.character(virgin) ####### 
Determine type of model ############################ y$entries <- list() y$no.trend <- identical.formulae(TREND, NULL) || identical.formulae(TREND, ~1) y$trendvar <- trendvar <- variablesinformula(TREND) y$stationary <- y$no.trend || all(trendvar == "marks") y$poisson <- is.poisson.interact(INTERACT) y$marked <- is.marked.ppp(DATA) y$multitype <- is.multitype.ppp(DATA) y$marktype <- if(y$multitype) "multitype" else if(y$marked) "marked" else "unmarked" if(y$marked) y$entries$marks <- marks(DATA) y$name <- paste(if(y$stationary) "Stationary " else "Nonstationary ", if(y$poisson) { if(y$multitype) "multitype " else if(y$marked) "marked " else "" }, INTERACT$name, sep="") ###### Fitting algorithm ######################################## y$method <- x$method y$VB <- x$internal$VB y$problems <- x$problems y$fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" if(y$fitter %in% c("glm", "gam")) y$converged <- x$internal$glmfit$converged ###### Coefficients were changed after fit? ##################### y$projected <- yproj <- identical(x$projected, TRUE) y$changedcoef <- yproj || !is.null(x$coef.orig) y$valid <- valid.ppm(x, warn=FALSE) ###### Extract fitted model coefficients ######################### y$entries$coef <- COEFS <- x$coef y$coef.orig <- x$coef.orig y$entries$Vnames <- Vnames <- x$internal$Vnames y$entries$IsOffset <- x$internal$IsOffset ###### Extract fitted interaction and summarise ################# FITIN <- fitin(x) y$interaction <- summary(FITIN) # Exit here if quick=TRUE if(identical(quick, TRUE)) return(y) ###### Does it have external covariates? #################### # defaults y <- append(y, list(has.covars = FALSE, covnames = character(0), covars.used = character(0), uses.covars = FALSE, covars.are.df = FALSE, expandable = TRUE, covar.type = character(0), covar.descrip = character(0), has.funcs = FALSE, covfunargs = NULL, has.xargs = FALSE, xargmap = NULL)) class(y) <- "summary.ppm" if(!antiquated) { covars <- x$covariates y$has.covars <- hc <- !is.null(covars) && (length(covars) > 0) if(hc) { y$covnames <- names(covars) used <- (y$trendvar %in% names(covars)) y$covars.used <- y$trendvar[used] y$uses.covars <- any(used) y$covars.are.df <- is.data.frame(covars) # describe covariates ctype <- unlist(lapply(covars, covtype)) y$expandable <- all(ctype[used] %in%c("function", "number")) names(ctype) <- names(covars) y$covar.type <- ctype y$covar.descrip <- ctype # are there any functions? y$has.funcs <- any(isfun <- (ctype == "function")) # do covariates depend on additional arguments? 
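      ## Covariates that are functions may take arguments beyond the
      ## first two (x, y); any such extra arguments are matched
      ## against the 'covfunargs' supplied to ppm(), and the mapping
      ## is recorded in 'xargmap' for printing.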
if(y$has.funcs) { y$covfunargs <- x$covfunargs y$cfafitter <- attr(x$covfunargs, "fitter") funs <- covars[isfun] fdescrip <- function(f) { if(inherits(f, "distfun")) return("distfun") alist <- paste(names(formals(f)), collapse=", ") paste("function(", alist, ")", sep="") } y$covar.descrip[isfun] <- unlist(lapply(funs, fdescrip)) # find any extra arguments (after args 1 & 2) explicitly named fargs <- lapply(funs, xargs) nxargs <- lengths(fargs) y$has.xargs <- any(nxargs > 0) if(y$has.xargs) { # identify which function arguments are fixed in the call fmap <- data.frame(Covariate=rep.int(names(funs), nxargs), Argument=unlist(fargs)) fmap$Given <- (fmap$Argument %in% names(y$covfunargs)) y$xargmap <- fmap } } } } else { # Antiquated format # Interpret the function call instead callexpr <- parse(text=x$call) callargs <- names(as.list(callexpr[[1]])) # Data frame of covariates was called 'data' in versions up to 1.4-x y$has.covars <- !is.null(callargs) && !is.na(pmatch("data", callargs)) # conservative guess y$uses.covars <- y$has.covars y$covfunargs <- NULL } ###### Arguments in call #################################### y$args <- x[c("call", "correction", "rbord")] ####### Main data components ######################### y$entries <- append(list(quad=QUAD, data=DATA, interaction=INTERACT), y$entries) if(is.character(quick) && (quick == "entries")) return(y) ####### Summarise data ############################ y$data <- summary(DATA, checkdup=FALSE) y$quad <- summary(QUAD, checkdup=FALSE) if(is.character(quick) && (quick == "no prediction")) return(y) ###### Trend component ######################### y$trend <- list() y$trend$name <- if(y$poisson) "Intensity" else "Trend" y$trend$formula <- if(y$no.trend) NULL else TREND if(y$poisson && y$no.trend) { # uniform Poisson process y$trend$value <- exp(COEFS[[1]]) y$trend$label <- switch(y$marktype, unmarked="Uniform intensity", multitype="Uniform intensity for each mark level", marked="Uniform intensity in product space", "") } else if(y$stationary) { # stationary switch(y$marktype, unmarked={ # stationary non-poisson non-marked y$trend$label <- "First order term" y$trend$value <- c(beta=exp(COEFS[[1]])) }, multitype={ # stationary, multitype mrk <- marks(DATA) y$trend$label <- if(y$poisson) "Intensities" else "First order terms" # Use predict.ppm to evaluate the fitted intensities lev <- factor(levels(mrk), levels=levels(mrk)) nlev <- length(lev) marx <- list(x=rep.int(0, nlev), y=rep.int(0, nlev), marks=lev) betas <- predict(x, locations=marx, type="trend") names(betas) <- paste("beta_", as.character(lev), sep="") y$trend$value <- betas }, marked={ # stationary, marked y$trend$label <- "Fitted intensity coefficients" y$trend$value <- blankcoefnames(COEFS) }) } else { # not stationary # extract trend terms without trying to understand them much if(is.null(Vnames)) trendbits <- COEFS else { agree <- outer(names(COEFS), Vnames, "==") whichbits <- matrowall(!agree) trendbits <- COEFS[whichbits] } y$trend$label <- ngettext(length(trendbits), "Fitted trend coefficient", "Fitted trend coefficients") y$trend$value <- blankcoefnames(trendbits) } # ----- parameters with SE -------------------------- if(is.character(quick) && (quick == "no variances")) return(y) # Exit before SE for variational Bayes if(!is.null(x$internal$VB)) return(y) if(length(COEFS) > 0) { # compute standard errors se <- x$internal$se if(is.null(se)) { vc <- vcov(x, fine=fine, matrix.action="warn", invalid.action="silent") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else 
if(length(vc) == 1) sqrt(vc) else NULL } } if(!is.null(se)) { two <- qnorm(0.975) lo <- COEFS - two * se hi <- COEFS + two * se zval <- COEFS/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) # table of coefficient estimates with SE and 95% CI y$coefs.SE.CI <- data.frame(Estimate=COEFS, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } return(y) } summary.ppm }) coef.summary.ppm <- function(object, ...) { object$coefs.SE.CI } print.summary.ppm <- function(x, ...) { if(x$old) warning("Model was fitted by an older version of spatstat") if(is.null(x$args)) { # this is the quick version splat(x$name) return(invisible(NULL)) } # otherwise - full details splat("Point process model") fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" methodchosen <- if(is.null(x$method)) "unspecified method" else if(fitter == "exact") "maximum likelihood" else switch(x$method, mpl={ if(x$poisson) { # Poisson process "maximum likelihood (Berman-Turner approximation)" } else { "maximum pseudolikelihood (Berman-Turner approximation)" } }, logi={ if(is.null(x$VB)){ if(x$poisson) { # Poisson process "maximum likelihood (logistic regression approximation)" } else { "maximum pseudolikelihood (logistic regression approximation)" } } else { "maximum posterior density (variational Bayes approximation)" } }, ho="Huang-Ogata method (approximate maximum likelihood)", paste("unrecognised method", sQuote(x$method))) splat("Fitting method:", methodchosen) howfitted <- switch(fitter, exact= "analytically", gam = "using gam()", glm = "using glm()", ho = NULL, paste("using unrecognised fitter", sQuote(fitter))) if(!is.null(howfitted)) splat("Model was fitted", howfitted) if(fitter %in% c("glm", "gam")) { if(x$converged) splat("Algorithm converged") else splat("*** Algorithm did not converge ***") } if(x$projected) splat("Fit was projected to obtain a valid point process model") cat("Call:\n") print(x$args$call) if(x$old) splat("** Executed by old spatstat version", x$version, " **") splat("Edge correction:", dQuote(x$args$correction)) if(x$args$correction == "border") splat("\t[border correction distance r =", x$args$rbord,"]") # print summary of quadrature scheme if(is.null(x$quad)) return(invisible(NULL)) ruletextline() print(x$quad) ## start printing trend information if(is.null(x$no.trend)) return(invisible(NULL)) ruletextline() splat("FITTED MODEL:") parbreak() # This bit is currently identical to print.ppm() # except for a bit more fanfare # and the inclusion of the 'gory details' bit notrend <- x$no.trend # stationary <- x$stationary poisson <- x$poisson markeddata <- x$marked multitype <- x$multitype # markedpoisson <- poisson && markeddata # ----------- Print model type ------------------- cat(x$name) cat("\n") if(markeddata) mrk <- x$entries$marks if(multitype) { splat("Possible marks:") cat(paste(levels(mrk))) } # ----- trend -------------------------- if(length(x$trend) == 0) return(invisible(NULL)) parbreak() splat(paste0("---- ", x$trend$name, ": ----")) parbreak() if(!notrend) { splat("Log", if(poisson) "intensity:" else "trend:", pasteFormula(x$trend$formula)) if(x$uses.covars) splat("Model depends on external", ngettext(length(x$covars.used), "covariate", "covariates"), commasep(sQuote(x$covars.used))) } if(x$has.covars) { if(notrend || !x$uses.covars) splat("Model object contains external covariates") isdf <- identical(x$covars.are.df, TRUE) if(!is.null(cd <- x$covar.descrip)) { # print description of 
each covariate splat(paste0("Covariates provided", if(isdf) " (in data frame)" else NULL, ":")) namescd <- names(cd) for(i in seq_along(cd)) splat(paste0("\t", namescd[i], ": ", cd[i])) } if(!is.null(cfa <- x$covfunargs) && length(cfa) > 0) { splat("Covariate function arguments (covfunargs) provided:") namescfa <- names(cfa) for(i in seq_along(cfa)) { cat(paste(namescfa[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cat(paste(cfai, "\n")) } else print(cfa[[i]]) } } } parbreak() splat(paste0(x$trend$label, ":")) tv <- x$trend$value if(!is.list(tv)) print(tv) else for(i in seq_along(tv)) print(tv[[i]]) # table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { cat("\n") print(cose) } # ---- Interaction ---------------------------- if(!poisson) { parbreak() splat(" ---- Interaction: -----") parbreak() print(x$interaction) } ####### Gory details ################################### parbreak() splat("----------- gory details -----") parbreak() COEFS <- x$entries$coef splat("Fitted regular parameters (theta):") print(COEFS) parbreak() splat("Fitted exp(theta):") print(exp(unlist(COEFS))) ##### Warnings issued ####### probs <- x$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(a) { if(is.list(a) && !is.null(p <- a$print)) cat(paste("Problem:\n", p, "\n\n")) }) vali <- x$valid if(identical(vali, FALSE) && waxlyrical("errors")) { parbreak() splat("*** Model is not valid ***") if(!all(is.finite(x$entries$coef))) { splat("*** Some coefficients are NA or Inf ***") } else { splat("*** Interaction parameters are outside valid range ***") } } else if(is.na(vali) && waxlyrical("extras")) { parbreak() splat("[Validity of model could not be checked]") } return(invisible(NULL)) } no.trend.ppm <- function(x) { summary.ppm(x, quick=TRUE)$no.trend } is.stationary <- function(x) { UseMethod("is.stationary") } is.poisson <- function(x) { UseMethod("is.poisson") } is.stationary.ppm <- function(x) { TREND <- x$trend if(is.null(TREND) || identical.formulae(TREND, ~1)) return(TRUE) if(all(variablesinformula(TREND) == "marks")) return(TRUE) return(FALSE) } is.poisson.ppm <- function(x) { stopifnot(is.ppm(x)) y <- x$interaction if(is.null(y)) y <- Poisson() is.poisson.interact(y) } is.marked.ppm <- function(X, ...) { summary.ppm(X, quick=TRUE)$marked } is.multitype.ppm <- function(X, ...) 
{ summary.ppm(X, quick=TRUE)$multitype } is.expandable.ppm <- function(x) { return(identical(summary(x, quick="entries")$expandable, TRUE)) } blankcoefnames <- function(x) { # remove name labels from ppm coefficients # First decide whether there are 'labels within labels' unlabelled <- unlist(lapply(x, function(z) { is.null(names(z)) } )) if(all(unlabelled)) value <- unlist(x) else { value <- list() for(i in seq_along(x)) value[[i]] <- if(unlabelled[i]) unlist(x[i]) else x[[i]] } return(value) } spatstat.core/R/pcfmulti.inhom.R0000644000176200001440000002216514144333463016337 0ustar liggesusers# # pcfmulti.inhom.R # # $Revision: 1.15 $ $Date: 2016/09/21 07:28:42 $ # # inhomogeneous multitype pair correlation functions # # pcfcross.inhom <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) g <- pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(g, substitute(g[inhom,i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(g[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(g, "dangerous") return(result) } pcfdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- paste("points") g <- pcfmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(g, substitute(g[inhom, i ~ dot](r), list(i=iname)), c("g", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(g[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(g, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } pcfmulti.inhom <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), sigma=NULL, varcov=NULL, Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) # bandwidth if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/areaW) } ########## indices I and J ######################## if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != npts || length(J) != npts) stop(paste("The length of I and J must equal", "the number of points in the pattern")) nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] ########## intensity values ######################### dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE if(is.null(lambdaI)) { # Estimate density by leave-one-out kernel smoothing dangerI <- FALSE lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaI)) check.nvector(lambdaI, nI) else if(is.im(lambdaI)) lambdaI <- safelookup(lambdaI, XI) else if(is.function(lambdaI)) lambdaI <- lambdaI(XI$x, XI$y) else stop(paste(sQuote("lambdaI"), "should be a vector, a pixel image, or a function")) } if(is.null(lambdaJ)) { # Estimate density by leave-one-out kernel smoothing dangerJ <- FALSE lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaJ)) check.nvector(lambdaJ, nJ) else if(is.im(lambdaJ)) lambdaJ <- safelookup(lambdaJ, XJ) else if(is.function(lambdaJ)) lambdaJ <- lambdaJ(XJ$x, XJ$y) else stop(paste(sQuote("lambdaJ"), "should be a vector, a pixel image, or a function")) } danger <- dangerI || dangerJ ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, npts/areaW) breaks <- handle.r.b.args(r, breaks, win, 
rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(inhom,I,J)") out <- fv(df, "r", quote(g[inhom,I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(g[list(inhom,I,J)](r))) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances # identify close pairs of points close <- crosspairs(XI, XJ, rmax+hmax, what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) gT <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat.core/R/ho.R0000644000176200001440000000431214144333462014002 0ustar liggesusers# # ho.R # # Huang-Ogata method # # $Revision: 1.17 $ $Date: 2016/03/15 07:42:26 $ # ho.engine <- function(model, ..., nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE) { verifyclass(model, "ppm") if(is.null(start)) start <- list(n.start=data.ppm(model)$n) # check that the model can be simulated if(!valid.ppm(model)) { warning("Fitted model is invalid - cannot be simulated") return(NULL) } # compute the observed value of the sufficient statistic X <- data.ppm(model) sobs <- suffstat(model, X) # generate 'nsim' realisations of the fitted model # and compute the sufficient statistics of the model rmhinfolist <- rmh(model, start, control, preponly=TRUE, verbose=FALSE) if(verb) { cat("Simulating... 
") state <- list() } ndone <- 0 while(ndone < nsim) { Xi <- rmhEngine(rmhinfolist, verbose=FALSE) v <- try(suffstat(model,Xi)) if(!inherits(v, "try-error")) { if(ndone == 0) svalues <- matrix(, nrow=nsim, ncol=length(v)) ndone <- ndone + 1 svalues[ndone, ] <- v } if(verb) state <- progressreport(ndone, nsim, state=state) } if(verb) cat("Done.\n\n") # calculate the sample mean and variance of the # sufficient statistic for the simulations smean <- apply(svalues, 2, mean, na.rm=TRUE) svar <- var(svalues, na.rm=TRUE) # value of canonical parameter from MPL fit theta0 <- coef(model) # Newton-Raphson update Vinverse <- solve(svar) theta <- theta0 + as.vector(Vinverse %*% (sobs - smean)) ## appropriate names nama <- names(theta0) if(!is.null(nama)) { names(theta) <- nama dimnames(svar) <- dimnames(Vinverse) <- list(nama, nama) } ## update model newmodel <- model newmodel$coef <- theta newmodel$coef.orig <- theta0 newmodel$method <- "ho" newmodel$fitter <- "ho" newmodel$fisher <- svar newmodel$varcov <- Vinverse # recompute fitted interaction newmodel$fitin <- NULL newmodel$fitin <- fitin(newmodel) ## update pseudolikelihood value using code in logLik.ppm newmodel$maxlogpl.orig <- model$maxlogpl newmodel$maxlogpl <- logLik(newmodel, new.coef=theta, warn=FALSE) ## return(newmodel) } spatstat.core/R/vcov.mppm.R0000644000176200001440000002320514144333464015325 0ustar liggesusers# Variance-covariance matrix for mppm objects # # $Revision: 1.21 $ $Date: 2020/03/04 05:25:20 $ # # vcov.mppm <- local({ errhandler <- function(whinge, err) { switch(err, fatal=stop(whinge), warn={ warning(whinge) return(NA) }, null= return(NULL), stop(paste("Unrecognised option: err=", dQuote(err)))) } vcov.mppm <- function(object, ..., what="vcov", err="fatal") { what <- match.arg(what, c("vcov", "corr", "fisher", "Fisher", "internals", "all")) if(what == "Fisher") what <- "fisher" if(is.poisson.mppm(object) && object$Fit$fitter == "glm") return(vcmPois(object, ..., what=what, err=err)) return(vcmGibbs(object, ..., what=what, err=err)) } vcmPois <- function(object, ..., what, err, nacoef.action=c("warn", "fatal", "silent") ) { #' legacy algorithm for Poisson case #' detect NA coefficients if(missing(nacoef.action) && !missing(err) && !is.null(err)) { nacoef.action <- err } else { nacoef.action <- match.arg(nacoef.action) } if(!all(is.finite(coef(object)))) { gripe <- "Cannot compute variance; some coefficients are NA, NaN or Inf" switch(nacoef.action, fatal = stop(gripe, call.=FALSE), warn = warning(gripe, call.=FALSE), silent = {}) return(NULL) } #' get to work gf <- object$Fit$FIT gd <- object$Fit$moadf wt <- gd$.mpl.W fi <- fitted(gf) fo <- object$trend if(is.null(fo)) fo <- (~1) mof <- model.frame(fo, gd) mom <- model.matrix(fo, mof) momnames <- dimnames(mom)[[2]] fisher <- sumouter(mom, fi * wt) dimnames(fisher) <- list(momnames, momnames) switch(what, fisher = { return(fisher) }, vcov = { vc <- try(solve(fisher), silent=(err == "null")) if(inherits(vc, "try-error")) return(errhandler("Fisher information is singular", err)) else return(vc) }, corr={ co <- try(solve(fisher), silent=(err == "null")) if(inherits(co, "try-error")) return(errhandler("Fisher information is singular", err)) sd <- sqrt(diag(co)) return(co / outer(sd, sd, "*")) }) } vcmGibbs <- function(object, ..., what, err, matrix.action=c("warn", "fatal", "silent"), gam.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent") ) { if(!missing(err)) { if(err == "null") err <- "silent" 
matrix.action <- if(missing(matrix.action)) err else match.arg(matrix.action) gam.action <- if(missing(gam.action)) err else match.arg(gam.action) logi.action <- if(missing(logi.action)) err else match.arg(logi.action) nacoef.action <- if(missing(nacoef.action)) err else match.arg(nacoef.action) } else { matrix.action <- match.arg(matrix.action) gam.action <- match.arg(gam.action) logi.action <- match.arg(logi.action) nacoef.action <- match.arg(nacoef.action) } #' detect NA coefficients if(!all(is.finite(as.matrix(coef(object))))) { gripe <- "Cannot compute variance; some coefficients are NA, NaN or Inf" switch(nacoef.action, fatal = stop(gripe, call.=FALSE), warn = warning(gripe, call.=FALSE), silent = {}) return(NULL) } #' initialise cnames <- names(fixed.effects(object)) nc <- length(cnames) A2 <- A3 <- matrix(0, nc, nc, dimnames=list(cnames, cnames)) #' (1) Compute matrix A1 directly glmdata <- object$Fit$moadf glmsub <- glmdata$.mpl.SUBSET wt <- glmdata$.mpl.W mom <- model.matrix(object) lam <- unlist(fitted(object)) A1 <- sumouter(mom, lam * wt * glmsub) #' (2) compute A2 and A3 matrices of submodels subs <- subfits(object, what="basicmodels") n <- length(subs) guts <- lapply(subs, vcov, what="internals", matrix.action=matrix.action, gam.action=gam.action, logi.action=logi.action, dropcoef=TRUE, ...) a2 <- lapply(guts, getElement, name="A2") a3 <- lapply(guts, getElement, name="A3") #' (3) map into full model #' Identify the (unique) active interaction in each row activeinter <- active.interactions(object) #' interaction names (in glmdata) Vnamelist <- object$Fit$Vnamelist Isoffsetlist <- object$Fit$Isoffsetlist #' Each a2[[i]] and a3[[i]] refer to this interaction (eg 'str') #' but may contribute to several coefficients of the full model #' e.g. 
'str' -> str:id -> 'str', 'str:id2' #' Determine which canonical variables of full model are active in each row mats <- split.data.frame(mom, glmdata$id) activevars <- matrix(sapply(mats, notallzero), nrow=length(mats)) #' dependence map of canonical variables of full model #' on the original variables/interactions md <- model.depends(object$Fit$FIT) #' process each row, summing A2 and A3 for(i in seq_len(n)) { #' the submodel in this row subi <- subs[[i]] #' contributes to second order terms only if non-Poisson if(!is.poisson(subi)) { cnames.i <- names(coef(subi)) a2i <- a2[[i]] a3i <- a3[[i]] #' the (unique) tag name of the interaction in this model tagi <- colnames(activeinter)[activeinter[i,]] #' the corresponding variable name(s) in glmdata and coef(subi) vni <- Vnamelist[[tagi]] iso <- Isoffsetlist[[tagi]] #' ignore offset variables vni <- vni[!iso] if(length(vni)) { #' retain only interaction rows & columns (the rest are zero anyway) e <- cnames.i %in% vni a2i <- a2i[e, e, drop=FALSE] a3i <- a3i[e, e, drop=FALSE] cnames.ie <- cnames.i[e] #' which coefficients of the full model are active in this row acti <- activevars[i,] #' for each interaction variable name in the submodel, #' find the coefficient(s) in the main model to which it contributes nie <- length(cnames.ie) cmap <- vector(mode="list", length=nie) names(cmap) <- cnames.ie for(j in seq_len(nie)) { cj <- cnames.ie[j] cmap[[j]] <- cnames[ md[,cj] & acti ] } #' all possible mappings maps <- do.call(expand.grid, append(cmap, list(stringsAsFactors=FALSE))) nmaps <- nrow(maps) if(nmaps == 0) { warning("Internal error: Unable to map submodel to full model") } else { for(irow in 1:nmaps) { for(jcol in 1:nmaps) { cmi <- as.character(maps[irow,]) cmj <- as.character(maps[jcol,]) if(anyDuplicated(cmi) || anyDuplicated(cmj)) { warning("Internal error: duplicated labels in submodel map") } else if(!is.null(a2i)) { A2[cmi,cmj] <- A2[cmi,cmj] + a2i A3[cmi,cmj] <- A3[cmi,cmj] + a3i } } } } } } } internals <- list(A1=A1, A2=A2, A3=A3) if(what %in% c("internals", "all")) internals <- c(internals, list(suff=mom)) if(what %in% c("vcov", "corr", "all")) { #' variance-covariance matrix required U <- checksolve(A1, matrix.action, , "variance") vc <- if(is.null(U)) NULL else (U %*% (A1 + A2 + A3) %*% U) } out <- switch(what, fisher = A1 + A2 + A3, vcov = vc, corr = { if(is.null(vc)) return(NULL) sd <- sqrt(diag(vc)) vc / outer(sd, sd, "*") }, internals = internals, all = list(internals=internals, fisher=A1+A2+A3, varcov=vc, invgrad=A1) ) return(out) } addsubmatrix <- function(A, B, guessnames) { if(is.null(B)) return(A) if(is.null(colnames(B)) && !missing(guessnames)) { if(is.character(guessnames)) guessnames <- list(guessnames, guessnames) if(all(lengths(guessnames) == dim(B))) colnames(B) <- guessnames } if(is.null(colnames(B))) { #' unusual if(!all(dim(A) == dim(B))) stop("Internal error: no column names, and matrices non-conformable") A <- A + B return(A) } j <- match(colnames(B), colnames(A)) if(anyNA(j)) stop("Internal error: unmatched column name(s)") A[j,j] <- A[j,j] + B return(A) } bindsubmatrix <- function(A, B) { if(is.null(B)) return(A) if(is.null(colnames(B))) { if(ncol(A) != ncol(B)) stop("Internal error: no column names, and matrices non-conformable") A <- rbind(A, B) return(A) } j <- match(colnames(B), colnames(A)) if(anyNA(j)) stop("Internal error: unmatched column name(s)") BB <- matrix(0, nrow(B), ncol(A)) BB[,j] <- B A <- rbind(A, BB) return(A) } mergeAlternatives <- function(A, B) { okA <- !sapply(A, is.null) okB <- !sapply(B,
is.null) if(any(override <- !okA & okB)) A[override] <- B[override] return(A) } notallzero <- function(df) { apply(df != 0, 2, any) } vcov.mppm }) spatstat.core/R/hierstrhard.R0000644000176200001440000002664114144333462015724 0ustar liggesusers## ## hierstrhard.R ## ## $Revision: 1.5 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical Strauss-hard core process ## ## ------------------------------------------------------------------- ## HierStraussHard <- local({ # ......... define interaction potential HSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
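  ## Usage sketch (not run; 'X', 'R', 'H' are hypothetical names): the
  ## constructor defined below is normally supplied to ppm() as the
  ## interaction, e.g. for a multitype point pattern X with two types:
  ##   R <- matrix(c(40, NA, 45, 40), 2, 2)  # interaction radii by type pair
  ##   H <- matrix(c(5, NA, 8, 5), 2, 2)     # hard core radii by type pair
  ##   fit <- ppm(X ~ marks, HierStraussHard(iradii=R, hradii=H))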
delHSH <- function(which, types, iradii, hradii, archy, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified HierStraussHard with fewer gamma parameters return(HierStraussHard(types=types, iradii=iradii, hradii=hradii, archy=archy)) } else if(any(!ihc)) { # ihc = inactive hard cores # no gamma interactions left, but some active hard cores return(HierHard(types=types, hradii=hradii, archy=archy)) } else return(Poisson()) } # Set up basic object except for family and parameters BlankHSHobject <- list( name = "Hierarchical Strauss-hard core process", creator = "HierStraussHard", family = "hierpair.family", # evaluated later pot = HSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "interaction distances", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances", "hierarchical order"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1L, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) hradii[!(archy$relation)] <- NA } HierStraussHard(types=types,hradii=hradii, iradii=self$par$iradii, archy=archy) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii ## hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii"), asymmok=TRUE) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE) } ina <- is.na(iradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) if(!is.null(hradii)) { hna <- is.na(hradii) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") } }, update = NULL, # default OK print = function(self) { iradii <- self$par$iradii hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(iradii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") splat("Interaction radii:") dig <- getOption("digits") print(hiermat(signif(iradii, dig), archy)) if(!is.null(hradii)) { splat("Hardcore radii:") print(hiermat(signif(hradii, dig), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <-
length(typ) ## get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii ## list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(dround(gammas), self$par$archy))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # parameters to estimate required <- !is.na(iradii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interactions must be non-explosive d <- diag(rep(TRUE, nrow(iradii))) activehard <- !is.na(hradii) & (hradii > 0) return(all(gamma[required & d & !activehard] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems? uptri <- archy$relation required <- !is.na(iradii) & uptri offdiag <- !diag(nrow(iradii)) gammavalid <- is.finite(gamma) & (activehard | offdiag | (gamma <= 1)) naughty <- required & !gammavalid # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHSH(naughty, types, iradii, hradii, archy, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHSH, types=types, iradii=iradii, hradii=hradii, archy=archy, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) & self$par$archy$relation hactive <- !is.na(h) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankHSHobject) <- "interact" # finally create main function HierStraussHard <- function(iradii, hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } iradii[iradii == 0] <- NA out <- instantiate.interact(BlankHSHobject, list(types=types, iradii=iradii, hradii=hradii, archy=archy)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } HierStraussHard <- intermaker(HierStraussHard, BlankHSHobject) HierStraussHard }) spatstat.core/R/vcov.ppm.R0000644000176200001440000017335014144333464015157 0ustar liggesusers## ## Asymptotic covariance & correlation matrices ## and Fisher information matrix ## for ppm objects ## ## $Revision: 1.134 $ $Date: 2021/02/06 03:43:12 $ ## vcov.ppm <- local({ vcov.ppm <- function(object, ..., what="vcov", verbose=TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), hessian=FALSE) { verifyclass(object, "ppm") argh <- list(...) gam.action <- match.arg(gam.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) nacoef.action <- match.arg(nacoef.action) if(!all(is.finite(coef(object)))) { gripe <- "Cannot compute variance; model is not valid" switch(nacoef.action, fatal = stop(gripe, call.=FALSE), warn = warning(gripe, call.=FALSE), silent = {}) return(NULL) } if(missing(fine) && ("A1dummy" %in% names(argh))) { message("Argument 'A1dummy' has been replaced by 'fine'") fine <- as.logical(argh$A1dummy) } else fine <- as.logical(fine) stopifnot(length(what) == 1 && is.character(what)) what.options <- c("vcov", "corr", "fisher", "Fisher", "internals", "all") what.map <- c("vcov", "corr", "fisher", "fisher", "internals", "all") if(is.na(m <- pmatch(what, what.options))) stop(paste("Unrecognised option: what=", sQuote(what))) what <- what.map[m] ## No vcov for Variational Bayes if(!is.null(object$internal$VB)) stop("Variance calculations currently not possible for variational Bayes fit.") ## no parameters, no variance if(length(coef(object)) == 0) { result <- switch(what, vcov=, corr=, fisher= { matrix(, 0, 0) }, internals=, all={ list() }) return(result) } ## nonstandard calculations (hack) generic.triggers <- c("A1", "new.coef", "modmat", "matwt", "saveterms", "sparseOK") nonstandard <- any(generic.triggers %in% names(argh)) || fine # saveterms <- identical(resolve.1.default("saveterms", argh), TRUE) ## Fisher information *may* be contained in object fisher <- object$fisher varcov <- object$varcov ## Do we need to go into the guts? 
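    ## (Usage sketch, not run; 'fit' denotes a hypothetical fitted ppm:
    ##    vcov(fit)                 # variance-covariance of coef(fit)
    ##    vcov(fit, what="corr")    # correlation matrix
    ##    vcov(fit, what="fisher")  # Fisher information matrix
    ##  Stored 'fisher'/'varcov' entries are reused below when they suffice.)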
needguts <- nonstandard || (is.null(fisher) && what=="fisher") || (is.null(varcov) && what %in% c("vcov", "corr")) || (what %in% c("internals", "all")) ## In general it is not true that varcov = solve(fisher) ## because we might use different estimators, ## or the parameters might be a subset of the canonical parameter if(needguts) { ## warn if fitted model was obtained using GAM if(identical(object$fitter, "gam")) { switch(gam.action, fatal={ stop(paste("model was fitted by gam();", "execution halted because fatal=TRUE"), call.=FALSE) }, warn={ warning(paste("model was fitted by gam();", "asymptotic variance calculation ignores this"), call.=FALSE) }, silent={}) } ## ++++ perform main calculation ++++ if((is.poisson(object) || (hessian && what!="internals")) && object$method != "logi") { ## Poisson model, or Hessian of Gibbs model without internals results <- vcalcPois(object, ..., what=what, matrix.action=matrix.action, verbose=verbose, fisher=fisher) } else { ## Gibbs model results <- vcalcGibbs(object, ..., what=what, fine=fine, matrix.action=matrix.action, hessian = hessian) } varcov <- results$varcov fisher <- results$fisher internals <- results$internals } if(what %in% c("vcov", "corr") && is.null(varcov)) { ## Need variance-covariance matrix. if(!is.null(fisher) && is.poisson(object)) ## Derive from Fisher information varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") } out <- switch(what, fisher = fisher, vcov = varcov, corr = { if(is.null(varcov)) return(NULL) sd <- sqrt(diag(varcov)) varcov / outer(sd, sd, "*") }, internals = internals, all = results ) return(out) } ## ................ variance calculation for Poisson models ............. vcalcPois <- function(object, ..., what = c("vcov", "corr", "fisher", "internals", "all"), matrix.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), method=c("C", "interpreted"), verbose=TRUE, fisher=NULL, modmat=model.matrix(object), matwt=NULL, # weights on rows of model matrix new.coef=NULL, dropcoef=FALSE, saveterms=FALSE) { ## variance-covariance matrix of Poisson model, ## or Hessian of Gibbs model what <- match.arg(what) method <- match.arg(method) matrix.action <- match.arg(matrix.action) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) internals <- NULL nonstandard <- reweighting || !is.null(new.coef) || saveterms ## detect invalid model if(!all(is.finite(coef(object)))) { gripe<-"Cannot compute variance; some coefficients are NA, NaN or infinite" switch(nacoef.action, fatal=stop(gripe, call.=FALSE), warn=warning(gripe, call.=FALSE), silent={}) return(NULL) } ## compute Fisher information if not known if(is.null(fisher) || nonstandard) { gf <- getglmfit(object) ## we need a glm or gam if(is.null(gf)) { if(verbose) warning("Refitting the model using GLM/GAM") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("Internal error - refitting did not yield a glm object") } ## compute fitted intensity and sufficient statistic ltype <- if(is.poisson(object)) "trend" else "lambda" lambda <- fitted(object, type=ltype, new.coef=new.coef, dropcoef=dropcoef, check=FALSE) mom <- modmat nmom <- nrow(mom) Q <- quad.ppm(object) wt <- w.quad(Q) ok <- getglmsubset(object) Z <- is.data(Q) ## save them if(what == "internals") { internals <- if(!saveterms) list(suff=mom) else list(suff=mom, mom=mom, lambda=lambda, Z=Z, ok=ok) } ## Now restrict all terms to the domain of the pseudolikelihood lambda <- lambda[ok] mom <- mom[ok, , 
drop=FALSE] wt <- wt[ok] Z <- Z[ok] ## apply weights to rows of model matrix - temporary hack if(reweighting) { nwt <- length(matwt) if(nwt == nmom) { ## matwt matches original quadrature scheme - trim it matwt <- matwt[ok] } else if(nwt != sum(ok)) stop("Hack argument matwt has incompatible length") mom.orig <- mom mom <- matwt * mom } ## compute Fisher information switch(method, C = { fisher <- sumouter(mom, lambda * wt) if(reweighting) { gradient <- sumouter(mom.orig, matwt * lambda * wt) } }, interpreted = { if(!reweighting) { fisher <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] v <- outer(ro, ro, "*") * lambda[i] * wt[i] if(!anyNA(v)) fisher <- fisher + v } momnames <- dimnames(mom)[[2]] dimnames(fisher) <- list(momnames, momnames) } else { fisher <- gradient <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] ro0 <- mom.orig[i,] ldu <- lambda[i] * wt[i] v <- outer(ro, ro, "*") * ldu v0 <- outer(ro0, ro0, "*") * matwt[i] * ldu if(!anyNA(v)) fisher <- fisher + v if(!anyNA(v0)) gradient <- gradient + v0 } momnames <- dimnames(mom)[[2]] dn <- list(momnames, momnames) dimnames(fisher) <- dimnames(gradient) <- dn } }) } if(what %in% c("all", "internals")) { ## Internals needed if(is.null(internals)) internals <- list(suff = modmat) internals$fisher <- fisher if(reweighting) internals$gradient <- gradient ilist <- list(internals=internals) } if(what %in% c("all", "vcov", "corr")) { ## Variance-covariance matrix needed if(!reweighting) { ## Derive variance-covariance from Fisher info varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") vcovlist <- list(fisher=fisher, varcov=varcov) } else { invgrad <- checksolve(gradient, matrix.action, "gradient matrix", "variance") varcov <- if(is.null(invgrad)) NULL else invgrad %*% fisher %*% invgrad vcovlist <- list(fisher=fisher, varcov=varcov, invgrad=invgrad) } } result <- switch(what, fisher = list(fisher=fisher), vcov = vcovlist, corr = vcovlist, internals = ilist, all = append(ilist, vcovlist)) return(result) } ## ...................... vcov calculation for Gibbs models .................... 
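  ## Overview (sketch of the computation below): writing A1 for the
  ## sensitivity matrix and A2, A3 for the second-order interaction terms,
  ## the Gibbs code assembles a sandwich estimate of the form
  ##     Sigma <- (A1 + A2 + A3)/areaW
  ##     U     <- solve(A1/areaW)            # via checksolve()
  ##     vcov  <- U %*% Sigma %*% U / areaW
  ## For logistic fits, Slog and Sigma1log + Sigma2log play the roles of
  ## A1 and Sigma respectively.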
vcalcGibbs <- function(fit, ..., fine=FALSE, what = c("vcov", "corr", "fisher", "internals", "all"), generic=FALSE) { what <- match.arg(what) if(missing(generic)) { ## Change default to TRUE in certain cases ## For logistic fits, use generic method by default if(fit$method == "logi") generic <- TRUE ## For 'difficult' interactions, use generic method by default fasterbygeneric <- c("AreaInter") if(as.interact(fit)$creator %in% fasterbygeneric) generic <- TRUE } ## decide whether to use the generic algorithm generic.triggers <- c("A1", "hessian", "new.coef", "matwt", "saveterms", "sparseOK") use.generic <- generic || fine || !is.stationary(fit) || (fit$method == "logi" && ("marks" %in% variablesinformula(fit$trend))) || (fit$method != "logi" && has.offset(fit)) || (fit$method == "logi" && has.offset.term(fit)) || !(fit$correction == "border" && fit$rbord == reach(fit)) || any(generic.triggers %in% names(list(...))) || !identical(options("contrasts")[[1]], c(unordered="contr.treatment", ordered="contr.poly")) ## compute spill <- (what %in% c("all", "internals", "fisher")) spill.vc <- (what == "all") out <- if(use.generic) vcalcGibbsGeneral(fit, ..., fine=fine, spill=spill, spill.vc=spill.vc) else vcalcGibbsSpecial(fit, ..., spill=spill, spill.vc=spill.vc) switch(what, vcov = , corr = { ## out is the variance-covariance matrix; return it return(list(varcov=out)) }, fisher = { ## out is a list of internal data: extract the Fisher info Fmat <- with(out, if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) return(list(fisher=Fmat)) }, internals = { ## out is a list of internal data: return it ## (ensure model matrix is included) if(is.null(out$mom)) out$mom <- model.matrix(fit) return(list(internals=out)) }, all = { ## out is a list(internals, vc): return it ## (ensure model matrix is included) if(is.null(out$internals$mom)) out$internals$mom <- model.matrix(fit) ## ensure Fisher info is included if(is.null(out$internals$fisher)) { Fmat <- with(out$internals, if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) out$internals$fisher <- Fmat } return(out) }, ) return(NULL) } ## ...................... general algorithm ...........................
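  ## Dispatch note (illustrative, 'fit' is a hypothetical fitted ppm):
  ## vcalcGibbs above falls back to the generic algorithm below whenever a
  ## nonstandard argument is supplied, e.g.
  ##   vcov(fit, hessian=TRUE)   # 'hessian' is one of the generic.triggers
  ##   vcov(fit, fine=TRUE)      # fine quadrature approximation of A1
  ## or when the model is non-stationary, has marks in the trend of a
  ## logistic fit, or was not fitted with border correction at reach(fit).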
vcalcGibbsGeneral <- function(model, ..., spill = FALSE, spill.vc = FALSE, na.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), algorithm=c("vectorclip", "vector", "basic"), A1 = NULL, fine = FALSE, hessian = FALSE, modmat = model.matrix(model), matwt = NULL, new.coef = NULL, dropcoef=FALSE, saveterms = FALSE, parallel = TRUE, sparseOK = TRUE ) { modmat.given <- !missing(modmat) na.action <- match.arg(na.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) algorithm <- match.arg(algorithm) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) spill <- spill || spill.vc saveterms <- spill && saveterms logi <- model$method=="logi" asked.parallel <- !missing(parallel) old.coef <- coef(model) use.coef <- adaptcoef(new.coef, old.coef, drop=dropcoef) if(modmat.given) { p <- ncol(modmat) pnames <- colnames(modmat) } else { p <- length(old.coef) pnames <- names(old.coef) } if(p == 0) { ## this probably can't happen if(!spill) return(matrix(, 0, 0)) else return(list()) } dnames <- list(pnames, pnames) # (may be revised later) internals <- list() ## sumobj <- summary(model, quick="entries") correction <- model$correction rbord <- model$rbord R <- reach(model, epsilon=1e-2) Q <- quad.ppm(model) D <- dummy.ppm(model) rho <- model$internal$logistic$rho #### If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(model) Z <- is.data(Q) W <- as.owin(model) areaW <- if(correction == "border") eroded.areas(W, rbord) else area(W) ## ## determine which quadrature points contributed to the ## sum/integral in the pseudolikelihood ## (e.g. some points may be excluded by the border correction) okall <- getglmsubset(model) ## conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) ## data and dummy: lamall <- fitted(model, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) if(anyNA(lamall)) { whinge <- "Some values of the fitted conditional intensity are NA" switch(na.action, fatal = { stop(whinge, call.=FALSE) }, warn = { warning(whinge, call.=FALSE) okall <- okall & !is.na(lamall) }, silent = { okall <- okall & !is.na(lamall) }) } ## data only: lam <- lamall[Z] ok <- okall[Z] nX <- npoints(X) ## sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) ## data and dummy: mall <- modmat if(ncol(mall) != length(pnames)) { if(!dropcoef) stop(paste("Internal error: dimension of sufficient statistic = ", ncol(mall), "does not match length of coefficient vector =", length(pnames)), call.=FALSE) p <- length(pnames) pnames <- colnames(mall) dnames <- list(pnames, pnames) } ## save if(saveterms) internals <- append(internals, list(mom=mall, lambda=lamall, Z=Z, ok=okall, matwt=matwt)) if(reweighting) { ## each column of the model matrix is multiplied by 'matwt' check.nvector(matwt, nrow(mall), things="quadrature points") mall.orig <- mall mall <- mall * matwt } ## subsets of model matrix mokall <- mall[okall, , drop=FALSE] ## data only: m <- mall[Z, , drop=FALSE] mok <- m[ok, , drop=FALSE] ## if(reweighting) { ## save unweighted versions mokall.orig <- mall.orig[okall, , drop=FALSE] m.orig <- mall.orig[Z, , drop=FALSE] mok.orig <- m.orig[ok, , drop=FALSE] ## matwtX <- matwt[Z] } ## ^^^^^^^^^^^^^^^^ First order (sensitivity) matrices A1, S ## logistic if(logi){ ## Sensitivity matrix S for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) dimnames(Slog) <- dnames ## A1 matrix 
for logistic case A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) dimnames(A1log) <- dnames } ## Sensitivity matrix for MPLE case (= A1) if(is.null(A1) || reweighting) { if(fine){ A1 <- sumouter(mokall, w = (lamall * w.quad(Q))[okall]) if(reweighting) gradient <- sumouter(mokall.orig, w=(matwt * lamall * w.quad(Q))[okall]) } else{ A1 <- sumouter(mok) if(reweighting) gradient <- sumouter(mok.orig, w=matwtX) } } else { stopifnot(is.matrix(A1)) if(!all(dim(A1) == p)) stop(paste("Matrix A1 has wrong dimensions:", prange(dim(A1)), "!=", prange(c(p, p)))) } dimnames(A1) <- dnames ## ^^^^^^^^^^ Second order interaction effects A2, A3 if(hessian) { ## interaction terms suppressed A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) } else { ## ^^^^^^^^^^^^^^^^^^^^ `parallel' evaluation need.loop <- TRUE if(parallel) { ## compute second order difference ## ddS[i,j,] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- deltasuffstat(model, restrict="pairs", force=FALSE, sparseOK=sparseOK) sparse <- inherits(ddS, "sparse3Darray") if(is.null(ddS)) { if(asked.parallel) warning("parallel option not available - reverting to loop") } else { need.loop <- FALSE ## rearrange so that ## ddS[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- aperm(ddS, c(3,2,1)) ## now compute sum_{i,j} for i != j ## outer(ddS[,i,j], ddS[,j,i]) ddSok <- ddS[ , ok, ok, drop=FALSE] A3 <- sumsymouter(ddSok) ## compute pairweight and other arrays if(sparse) { ## Entries are only required for pairs i,j which interact. ## mom.array[ ,i,j] = h(X[i] | X) mom.array <- mapSparseEntries(ddS, margin=2, values=m, conform=TRUE, across=1) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## pairweight[i,j] = lambda(X[i] | X[-j] )/lambda( X[i] | X ) - 1 pairweight <- expm1(tensor1x1(-use.coef, ddS)) } else { ## mom.array[ ,i,j] = h(X[i] | X) mom.array <- array(t(m), dim=c(p, nX, nX)) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## lamdel[i,j] = lambda(X[i] | X[-j]) lamdel <- matrix(lam, nX, nX) * exp(tensor::tensor(-use.coef, ddS, 1, 1)) ## pairweight[i,j] = lamdel[i,j]/lambda[i] - 1 pairweight <- lamdel / lam - 1 } ## now compute sum_{i,j} for i != j ## pairweight[i,j] * outer(momdel[,i,j], momdel[,j,i]) ## for data points that contributed to the pseudolikelihood momdelok <- momdel[ , ok, ok, drop=FALSE] pwok <- pairweight[ok, ok] if(anyNA(momdelok) || anyNA(pwok)) stop("Unable to compute variance: NA values present", call.=FALSE) A2 <- sumsymouter(momdelok, w=pwok) dimnames(A2) <- dimnames(A3) <- dnames if(logi){ if(!sparse) { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- array(lam, c(nX,nX,p)) lam.array <- aperm(lam.array, c(3,1,2)) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) lamdel.array <- array(lamdel, c(nX,nX,p)) lamdel.array <- aperm(lamdel.array, c(3,1,2)) momdellogi <- rho/(lamdel.array+rho)*momdel ddSlogi <- rho/(lam.array+rho)*mom.array - momdellogi } else { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- mapSparseEntries(ddS, margin=2, lam, conform=TRUE, across=1) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) pairweight.array <- aperm(as.sparse3Darray(pairweight), c(3,1,2)) lamdel.array <- pairweight.array * lam.array + lam.array lamdel.logi <- applySparseEntries(lamdel.array, function(y,rho) { rho/(rho+y) }, rho=rho) lam.logi <- applySparseEntries(lam.array, function(y,rho) { rho/(rho+y) }, rho=rho) momdellogi <- momdel * lamdel.logi ddSlogi <- mom.array * lam.logi - momdellogi } momdellogiok <- momdellogi[ , ok, ok, drop=FALSE] 
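              ## (sumsymouter(M, w) forms the sum over pairs i != j of
              ##  w[i,j] * outer(M[,i,j], M[,j,i]), as in the computation
              ##  of A2 and A3 above)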
A2log <- sumsymouter(momdellogiok, w=pwok) ddSlogiok <- ddSlogi[ , ok, ok, drop=FALSE] A3log <- sumsymouter(ddSlogiok) dimnames(A2log) <- dimnames(A3log) <- dnames } } } ## ^^^^^^^^^^^^^^^^^^^^ loop evaluation if(need.loop) { A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) if(saveterms) { ## *initialise* matrices ## lamdel[i,j] = lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)]) lamdel <- matrix(lam, nX, nX) ## momdel[ ,i,j] = h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]) momdel <- array(t(m), dim=c(p, nX, nX)) } ## identify close pairs if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j if(algorithm == "vectorclip") { cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................ ## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## make an empty 'equalpairs' matrix nonE <- matrix(, nrow=0, ncol=2) ## Run through pairs switch(algorithm, basic={ for(i in unique(I)) { Xi <- X[i] Ji <- unique(J[I==i]) if((nJi <- length(Ji)) > 0) { for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] plamj.i <- predict(model, type="cif", locations=X[j], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] pmj.i <- partialModelMatrix(X.ij, X[j], model)[nX-1, ] ## conditional intensity and sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] plami.j <- predict(model, type="cif", locations=X[i], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi.j <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] ## if(reweighting) { pmj.i <- pmj.i * matwtX[j] pmi.j <- pmi.j * matwtX[i] } if(saveterms) { lamdel[i,j] <- plami.j momdel[ , i, j] <- pmi.j lamdel[j,i] <- plamj.i momdel[ , j, i] <- pmj.i } ## increment A2, A3 wt <- plami.j / lam[i] - 1 A2 <- A2 + wt * outer(pmi.j, pmj.i) if(logi) A2log <- A2log + wt * rho/(plami.j+rho) * rho/(plamj.i+rho) * outer(pmi.j, pmj.i) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj.i deltajSi <- m[i, ] - pmi.j A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/ (lam[j]+rho) - pmj.i/(plamj.i+rho)) deltajSilog <- rho*(m[i, ]/ (lam[i]+rho) - pmi.j/(plami.j+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vector={ ## --------- faster algorithm using vector functions -------- for(i in unique(I)) { Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## all points other than X[i] X.i <- X[-i] ## index of XJi in X.i J.i <- Ji - (Ji > i) ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, 
sumobj=sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vectorclip={ ## --------- faster version of 'vector' algorithm ## -------- by removing non-interacting points of X for(i in unique(I)) { ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] 
| X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }) } } ## ......... end of loop computation ............... } #### Matrix Sigma Sigma <- A1+A2+A3 if(spill) { ## save internal data (with matrices unnormalised) internals <- c(internals, list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW), if(logi) list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog) else NULL, if(reweighting) list(gradient=gradient) else NULL, list(hessian = if(reweighting) gradient else if(logi) Slog else A1, fisher = Sigma), if(saveterms) list(lamdel=lamdel, momdel=momdel) else NULL) ## return internal data if no further calculation needed if(!spill.vc && !logi) return(internals) } ## ........... calculate variance/covariance matrix for MPL ......... if(!reweighting) { ## Normalise A1 <- A1/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry A1 <- (A1 + t(A1))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(A1, matrix.action, , "variance") } else { ## Normalise gradient <- gradient/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry gradient <- (gradient + t(gradient))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(gradient, matrix.action, , "variance") } ## compute variance-covariance vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW dimnames(vc.mpl) <- dnames ## return variance-covariance matrix, if model was fitted by MPL if(!logi) { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ###### Everything below is only computed for logistic fits ####### ## Matrix Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- model$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ## Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## Changed by ER 2013/06/14 to use the new quadscheme.logi ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = model$internal$logistic$nd) D2 <- Q2$dummy Q2$dummy$Dinfo <- D2$Dinfo Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=model$trend, interaction = model$interaction, method = model$method, correction = model$correction, rbord = model$rbord, covariates = model$covariates) arglist <- append(arglist, model$internal$logistic$extraargs) model2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(model2, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) ## New model matrix 
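        ## (from the refitted model2; together with lamall2 above, these
        ##  feed the stratified-random estimate Sigma2log computed below)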
mall2 <- model.matrix(model2) okall2 <- getglmsubset(model2) ## index vectors of stratrand cell indices of dummy points inD <- model$internal$logistic$inD inD2 <- model2$internal$logistic$inD ## Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- (bdist.points(D) >= R) ii2 <- (bdist.points(D2) >= R) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } ## OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) ## cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,,drop=FALSE][ok1,] mdum2 <- mall2[!Z2,,drop=FALSE][ok2,] ## finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) ## Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) Sigma2log <- crossprod(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) ## Attach dimnames to the Sigma2log calculated above dimnames(Sigma2log) <- dnames if(spill) { ## return internal data only (with matrices unnormalised) internals <- c(internals, list(Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } ## .. Calculate variance-covariance matrix for logistic fit ........... ## normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## evaluate Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW dimnames(vc.logi) <- dnames ## if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } ## vcalcGibbs from Ege Rubak and J-F Coeurjolly ## 2013/06/14, modified by Ege to handle logistic case as well vcalcGibbsSpecial <- function(fit, ..., spill=FALSE, spill.vc=FALSE, special.alg = TRUE, matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent")) { matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) spill <- spill || spill.vc ## Interaction name: iname <- fit$interaction$name ## Does the model have marks which are in the trend? marx <- is.marked(fit) && ("marks" %in% variablesinformula(fit$trend)) ## The full data and window: Xplus <- data.ppm(fit) Wplus <- as.owin(Xplus) ## Fitted parameters and the parameter dimension p (later consisting of p1 trend param.
and p2 interaction param.): theta <- coef(fit) p <- length(theta) ## Number of points: n <- npoints(Xplus) ## Using the faster algorithms for special cases if(special.alg && fit$method != "logi"){ param <- coef(fit) switch(iname, "Strauss process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, reach(fit$interaction), exp(coef(fit)[2]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Piecewise constant pairwise interaction process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, fit$interaction$par$r, exp(coef(fit)[-1]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Multitype Strauss process"={ matR <- fit$interaction$par$radii R <- c(matR[1,1], matR[1,2], matR[2,2]) ## Only implemented for 2 types with equal interaction range: if(ncol(matR)==2 && marx){ n <- length(theta) res <- vcovMultiStrauss(Xplus, R, exp(theta[c(n-2,n-1,n)]), matrix.action,spill=spill,spill.vc=spill.vc) if(!spill) { res <- contrastmatrix(res, 2) dimnames(res) <- list(names(theta), names(theta)) } return(res) } } ) } ## Matrix specifying equal points in the two patterns in the call to eval below: E <- matrix(rep.int(1:n, 2), ncol = 2) ## Eval. the interaction potential difference at all points (internal spatstat function): # V1 <- fit$interaction$family$eval(Xplus, Xplus, E, fit$interaction$pot, fit$interaction$par, fit$correction) oldopt <- NULL if(fit$interaction$family$name=="pairwise"){ oldopt <- spatstat.options(fasteval = "off") } V1 <- evalInteraction(Xplus, Xplus, E, as.interact(fit), fit$correction) spatstat.options(oldopt) ## Calculate parameter dimensions and correct the contrast type parameters: p2 <- ncol(V1) p1 <- p-p2 if(p1>1) theta[2:p1] <- theta[2:p1] + theta[1] ## V1 <- evalInteraction(Q, Xplus, union.quad(Q), fit$interaction, fit$correction) POT <- attr(V1, "POT") attr(V1, "POT") <- NULL ## Adding the constant potential as first column (one column per type for multitype): if(!marx){ V1 <- cbind(1, V1) colnames(V1) <- names(theta) } else{ lev <- levels(marks(Xplus)) ## Indicator matrix for mark type attached to V1: tmp <- matrix(marks(Xplus), nrow(V1), p1)==matrix(lev, nrow(V1), p-ncol(V1), byrow=TRUE) colnames(tmp) <- lev V1 <- cbind(tmp,V1) } ## Matrices for differences of potentials: E <- matrix(rep.int(1:(n-1), 2), ncol = 2) dV <- V2 <- array(0,dim=c(n,n,p)) for(k in 1:p1){ V2[,,k] <- matrix(V1[,k], n, n, byrow = FALSE) } for(k in (p1+1):p){ diag(V2[,,k]) <- V1[,k] } for(j in 1:n){ ## Fast evaluation for pairwise interaction processes: if(fit$interaction$family$name=="pairwise" && !is.null(POT)){ V2[-j,j,-(1:p1)] <- V1[-j,-(1:p1)]-POT[-j,j,] } else{ V2[-j,j,-(1:p1)] <- fit$interaction$family$eval(Xplus[-j], Xplus[-j], E, fit$interaction$pot, fit$interaction$par, fit$correction) ## Q <- quadscheme(Xplus[-j],emptyppp) ## V2[-j,j,-1] <- evalInteraction(Q, Xplus[-j], Xplus[-j], fit$interaction, fit$correction) } for(k in 1:p){ dV[,j,k] <- V1[,k] - V2[,j,k] } } ## Ratio of first and second order Papangelou - 1: frac <- 0*dV[,,1] for(k in (p1+1):p){ frac <- frac + dV[,,k]*theta[k] } frac <- exp(-frac)-1 ## In the rest we restrict attention to points in the interior: ## The interaction range: R <- reach(fit$interaction) ## The reduced window, area and point pattern: W<-erosion.owin(Wplus,R) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=R X <- Xplus[IntPoints] ## Making a logical matrix, I, indicating R-close pairs which are in the interior: D <- pairdist(Xplus) diag(D) <- Inf I 
<- (D<=R) & outer(IntPoints,IntPoints, "&") ## Matrix A1: A1 <- t(V1[IntPoints,])%*%V1[IntPoints,] ## Matrix A2: A2 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2[k,l] <- A2[l,k] <- sum(I*V2[,,k]*frac*t(V2[,,l])) } } ## Matrix A3: A3 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A3[k,l] <- A3[l,k] <- sum(I*dV[,,k]*t(dV[,,l])) } } ## Matrix Sigma (A1+A2+A3): Sigma<-A1+A2+A3 if(spill) { # save internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- list(names(theta), names(theta)) internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW) # return internal data, if model fitted by MPL if(!spill.vc && fit$method != "logi") return(internals) } # ......... Calculate variance-covariance matrix for MPL ........ # normalise A1 <- A1/areaW Sigma <- Sigma/areaW # evaluate U <- checksolve(A1, matrix.action, , "variance") vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW ## Convert to treatment contrasts if(marx) vc.mpl <- contrastmatrix(vc.mpl, p1) dimnames(vc.mpl) <- list(names(theta), names(theta)) # Return result for standard ppm method: if(fit$method!="logi") { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ######################################################################## ###### The remainder is only executed when the method is logistic ###### ######################################################################## ### Most of this is copy/pasted from vcalcGibbsGeneral correction <- fit$correction Q <- quad.ppm(fit) D <- dummy.ppm(fit) rho <- fit$internal$logistic$rho ## If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(fit) Z <- is.data(Q) # determine which data points entered into the sum in the pseudolikelihood # (border correction, nonzero cif) # data and dummy: okall <- getglmsubset(fit) ## # data only: ## ok <- okall[Z] # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) # data and dummy: lamall <- fitted(fit, check = FALSE) ## # data only: ## lam <- lamall[Z] # sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) # data and dummy: mall <- model.matrix(fit) mokall <- mall[okall, , drop=FALSE] ## # data only: ## m <- mall[Z, , drop=FALSE] ## mok <- m[ok, , drop=FALSE] # Sensitivity matrix S and A1 matrix for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) ## Define W1, W2 and dW for the logistic method based on V1, V2 and dV (frac is unchanged) lambda1 <- exp(.rowSums(matrix(theta,n,p,byrow=TRUE)*V1, n, p)) W1 <- V1*rho/(lambda1+rho) lambda2 <- exp(apply(array(rep(theta,each=n*n),dim=c(n,n,p))*V2, c(1,2), sum)) W2 <- V2 dW <- dV for(k in 1:p){ W2[,,k] <- V2[,,k] * rho/(lambda2+rho) for(j in 1:n){ dW[,j,k] <- W1[,k] - W2[,j,k] } } ## Matrices A2log and A3log for the first component Sigma1log of the variance: A2log <- A3log <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2log[k,l] <- A2log[l,k] <- sum(I*W2[,,k]*frac*t(W2[,,l])) A3log[k,l] <- A3log[l,k] <- sum(I*dW[,,k]*t(dW[,,l])) } } A2log <- A2log A3log <- A3log ## First variance component Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- fit$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" 
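## Both the MPL and logistic branches ultimately return a sandwich estimate
## of the form U %*% Sigma %*% U / areaW with U = solve(A1). A minimal
## sketch comparing standard errors under the two fitting methods (the
## dataset 'cells' and the radius 0.09 are illustrative choices):
fitMPL  <- ppm(cells ~ 1, Strauss(0.09))                   # maximum pseudolikelihood
fitLogi <- ppm(cells ~ 1, Strauss(0.09), method = "logi")  # logistic composite likelihood
sqrt(diag(vcov(fitMPL)))
sqrt(diag(vcov(fitLogi)))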
if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = fit$internal$logistic$nd) D2 <- Q2$dummy Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=fit$trend, interaction = fit$interaction, method = fit$method, correction = fit$correction, rbord = fit$rbord, covariates = fit$covariates) arglist <- append(arglist, fit$internal$logistic$extraargs) fit2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(fit2, check=FALSE) ## New model matrix mall2 <- model.matrix(fit2) okall2 <- getglmsubset(fit2) # index vectors of stratrand cell indices of dummy points inD <- fit$internal$logistic$inD inD2 <- fit2$internal$logistic$inD # Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- inside.owin(D, w = W) ii2 <- inside.owin(D2, w = W) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) # cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,][ok1,] mdum2 <- mall2[!Z2,][ok2,] # finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) ## Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) Sigma2log <- crossprod(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) if(spill) { ## Attach dimnames to all matrices dimnames(Sigma2log) <- dimnames(Slog) <- dimnames(Sigma1log) <- dimnames(A1log) <- dimnames(A2log) <- dimnames(A3log) <- list(names(theta),names(theta)) # return internal data (with matrices unnormalised) internals <- c(internals, list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog, Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } # ....... Compute variance-covariance for logistic fit ............. 
# Normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## Finally the result is calculated: Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW # dimnames(vc.logi) <- list(names(theta), names(theta)) if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } vcovPairPiece <- function(Xplus, R, Gam, matrix.action, spill=FALSE, spill.vc=FALSE){ ## R is the vector of breaks (R[length(R)]= range of the pp. ## Gam is the vector of weights Rmax <- R[length(R)] ## Xplus : point process observed in W+R ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,Rmax) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=Rmax X <- Xplus[IntPoints] nX <- npoints(X) nXplus <- npoints(Xplus) ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: Dplus<-pairdist(Xplus) D <- pairdist(X) diag(D) <- diag(Dplus) <- Inf ## logical matrix, I, indicating R-close pairs: p<-length(R) Tplus<-T<-matrix(0,X$n,p) I<-Iplus<-list() for (i in 1:p){ if (i==1){ Iplus[[1]]<- Dplus <=R[1] I[[1]] <- D<=R[1] } else { Iplus[[i]]<- ((Dplus>R[i-1]) & (Dplus <=R[i])) I[[i]] <- ((D>R[i-1]) & (D <=R[i])) } ## Vector T with the number of $R$-close neighbours to each point: Tplus[,i]<- .colSums(Iplus[[i]], nXplus, nXplus)[IntPoints] T[,i] <- .colSums(I[[i]], nX, nX) } ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,p+1,p+1) ## A1 and A3: A1[1,1] <- npoints(X) for (j in (2:(p+1))){ A1[1,j]<-A1[j,1]<-sum(Tplus[,j-1]) A3[j,j]<-sum(T[,j-1]) for (k in (2:(p+1))){ A1[j,k]<-sum(Tplus[,j-1] * Tplus[,k-1]) } } ## A2: for (j in (2:(p+1))){ A2[1,1]<-A2[1,1]+(Gam[j-1]^(-1)-1)*sum(T[,j-1]) for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] A2[1,j]<-A2[1,j]+(Gam[l-1]^(-1)-1)*sum(T[,l-1]*(vj) ) } A2[j,1]<-A2[1,j] for (k in (2:(p+1))){ for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] if (l==k) vk<-Tplus[,k-1]-1 else vk<-Tplus[,k-1] A2[j,k]<-A2[j,k]+ (Gam[l-1]^(-1)-1)*sum(I[[l-1]]*outer(vj,vk)) } } } Sigma<-A1+A2+A3 nam <- c("(Intercept)", names(Gam)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } vcovMultiStrauss <- function(Xplus, vecR, vecg, matrix.action, spill=FALSE, spill.vc=FALSE){ ## Xplus : marked Strauss point process ## with two types ## observed in W+R (R=max(R11,R12,R22)) ## vecg = estimated parameters of interaction parameters ## ordered as the output of ppm, i.e. vecg=(g11,g12,g22) ## vecR = range for the diff. 
strauss ordered a vecg(R11,R12,R22) R <- max(vecR) R11<-vecR[1];R12<-vecR[2];R22<-vecR[3] ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,R) areaW <- area(W) X1plus<-Xplus[Xplus$marks==levels(Xplus$marks)[1]] X2plus<-Xplus[Xplus$marks==levels(Xplus$marks)[2]] ## Interior points determined by bdist.points: IntPoints1 <- bdist.points(X1plus)>=R IntPoints2 <- bdist.points(X2plus)>=R X1 <- X1plus[IntPoints1] X2 <- X2plus[IntPoints2] nX1 <- npoints(X1) nX2 <- npoints(X2) nX1plus <- npoints(X1plus) nX2plus <- npoints(X2plus) ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: D1plus<-pairdist(X1plus) D1 <- pairdist(X1) diag(D1) <- diag(D1plus) <- Inf D2plus<-pairdist(X2plus) D2 <- pairdist(X2) diag(D2) <- diag(D2plus) <- Inf D12plus<-crossdist(X1,X2plus) T12plus<- .rowSums(D12plus<=R12, nX1, nX2plus) D21plus<-crossdist(X2,X1plus) T21plus<- .rowSums(D21plus<=R12, nX2, nX1plus) I12<-crossdist(X1,X2)<=R12 I21<-crossdist(X2,X1)<=R12 T12<- .rowSums(I12, nX1, nX2) T21<- .rowSums(I21, nX2, nX1) ## logical matrix, I, indicating R-close pairs: I1plus<- D1plus <=R11 I1 <- D1<=R11 I2plus<- D2plus <=R22 I2 <- D2<=R22 ## Vector T with the number of $R$-close neighbours to each point: T1plus<- .colSums(I1plus, nX1plus, nX1plus)[IntPoints1] T1 <- .colSums(I1, nX1, nX1) T2plus<- .colSums(I2plus, nX2plus, nX2plus)[IntPoints2] T2 <- .colSums(I2, nX2, nX2) ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,5,5) ## A1 is filled: A1[1,1]<-npoints(X1) A1[1,3]<-A1[3,1]<-sum(T1plus) A1[1,4]<-A1[4,1]<-sum(T12plus) A1[2,2]<-npoints(X2) A1[2,5]<-A1[5,2]<-sum(T2plus) A1[2,4]<-A1[4,2]<-sum(T21plus) A1[3,3]<-sum(T1plus*T1plus) A1[3,4]<-A1[4,3]<-sum(T1plus*T12plus) A1[5,5]<-sum(T2plus*T2plus) A1[4,5]<-A1[5,4]<-sum(T2plus*T21plus) A1[4,4]<-sum(T12plus*T12plus)+sum(T21plus*T21plus) ## A3 is filled: A3[3,3]<-sum(T1) A3[5,5]<-sum(T2) A3[4,4]<-sum(T12)+sum(T21) ## A2 is filled: gamInv<-vecg^(-1)-1 gi1<-gamInv[1];gi12<-gamInv[2];gi2<-gamInv[3] A2[1,1]<-sum(T1)*gi1 A2[1,2]<-A2[2,1]<-sum(T12)*gi12 A2[1,3]<-A2[3,1]<-sum(T1*(T1plus-1))*gi1 A2[1,5]<-A2[5,1]<-sum(T21*T2plus)*gi12 A2[1,4]<-A2[4,1]<-gi1*sum(T1*(T12plus))+gi12*sum(T21*(T21plus-1)) A2[2,2]<-sum(T2)*gi2 A2[2,3]<-A2[3,2]<-sum(T12*T1plus)*gi12 A2[2,5]<-A2[5,2]<-sum(T2*(T2plus-1))*gi2 A2[2,4]<-A2[4,2]<-gi2*sum(T2*(T21plus))+gi12*sum(T12*(T12plus-1)) A2[3,3]<-gi1*sum(I1*outer(T1plus-1,T1plus-1)) A2[3,5]<-A2[5,3]<- gi12*sum(I12*outer(T1plus,T2plus)) A2[3,4]<-A2[4,3]<-gi1*sum(I1*outer(T1plus-1,T12plus))+gi12*sum(I12*outer(T1plus,T21plus-1)) A2[5,5]<-gi2*sum(I2*outer(T2plus-1,T2plus-1)) A2[4,5]<-A2[5,4]<-gi2*sum(I2*outer(T2plus-1,T21plus))+gi12*sum(I21*outer(T2plus,T12plus-1)) A2[4,4]<-gi1*sum(I1*outer(T12plus,T12plus))+gi2*sum(I2*outer(T21plus,T21plus))+ gi12*sum(I12*outer(T12plus-1,T21plus-1))+gi12*sum(I21*outer(T21plus-1,T12plus-1)) Sigma<-A1+A2+A3 nam <- c(levels(marks(Xplus)), names(vecg)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } # Convert the first p 
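## The two special-case algorithms defined in this file are dispatched
## automatically by vcov.ppm. A minimal sketch (the radii are illustrative
## choices, not recommendations):
fitP <- ppm(cells ~ 1, PairPiece(c(0.05, 0.1)))           # handled by vcovPairPiece
vcov(fitP)
rad <- matrix(0.07, 2, 2)                                 # equal ranges, as required
fitM <- ppm(amacrine ~ marks, MultiStrauss(radii = rad))  # handled by vcovMultiStrauss
vcov(fitM)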
## Convert the first p rows & columns of variance matrix x
## to variances of treatment contrasts

contrastmatrix <- function(x, p) {
  mat <- x
  ## Correct column and row 1:
  for(i in 2:p) {
    mat[1,i] <- mat[i,1] <- x[1,i] - x[1,1]
  }
  ## Correct columns and rows 2,...,p:
  for(i in 2:p) {
    for(j in 2:p) {
      mat[i,j] <- x[1,1] - x[1,i] - x[1,j] + x[i,j]
    }
    for(j in (p+1):ncol(x)) {
      mat[i,j] <- mat[j,i] <- x[i,j] - x[1,j]
    }
  }
  mat
}

vcov.ppm
})

suffloc <- function(object) {
  verifyclass(object, "ppm")
  if(!is.poisson(object))
    stop("Internals not available for Gibbs models")
  return(vcov(object, what="internals")$suff)
}

spatstat.core/R/defaultwin.R0000644000176200001440000000251514144333462015541 0ustar liggesusers#
#
#  defaultwin.R
#
#  $Revision: 1.10 $  $Date: 2015/10/21 09:06:57 $
#

default.expand <- function(object, m=2, epsilon=1e-6, w=Window(object)) {
  stopifnot(is.ppm(object) || inherits(object, "rmhmodel"))
  # no expansion necessary if model is Poisson
  if(is.poisson(object))
    return(.no.expansion)
  # default is no expansion if model is nonstationary
  if(!is.stationary(object))
    return(.no.expansion)
  # Redundant since a non-expandable model is non-stationary
  # if(!is.expandable(object))
  #   return(.no.expansion)
  # rule is to expand data window by distance d = m * reach
  rr <- reach(object, epsilon=epsilon)
  if(!is.finite(rr))
    return(rmhexpand())
  if(!is.numeric(m) || length(m) != 1 || m < 1)
    stop("m should be a single number >= 1")
  mr <- m * rr
  rule <- rmhexpand(distance = mr)
  #
  if(is.owin(w)) {
    # apply rule to window
    wplus <- expand.owin(w, rule)
    # save as new expansion rule
    rule <- rmhexpand(wplus)
  }
  return(rule)
}

default.clipwindow <- function(object, epsilon=1e-6) {
  stopifnot(is.ppm(object) || inherits(object, "rmhmodel"))
  # data window
  w <- as.owin(object)
  if(is.null(w)) return(NULL)
  # interaction range of model
  rr <- reach(object, epsilon=epsilon)
  if(!is.finite(rr))
    return(NULL)
  if(rr == 0)
    return(w)
  else
    return(erosion(w, rr))
}

spatstat.core/R/Gres.R0000644000176200001440000000502514144333461014275 0ustar liggesusers#
#	Gres.R
#
#	Residual G
#
#	$Revision: 1.3 $	$Date: 2013/04/25 06:37:43 $
#
#############################################################################
#

Gres <- function(object, ...) {
  if(!is.fv(object)) {
    # usual case where 'object' is a ppm, ppp or quad
    G <- Gcom(object, ...)
  } else {
    # case where 'object' is the output of 'Gcom'
    a <- attr(object, "maker")
    if(is.null(a) || a != "Gcom")
      stop("fv object was not created by Gcom")
    G <- object
    if(length(list(...)) > 0)
      warning("Extra arguments ignored")
  }
  # initialise fv object
  df <- data.frame(r=G$r, theo=numeric(length(G$r)))
  desc <- c("distance argument r",
            "value 0 corresponding to perfect fit")
  ans <- fv(df, "r", substitute(bold(R)~hat(G)(r), NULL), "theo", .
~ r, attr(G, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="G") # add residual estimates nam <- names(G) if(all(c("border","bcom") %in% nam)) ans <- bind.fv(ans, data.frame(bres=with(G, border-bcom)), "bold(R)~hat(%s)[bord](r)", "border corrected residual of %s", "bres") if(all(c("han","hcom") %in% nam)) ans <- bind.fv(ans, data.frame(hres=with(G, han-hcom)), "bold(R)~hat(%s)[han](r)", "Hanisch corrected residual of %s", "hres") if("hvar" %in% nam) { savedotnames <- fvnames(ans, ".") hsd <- with(G, sqrt(hvar)) ans <- bind.fv(ans, data.frame(hvar=with(G, hvar), hsd = hsd, hi = 2*hsd, lo = -2*hsd), c("bold(C)^2~hat(%s)[han](r)", "sqrt(bold(C)^2~hat(%s)[han](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of Hanisch corrected residual %s", "pseudo-SD of Hanisch corrected residual %s", "upper critical band for Hanisch corrected residual %s", "lower critical band for Hanisch corrected residual %s"), "hres") ans <- bind.fv(ans, data.frame(hstdres=with(ans, hres/hsd)), "bold(T)~hat(%s)[han](r)", "standardised Hanisch-corrected residual %s", "hres") fvnames(ans, ".") <- c(savedotnames, c("hi", "lo")) } unitname(ans) <- unitname(G) return(ans) } spatstat.core/R/varblock.R0000644000176200001440000001307614144333464015210 0ustar liggesusers# # varblock.R # # Variance estimation using block subdivision # # $Revision: 1.21 $ $Date: 2020/11/30 13:11:20 $ # varblock <- local({ getrvalues <- function(z) { with(z, .x) } stepsize <- function(z) { mean(diff(z)) } dofun <- function(domain, fun, Xpp, ...) { fun(Xpp, ..., domain=domain) } varblock <- function(X, fun=Kest, blocks=quadrats(X, nx=nx, ny=ny), ..., nx=3, ny=nx, confidence=0.95) { stopifnot(is.ppp(X)) stopifnot(is.tess(blocks)) stopifnot(is.function(fun) || is.character(fun)) if(is.character(fun)) fun <- get(fun, mode="function") ## validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence probs <- c(alpha/2, 1-alpha/2) ## determine whether 'fun' has an argument called 'domain' canrestrict <- ("domain" %in% names(formals(fun))) || samefunction(fun, pcf) || samefunction(fun, Lest) ## check there's at least one point in each block Y <- split(X, blocks) nums <- sapply(Y, npoints) blockok <- (nums > 0) if(some.zeroes <- any(!blockok)) warning("Some tiles contain no data: they are discarded") if(!canrestrict) { ## divide data into disjoint blocks if(some.zeroes) Y <- Y[blockok] n <- length(Y) if(n <= 1) stop("Need at least 2 blocks") ## apply 'fun' to each block if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) z <- lapply(Y, fun, ...) } else { ## need to ensure compatible fv objects z <- lapply(Y, fun, ...) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(Y, fun, ..., r=r) fX <- fun(X, ..., r=r) } } else { ## use 'domain' argument of 'fun' to compute contributions from each tile B <- tiles(blocks) if(some.zeroes) B <- B[blockok] n <- length(B) if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) 
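## A minimal sketch of the block-variance estimator implemented here:
## the K-function of the 'cells' dataset (an illustrative choice) with a
## pointwise 95% confidence band from a 3 x 3 division into blocks.
vb <- varblock(cells, Kest, nx = 3, confidence = 0.95)
plot(vb)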
z <- lapply(B, dofun, ..., fun=fun, Xpp=X) } else { ## need to ensure compatible fv objects z <- lapply(B, dofun, ..., fun=fun, Xpp=X) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(B, dofun, ..., fun=fun, Xpp=X, r=r) fX <- fun(X, ..., r=r) } } ## find columns that are common to all estimates zzz <- reconcile.fv(append(list(fX), z)) fX <- zzz[[1]] z <- zzz[-1] ## sample mean m <- meanlistfv(z) ## sample variance sqdev <- lapply(z, sqdev.fv, m=m) v <- meanlistfv(sqdev) v <- eval.fv(v * n/(n-1), dotonly=FALSE) ## sample standard deviation sd <- eval.fv(sqrt(v), dotonly=FALSE) ## upper and lower limits sem <- eval.fv(sd/sqrt(n), dotonly=FALSE) zcrit <- qnorm(probs) lower <- eval.fv(m + zcrit[1] * sem, dotonly=FALSE) upper <- eval.fv(m + zcrit[2] * sem, dotonly=FALSE) ## rebadge fva <- .Spatstat.FvAttrib fva <- fva[fva %in% names(attributes(fX))] attributes(m)[fva] <- attributes(v)[fva] <- attributes(sd)[fva] <- attributes(upper)[fva] <- attributes(lower)[fva] <- attributes(fX)[fva] m <- prefixfv(m, "mean", "sample mean of", "bold(mean)~") v <- prefixfv(v, "var", "estimated variance of", "bold(var)~") sd <- prefixfv(sd, "sd", "estimated standard deviation of", "bold(sd)~") CItext <- paste(c("lower", "upper"), paste0(100 * confidence, "%%"), "CI limit for") lower <- prefixfv(lower, "lo", CItext[1], "bold(lo)~") upper <- prefixfv(upper, "hi", CItext[2], "bold(hi)~") ## tack together out <- cbind(fX,m,v,sd,upper,lower) ## restrict r domain bad <- matrowall(!is.finite(as.matrix(as.data.frame(out)))) rmax <- max(getrvalues(out)[!bad]) alim <- c(0, rmax) if(!canrestrict) alim <- intersect.ranges(attr(out, "alim"), alim) attr(out, "alim") <- alim ## sensible default plot formula ybase <- fvnames(fX, ".y") xname <- fvnames(fX, ".x") tname <- intersect("theo", fvnames(fX, ".")) fvnames(out, ".y") <- yname <- paste0("mean", ybase) fvnames(out, ".s") <- snames <- paste0(c("lo", "hi"), ybase) fvnames(out, ".") <- c(yname, tname, snames) attr(out, "fmla") <- paste(". ~ ", xname) return(out) } sqdev.fv <- function(x,m){ eval.fv((x-m)^2, dotonly=FALSE) } varblock }) meanlistfv <- local({ getYmatrix <- function(x, yn=ynames) { as.matrix(as.data.frame(x)[,yn]) } meanlistfv <- function(z, ...) { ## compute sample mean of a list of fv objects if(!is.list(z) || !all(unlist(lapply(z, is.fv)))) stop("z should be a list of fv objects") if(!do.call(compatible, unname(z))) stop("Objects are not compatible") result <- template <- z[[1]] ## extract each object's function values as a matrix ynames <- fvnames(template, "*") matlist <- unname(lapply(z, getYmatrix, yn=ynames)) ## stack matrices into an array y <- do.call(abind, append(matlist, list(along=3))) ## take mean ymean <- apply(y, 1:2, mean, ...) result[,ynames] <- ymean return(result) } meanlistfv }) spatstat.core/R/dppm.R0000644000176200001440000001334514144333462014342 0ustar liggesusers#' #' dppm.R #' #' $Revision: 1.15 $ $Date: 2021/07/14 09:44:32 $ dppm <- function(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm", "adapcl"), weightfun=NULL, control=list(), algorithm, statistic="K", statargs=list(), rmax = NULL, epsilon = 0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { method <- match.arg(method) # Instantiate family if not already done. 
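## A minimal sketch of the user-level interface defined here, fitting a
## Gaussian determinantal point process model to the 'swedishpines'
## dataset (an illustrative choice):
fit <- dppm(swedishpines ~ 1, dppGauss)
fit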
if(is.character(family)) family <- get(family, mode="function") if(inherits(family, "detpointprocfamilyfun")) { familyfun <- family family <- familyfun() } verifyclass(family, "detpointprocfamily") # Check for intensity as only unknown and exit (should be changed for likelihood method) if(length(family$freepar)==1 && (family$freepar %in% family$intensity)) stop("Only the intensity needs to be estimated. Please do this with ppm yourself.") # Detect missing rhs of 'formula' and fix if(inherits(formula, c("ppp", "quad"))){ Xname <- short.deparse(substitute(formula)) formula <- as.formula(paste(Xname, "~ 1")) } if(!inherits(formula, "formula")) stop(paste("Argument 'formula' should be a formula")) # kppm(formula, DPP = family, data = data, covariates = data, # startpar = startpar, method = method, weightfun = weightfun, # control = control, algorithm = algorithm, statistic = statistic, # statargs = statargs, rmax = rmax, covfunargs = covfunargs, # use.gam = use.gam, nd = nd, eps = eps, ...) if(missing(algorithm)) { algorithm <- if(method == "adapcl") "Broyden" else "Nelder-Mead" } else check.1.string(algorithm) thecall <- call("kppm", X=formula, DPP=family, data = data, covariates = data, startpar = startpar, method = method, weightfun = weightfun, control = control, algorithm = algorithm, statistic = statistic, statargs = statargs, rmax = rmax, covfunargs = covfunargs, use.gam = use.gam, nd = nd, eps = eps) ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- parent.frame() if(!is.null(data)) callenv <- list2env(data, parent=callenv) result <- eval(thecall, envir=callenv, enclos=baseenv()) return(result) } ## Auxiliary function to mimic cluster models for DPPs in kppm code spatstatDPPModelInfo <- function(model){ out <- list( modelname = paste(model$name, "DPP"), # In modelname field of mincon fv obj. descname = paste(model$name, "DPP"), # In desc field of mincon fv obj. modelabbrev = paste(model$name, "DPP"), # In fitted obj. printmodelname = function(...) 
      paste(model$name, "DPP"), # Used by print.kppm
    parnames = model$freepar,
    checkpar = function(par){ return(par) },
    checkclustargs = function(margs, old = TRUE) list(),
    resolvedots = function(...){ return(list(...)) },
    ## K-function
    K = function(par, rvals, ...){
      if(length(par)==1 && is.null(names(par)))
        names(par) <- model$freepar
      mod <- update(model, as.list(par))
      if(!valid(mod)){
        return(rep(Inf, length(rvals)))
      } else{
        return(Kmodel(mod)(rvals))
      }
    },
    ## pair correlation function
    pcf = function(par, rvals, ...){
      if(length(par)==1 && is.null(names(par)))
        names(par) <- model$freepar
      mod <- update(model, as.list(par))
      if(!valid(mod)){
        return(rep(Inf, length(rvals)))
      } else{
        return(pcfmodel(mod)(rvals))
      }
    },
    Dpcf = function(par, rvals, ...){
      if(length(par)==1 && is.null(names(par)))
        names(par) <- model$freepar
      mod <- update(model, as.list(par))
      if(!valid(mod)){
        return(rep(Inf, length(rvals)))
      } else{
        return(sapply(rvals, FUN = dppDpcf(mod)))
      }
    },
    ## sensible starting parameters
    selfstart = function(X) {
      return(model$startpar(model, X))
    }
  )
  return(out)
}

## Auxiliary function used for DPP stuff in kppm.R
dppmFixIntensity <- function(DPP, lambda, po){
  lambdaname <- DPP$intensity
  if(is.null(lambdaname))
    warning("The model has no intensity parameter.\n",
            "Prediction from the fitted model is invalid ",
            "(but no warning or error will be given by predict.dppm).")
  ## Update model object with estimated intensity if it is a free model parameter
  if(lambdaname %in% DPP$freepar){
    clusters <- update(DPP, structure(list(lambda), .Names=lambdaname))
  } else{
    clusters <- DPP
    lambda <- intensity(clusters)
    ## Overwrite po object with fake version
    X <- po$Q$data
    dont.complain.about(X)
    po <- ppm(X~offset(log(lambda))-1)
    po$fitter <- "dppm"
    ## update pseudolikelihood value using code in logLik.ppm
    po$maxlogpl.orig <- po$maxlogpl
    po$maxlogpl <- logLik(po, warn=FALSE)
    #########################################
  }
  return(list(clusters=clusters, lambda=lambda, po=po))
}

## Auxiliary function used for DPP stuff in kppm.R
dppmFixAlgorithm <- function(algorithm, changealgorithm, clusters, startpar){
  if(!setequal(clusters$freepar, names(startpar)))
    stop("Names of startpar vector do not match the free parameters of the model.")
  lower <- upper <- NULL
  if(changealgorithm){
    bb <- dppparbounds(clusters, names(startpar))
    if(all(is.finite(bb))){
      algorithm <- "Brent"
      lower <- bb[1L]
      upper <- bb[2L]
    } else{
      algorithm <- "BFGS"
    }
  }
  return(list(algorithm = algorithm, lower = lower, upper = upper))
}

spatstat.core/R/ic.kppm.R0000644000176200001440000000174614141452520014737 0ustar liggesusers#'
#' ic.kppm.R
#'
#' Original by Rasmus Waagepetersen, 10 April 2021
#'
#' Tweaks by Adrian Baddeley

ic <- function(object) {
  UseMethod("ic")
}

ic.ppm <- function(object) {
  loglike <- logLik(object)
  ll <- as.numeric(loglike)
  df <- attr(loglike, "df")
  ## betahat <- coef(object)
  n <- npoints(data.ppm(object))
  cbic <- -2*ll + df*log(n)
  cic  <- -2*ll + df*2
  ## cbic is BIC and cic is AIC in case of Poisson process
  return(list(loglike=ll, cbic=cbic, cic=cic, df=df))
}

ic.kppm <- function(object){
  po <- as.ppm(object)
  loglike <- logLik(po)
  ll <- as.numeric(loglike)
  betahat <- coef(object)
  p <- length(betahat)
  n <- npoints(data.ppm(po))
  co <- vcov(object, what="internals")
  df <- p + sum(diag(as.matrix(co$J.inv %*% co$E)))  # compute p_approx
  cbic <- -2*ll + df*log(n)
  cic  <- -2*ll + df*2
  ## cbic is BIC and cic is AIC in case of Poisson process
  return(list(loglike=ll, cbic=cbic, cic=cic, df=df))
}
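## A minimal sketch of these information criteria in use; the cluster
## model and dataset are illustrative choices:
fit <- kppm(redwood ~ 1, "Thomas")
ic(fit)          # loglike, cbic, cic and the adjusted df
ic(as.ppm(fit))  # Poisson case: cbic reduces to BIC, cic to AIC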
spatstat.core/R/ippm.R0000644000176200001440000002257514144333462014354 0ustar liggesusers# # ippm.R # # $Revision: 2.28 $ $Date: 2020/12/19 05:25:06 $ # # Fisher scoring algorithm for irregular parameters in ppm trend # ippm <- local({ chucknames <- c("iScore", "start", "nlm.args", "silent", "warn.unused") hasarg <- function(f,a) { a %in% names(formals(f)) } ippm <- function(Q, ..., iScore=NULL, start=list(), covfunargs=start, nlm.args=list(stepmax=1/2), silent=FALSE, warn.unused=TRUE) { ## remember call cl <- match.call() callframe <- parent.frame() callstring <- short.deparse(sys.call()) ## ppmcall <- cl[!(names(cl) %in% chucknames)] ppmcall[[1L]] <- as.name('ppm') ## validate if(!is.list(start)) stop("start should be a list of initial values for irregular parameters") if(length(start) == 0) { ppmcall <- ppmcall[names(ppmcall) != "covfunargs"] return(eval(ppmcall, callframe)) } if(!is.null(iScore)) { if(!is.list(iScore) || length(iScore) != length(start)) stop("iScore should be a list of the same length as start") stopifnot(identical(names(iScore), names(start))) if(!all(sapply(iScore, is.function))) stop("iScore should be a list of functions") } ## smap <- match(names(start), names(covfunargs)) if(anyNA(smap)) stop("variables in start should be a subset of variables in covfunargs") covfunargs[smap] <- start ## fit the initial model and extract information ppmcall$covfunargs <- covfunargs fit0 <- eval(ppmcall, callframe) # lpl0 <- fit0$maxlogpl # p <- length(coef(fit0)) ## examine covariates and trend covariates <- fit0$covariates isfun <- sapply(covariates, is.function) covfuns <- covariates[isfun] ## determine which covariates depend on which irregular parameters pnames <- names(start) depmat <- matrix(FALSE, nrow=length(covfuns), ncol=length(pnames)) rownames(depmat) <- names(covfuns) colnames(depmat) <- pnames for(j in 1:length(pnames)) depmat[,j] <- sapply(covfuns, hasarg, pnames[j]) ## find covariates that depend on ANY irregular parameter depvar <- rownames(depmat)[apply(depmat, 1L, any)] ## check that these covariates appear only in offset terms covnames.fitted <- model.covariates(fit0, fitted=TRUE, offset=FALSE) if(any(uhoh <- depvar %in% covnames.fitted)) stop(paste(ngettext(sum(uhoh), "The covariate", "The covariates"), commasep(sQuote(depvar[uhoh])), "should appear only in offset terms")) ## check that every irregular parameter to be updated appears somewhere cov.names.offset <- model.covariates(fit0, fitted=FALSE, offset=TRUE) covfun.names.offset <- intersect(cov.names.offset, names(covfuns)) usearg <- apply(depmat[covfun.names.offset, , drop=FALSE], 2L, any) if(!all(usearg)) { if(warn.unused) { nbad <- sum(!usearg) warning(paste("Cannot maximise over the irregular", ngettext(nbad, "parameter", "parameters"), commasep(sQuote(names(usearg)[!usearg])), ngettext(nbad, "because it is", "because they are"), "not used in any term of the model")) } ## restrict start <- start[usearg] if(!is.null(iScore)) iScore <- iScore[usearg] pnames <- names(start) } if(length(start) == 0) { ppmcall <- ppmcall[names(ppmcall) != "covfunargs"] return(eval(ppmcall, callframe)) } ## parameters for objective function fdata <- list(fit0=fit0, nreg=length(coef(fit0)), covfunargs=covfunargs, smap=smap, pnames=pnames, iScore=iScore) ## minimise objective startvec <- unlist(start) typsize <- abs(startvec) typsize <- pmax(typsize, min(typsize[typsize > 0])) dont.complain.about(fdata) g <- do.call(nlm, resolve.defaults(list(f=quote(objectivefun), p=startvec, thedata=quote(fdata)), nlm.args, 
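## A minimal sketch of the interface implemented below: a covariate
## exp(b*x) whose exponent b is not a canonical parameter, so it is
## optimised numerically by nlm. The covariate 'slope' and its starting
## value are hypothetical.
X <- rpoispp(100)
slope <- function(x, y, b) { exp(b * x) }   # 'b' is the irregular parameter
fit <- ippm(X ~ offset(log(slope)),
            covariates = list(slope = slope),
            start = list(b = 0))
coef(fit)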
list(typsize=typsize))) popt <- g$estimate ## detect error states icode <- g$code if(!silent && icode > 2) { errmess <- nlmcodes[[icode]] if(!is.null(errmess)) warning(errmess) else warning("Unrecognised error code ", paste(icode), " returned from nlm", call.=FALSE) } ## return optimised model covfunargs[smap] <- popt attr(covfunargs, "fitter") <- "ippm" attr(covfunargs, "free") <- names(start) fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE) fit$dispatched <- fit[c("call", "callstring", "callframe")] fit$call <- cl fit$callstring <- callstring fit$callframe <- callframe fit$iScore <- iScore class(fit) <- c("ippm", class(fit)) return(fit) } ## define objective function objectivefun <- function(param, thedata) { with(thedata, { ## fit model with current irregular parameters param <- as.list(param) names(param) <- pnames covfunargs[smap] <- param fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE) lpl <- logLik(fit, warn=FALSE) ## return negative logL because nlm performs *minimisation* value <- -as.numeric(lpl) if(!is.null(iScore)) { ## compute analytic derivatives stuff <- ppmInfluence(fit, what="score", iScore=iScore, iArgs=param) score <- stuff$score if(length(score) == length(coef(fit)) + length(param)) attr(value, "gradient") <- -score[-(1:nreg), drop=FALSE] ## attr(value, "hessian") <- -hess[-(1:nreg), -(1:nreg), drop=FALSE] } return(value) }) } ## from help(nlm) nlmcodes <- list(c("Relative gradient is close to zero; ", "current iterate is probably solution"), c("Successive iterates are within tolerance; ", "current iterate is probably solution"), c("Last global step failed to locate a point ", "lower than current estimate. ", "Either current estimate is an approximate ", "local minimum of the function ", "or 'steptol' is too small"), "Iteration limit exceeded", c("Maximum step size 'stepmax' ", "exceeded five consecutive times. ", "Either the function is unbounded below, ", "becomes asymptotic to a finite value ", "from above in some direction, ", "or 'stepmax' is too small")) ippm }) update.ippm <- local({ update.ippm <- function(object, ..., envir=environment(terms(object))) { # call <- match.call() new.call <- old.call <- object$call old.callframe <- object$callframe Qold <- eval(old.call$Q, as.list(envir), enclos=old.callframe) argh <- list(...) if(any(isfmla <- sapply(argh, inherits, what="formula"))) { if(sum(isfmla) > 1) stop("Syntax not understood: several arguments are formulas") i <- min(which(isfmla)) new.fmla <- argh[[i]] argh <- argh[-i] if(inherits(Qold, "formula")) { ## formula will replace 'Q' if(is.null(lhs.of.formula(new.fmla))) { f <- (. 
~ x)
          f[[3L]] <- new.fmla[[2L]]
          new.fmla <- f
        }
        new.call$Q <- newformula(Qold, new.fmla, old.callframe, envir,
                                 expandpoly=FALSE)
      } else if(inherits(Qold, c("ppp", "quad"))) {
        ## formula will replace 'trend' and may replace 'Q'
        new.fmla <- newformula(formula(object), new.fmla, old.callframe, envir,
                               expandpoly=FALSE)
        if(!is.null(lhs <- lhs.of.formula(new.fmla))) {
          newQ <- eval(eval(substitute(substitute(l, list("."=Q)),
                                       list(l=lhs, Q=Qold))),
                       envir=as.list(envir), enclos=old.callframe)
          new.call$Q <- newQ
        }
        new.fmla <- rhs.of.formula(new.fmla)
        if("trend" %in% names(old.call)) {
          new.call$trend <- new.fmla
        } else {
          ## find which argument in the original call was a formula
          wasfmla <- sapply(old.call, formulaic,
                            envir=as.list(envir), enclos=old.callframe)
          if(any(wasfmla)) {
            new.call[[min(which(wasfmla))]] <- new.fmla
          } else {
            new.call$trend <- new.fmla
          }
        }
      }
    }
    ## silence the warnings about unused covfunargs (unless overruled)
    new.call$warn.unused <- FALSE
    ## other arguments
    if(length(argh) > 0) {
      nama <- names(argh)
      named <- if(is.null(nama)) rep(FALSE, length(argh)) else nzchar(nama)
      if(any(named)) new.call[nama[named]] <- argh[named]
      if(any(!named)) new.call[length(new.call) + 1:sum(!named)] <- argh[!named]
    }
    result <- eval(new.call, as.list(envir), enclos=old.callframe)
    return(result)
  }

  formulaic <- function(z, envir, enclos) {
    u <- try(eval(z, envir, enclos))
    return(inherits(u, "formula"))
  }

  update.ippm
})

spatstat.core/R/mpl.R0000644000176200001440000017203414144333463014174 0ustar liggesusers# mpl.R
#
# $Revision: 5.235 $	$Date: 2021/09/04 09:31:09 $
#
# mpl.engine()
#          Fit a point process model to a two-dimensional point pattern
#          by maximum pseudolikelihood
#
# mpl.prepare()
#          set up data for glm procedure
#
# -------------------------------------------------------------------
#
# "mpl" <- function(Q,
#                   trend = ~1,
#                   interaction = NULL,
#                   data = NULL,
#                   correction="border",
#                   rbord = 0,
#                   use.gam=FALSE) {
#    .Deprecated("ppm", package="spatstat")
#    ppm(Q=Q, trend=trend, interaction=interaction,
#        covariates=data, correction=correction, rbord=rbord,
#        use.gam=use.gam, method="mpl")
# }

mpl.engine <- function(Q,
                       trend = ~1,
                       interaction = NULL,
                       ...,
                       covariates = NULL,
                       subsetexpr = NULL,
                       clipwin = NULL,
                       covfunargs = list(),
                       correction = "border",
                       rbord = 0,
                       use.gam = FALSE,
                       gcontrol = list(),
                       GLM = NULL,
                       GLMfamily = NULL,
                       GLMcontrol = NULL,
                       famille = NULL,
                       forcefit = FALSE,
                       nd = NULL,
                       eps = NULL,
                       allcovar = FALSE,
                       callstring = "",
                       precomputed = NULL,
                       savecomputed = FALSE,
                       preponly = FALSE,
                       rename.intercept = TRUE,
                       justQ = FALSE,
                       weightfactor = NULL) {
  GLMname <- if(!missing(GLM)) short.deparse(substitute(GLM)) else NULL
  ## Extract precomputed data if available
  if(!is.null(precomputed$Q)) {
    Q <- precomputed$Q
    X <- precomputed$X
    P <- precomputed$U
  } else {
    ## Determine quadrature scheme from argument Q
    if(verifyclass(Q, "quad", fatal=FALSE)) {
      ## user-supplied quadrature scheme - validate it
      validate.quad(Q, fatal=TRUE, repair=FALSE, announce=TRUE)
      ## Extract data points
      X <- Q$data
    } else if(verifyclass(Q, "ppp", fatal = FALSE)) {
      ## point pattern - create default quadrature scheme
      X <- Q
      Q <- quadscheme(X, nd=nd, eps=eps, check=FALSE)
    } else
      stop("First argument Q should be a point pattern or a quadrature scheme")
    ## Data and dummy points together
    P <- union.quad(Q)
  }
  ## clip to subset?
if(!is.null(clipwin)) { if(is.data.frame(covariates)) covariates <- covariates[inside.owin(P, w=clipwin), , drop=FALSE] Q <- Q[clipwin] X <- X[clipwin] P <- P[clipwin] } ## secret exit if(justQ) return(Q) ## computed <- if(savecomputed) list(X=X, Q=Q, U=P) else NULL ## ## Validate main arguments if(!is.null(trend) && !inherits(trend, "formula")) stop(paste("Argument", sQuote("trend"), "must be a formula")) if(!is.null(interaction) && !inherits(interaction, "interact")) stop(paste("Argument", sQuote("interaction"), "has incorrect format")) ## check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") ## rbord applies only to border correction if(correction != "border") rbord <- 0 ## covfunargs <- as.list(covfunargs) ## ## Interpret the call if(is.null(trend)) { trend <- ~1 environment(trend) <- parent.frame() } want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.null(interaction) && !is.null(interaction$family) ## Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2021/09/04 09:31:09 $") if(want.inter) { ## ensure we're using the latest version of the interaction object if(outdated.interact(interaction)) interaction <- update(interaction) } ## if(!want.trend && !want.inter && !forcefit && !allcovar && is.null(subsetexpr)) { ## the model is the uniform Poisson process ## The MPLE (= MLE) can be evaluated directly npts <- npoints(X) W <- as.owin(X) if(correction == "border" && rbord > 0) { npts <- sum(bdist.points(X) >= rbord) areaW <- eroded.areas(W, rbord) } else { npts <- npoints(X) areaW <- area(W) } volume <- areaW * markspace.integral(X) lambda <- npts/volume ## fitted canonical coefficient co <- log(lambda) ## asymptotic variance of canonical coefficient varcov <- matrix(1/npts, 1, 1) fisher <- matrix(npts, 1, 1) se <- sqrt(1/npts) ## give names tag <- if(rename.intercept) "log(lambda)" else "(Intercept)" names(co) <- tag dimnames(varcov) <- dimnames(fisher) <- list(tag, tag) ## maximised log likelihood maxlogpl <- if(npts == 0) 0 else npts * (log(lambda) - 1) ## rslt <- list( method = "mpl", fitter = "exact", projected = FALSE, coef = co, trend = trend, interaction = NULL, fitin = fii(), Q = Q, maxlogpl = maxlogpl, satlogpl = NULL, internal = list(computed=computed, se=se), covariates = mpl.usable(covariates), ## covariates are still retained! covfunargs = covfunargs, subsetexpr = NULL, correction = correction, rbord = rbord, terms = terms(trend), fisher = fisher, varcov = varcov, version = the.version, problems = list()) class(rslt) <- "ppm" return(rslt) } ################# P r e p a r e D a t a ###################### prep <- mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, "quadrature points", callstring, subsetexpr=subsetexpr, allcovar=allcovar, precomputed=precomputed, savecomputed=savecomputed, covfunargs=covfunargs, weightfactor=weightfactor, ...) 
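## In the exact-solution branch above, the MPLE coincides with the MLE of
## the uniform Poisson model, so the fitted coefficient is log(n/|W|).
## A minimal check on the 'cells' dataset (an illustrative choice):
fit0 <- ppm(cells ~ 1)
coef(fit0)                                   # named "log(lambda)"
log(npoints(cells) / area(Window(cells)))    # same value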
## back door if(preponly) { ## exit now, returning prepared data frame and internal information prep$info <- list(want.trend=want.trend, want.inter=want.inter, correction=correction, rbord=rbord, interaction=interaction) return(prep) } fmla <- prep$fmla glmdata <- prep$glmdata problems <- prep$problems likelihood.is.zero <- prep$likelihood.is.zero is.identifiable <- prep$is.identifiable computed <- resolve.defaults(prep$computed, computed) IsOffset <- prep$IsOffset ## update covariates (if they were resolved from the environment) if(!is.null(prep$covariates)) covariates <- prep$covariates ################# F i t i t #################################### if(!is.identifiable) stop(paste("in", callstring, ":", problems$unidentifiable$print), call.=FALSE) ## to avoid problem with package checker .mpl.W <- glmdata$.mpl.W .mpl.SUBSET <- glmdata$.mpl.SUBSET ## determine algorithm control parameters if(is.null(gcontrol)) gcontrol <- list() else stopifnot(is.list(gcontrol)) gcontrol <- if(!is.null(GLMcontrol)) do.call(GLMcontrol, gcontrol) else if(want.trend && use.gam) do.call(mgcv::gam.control, gcontrol) else do.call(stats::glm.control, gcontrol) ## Fit the generalized linear/additive model. if(is.null(GLM) && is.null(famille)) { ## the sanctioned technique, using `quasi' family if(want.trend && use.gam) { FIT <- gam(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- "gam" } else { FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) fittername <- "glm" } } else if(!is.null(GLM)) { ## alternative GLM fitting function or penalised GLM etc fam <- GLMfamily %orifnull% quasi(link="log", variance="mu") FIT <- GLM(fmla, family=fam, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- GLMname } else { ## experimentation only! 
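## The Berman-Turner device fitted below uses glm with the quasi family
## quasi(link="log", variance="mu"), which reproduces the Poisson score
## equations while tolerating the non-integer pseudo-responses y = 1/w.
## A minimal sketch of the same device on a toy data frame (values are
## arbitrary):
d <- data.frame(y = c(10, 0, 0, 0), w = c(0.1, 0.2, 0.3, 0.4))
glm(y ~ 1, family = quasi(link = "log", variance = "mu"),
    weights = w, data = d)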
if(is.function(famille)) famille <- famille() stopifnot(inherits(famille, "family")) if(want.trend && use.gam) { FIT <- gam(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- "experimental" } else { FIT <- glm(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) fittername <- "experimental" } } environment(FIT$terms) <- sys.frame(sys.nframe()) ################ I n t e r p r e t f i t ####################### ## Fitted coefficients co <- FIT$coef ## glm covariates W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(Q) Vnames <- prep$Vnames vnameprefix <- prep$vnameprefix ## saturated log pseudolikelihood satlogpl <- - (sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) ## attained value of max log pseudolikelihood maxlogpl <- if(likelihood.is.zero) -Inf else (satlogpl - deviance(FIT)/2) ## fitted interaction object fitin <- if(want.inter) { fii(interaction, co, Vnames, IsOffset, vnameprefix) } else fii() unitname(fitin) <- unitname(X) ###################################################################### ## Clean up & return rslt <- list( method = "mpl", fitter = fittername, projected = FALSE, coef = co, trend = trend, interaction = if(want.inter) interaction else NULL, fitin = fitin, Q = Q, maxlogpl = maxlogpl, satlogpl = satlogpl, internal = list(glmfit=FIT, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, fmla=fmla, computed=computed, vnamebase=prep$vnamebase, vnameprefix=prep$vnameprefix), covariates = mpl.usable(covariates), covfunargs = covfunargs, subsetexpr = subsetexpr, correction = correction, rbord = rbord, terms = terms(trend), version = the.version, problems = problems) class(rslt) <- "ppm" return(rslt) } ########################################################################## ### ///////////////////////////////////////////////////////////////////// ########################################################################## mpl.prepare <- local({ mpl.prepare <- function(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname="quadrature points", callstring="", ..., subsetexpr=NULL, covfunargs=list(), allcovar=FALSE, precomputed=NULL, savecomputed=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, warn.illegal=TRUE, warn.unidentifiable=TRUE, weightfactor=NULL, skip.border=FALSE, clip.interaction=TRUE, splitInf=FALSE) { ## Q: quadrature scheme ## X = data.quad(Q) ## P = union.quad(Q) if(missing(want.trend)) want.trend <- !is.null(trend) && !identical.formulae(trend, ~1) if(missing(want.inter)) want.inter <- !is.null(interaction) && !is.null(interaction$family) want.subset <- !is.null(subsetexpr) computed <- list() problems <- list() names.precomputed <- names(precomputed) likelihood.is.zero <- FALSE is.identifiable <- TRUE if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } ################ C o m p u t e d a t a #################### ## Extract covariate values updatecovariates <- FALSE covariates.df <- NULL if(allcovar || want.trend || want.subset) { if("covariates.df" %in% names.precomputed) { covariates.df <- precomputed$covariates.df } else { if(!is.data.frame(covariates)) { ## names of 'external' covariates to be found covnames <- 
variablesinformula(trend) if(want.subset) covnames <- union(covnames, all.vars(subsetexpr)) if(allcovar) covnames <- union(covnames, names(covariates)) covnames <- setdiff(covnames, c("x", "y", "marks")) ## resolve 'external' covariates tenv <- environment(trend) covariates <- getdataobjects(covnames, tenv, covariates, fatal=TRUE) updatecovariates <- any(attr(covariates, "external")) } ## extract values of covariates ('internal' and 'external') covariates.df <- mpl.get.covariates(covariates, P, Pname, covfunargs) } if(savecomputed) computed$covariates.df <- covariates.df } ## Form the weights and the ``response variable''. if("dotmplbase" %in% names.precomputed) .mpl <- precomputed$dotmplbase else { nQ <- n.quad(Q) wQ <- w.quad(Q) mQ <- marks.quad(Q) ## is NULL for unmarked patterns zQ <- is.data(Q) yQ <- numeric(nQ) yQ[zQ] <- 1/wQ[zQ] zeroes <- attr(wQ, "zeroes") sQ <- if(is.null(zeroes)) rep.int(TRUE, nQ) else !zeroes ## tweak weights ONLY if(!is.null(weightfactor)) wQ <- wQ * weightfactor ## pack up .mpl <- list(W = wQ, Z = zQ, Y = yQ, MARKS = mQ, SUBSET = sQ) } if(savecomputed) computed$dotmplbase <- .mpl glmdata <- data.frame(.mpl.W = .mpl$W, .mpl.Y = .mpl$Y) ## count data and dummy points in specified subset izdat <- .mpl$Z[.mpl$SUBSET] ndata <- sum(izdat) # ndummy <- sum(!izdat) ## Determine the domain of integration for the pseudolikelihood. if(correction == "border") { bdP <- if("bdP" %in% names.precomputed) precomputed$bdP else bdist.points(P) if(savecomputed) computed$bdP <- bdP .mpl$DOMAIN <- (bdP >= rbord) } skip.border <- skip.border && (correction == "border") ####################### T r e n d ############################## internal.names <- c(".mpl.W", ".mpl.Y", ".mpl.Z", ".mpl.SUBSET", "SUBSET", ".mpl") reserved.names <- c("x", "y", "marks", internal.names) if(allcovar || want.trend || want.subset) { trendvariables <- variablesinformula(trend) ## Check for use of internal names in trend cc <- check.clashes(internal.names, trendvariables, "the model formula") if(cc != "") stop(cc) if(want.subset) { subsetvariables <- all.vars(subsetexpr) cc <- check.clashes(internal.names, trendvariables, "the subset expression") if(cc != "") stop(cc) trendvariables <- union(trendvariables, subsetvariables) } ## Standard variables if(allcovar || "x" %in% trendvariables) glmdata <- data.frame(glmdata, x=P$x) if(allcovar || "y" %in% trendvariables) glmdata <- data.frame(glmdata, y=P$y) if(("marks" %in% trendvariables) || !is.null(.mpl$MARKS)) { if(is.null(.mpl$MARKS)) stop("Model formula depends on marks, but data do not have marks", call.=FALSE) glmdata <- data.frame(glmdata, marks=.mpl$MARKS) } ## ## Check covariates if(!is.null(covariates.df)) { ## Check for duplication of reserved names cc <- check.clashes(reserved.names, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) ## Take only those covariates that are named in the trend formula if(!allcovar) needed <- names(covariates.df) %in% trendvariables else needed <- rep.int(TRUE, ncol(covariates.df)) if(any(needed)) { covariates.needed <- covariates.df[, needed, drop=FALSE] ## Append to `glmdata' glmdata <- data.frame(glmdata,covariates.needed) ## Ignore any quadrature points that have NA's in the covariates nbg <- is.na(covariates.needed) if(any(nbg)) { offending <- matcolany(nbg) covnames.na <- names(covariates.needed)[offending] quadpoints.na <- matrowany(nbg) n.na <- sum(quadpoints.na) n.tot <- length(quadpoints.na) errate <- n.na/n.tot pcerror <- round(signif(100 * errate, 2), 2) complaint <- paste("Values of the", 
ngettext(length(covnames.na), "covariate", "covariates"), paste(sQuote(covnames.na), collapse=", "), "were NA or undefined at", paste(pcerror, "%", " (", n.na, " out of ", n.tot, ")", sep=""), "of the", Pname) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. = FALSE) .mpl$SUBSET <- .mpl$SUBSET & !quadpoints.na details <- list(covnames.na = covnames.na, quadpoints.na = quadpoints.na, print = complaint) problems <- append(problems, list(na.covariates=details)) } } } } ###################### I n t e r a c t i o n #################### Vnames <- NULL IsOffset <- NULL forbid <- NULL if(want.inter) { ## Form the matrix of "regression variables" V. ## The rows of V correspond to the rows of P (quadrature points) ## while the column(s) of V are the regression variables (log-potentials) E <- precomputed$E %orifnull% equalpairs.quad(Q) if(!skip.border) { ## usual case V <- evalInteraction(X, P, E, interaction, correction, ..., splitInf=splitInf, precomputed=precomputed, savecomputed=savecomputed) } else { ## evaluate only in eroded domain if(all(c("Esub", "Usub", "Retain") %in% names.precomputed)) { ## use precomputed data Psub <- precomputed$Usub Esub <- precomputed$Esub Retain <- precomputed$Retain } else { ## extract subset of quadrature points Retain <- .mpl$DOMAIN | is.data(Q) Psub <- P[Retain] ## map serial numbers in P to serial numbers in Psub Pmap <- cumsum(Retain) ## extract subset of equal-pairs matrix keepE <- Retain[ E[,2] ] Esub <- E[ keepE, , drop=FALSE] ## adjust indices in equal pairs matrix Esub[,2] <- Pmap[Esub[,2]] } ## call evaluator on reduced data if(all(c("X", "Q", "U") %in% names.precomputed)) { subcomputed <- resolve.defaults(list(E=Esub, U=Psub, Q=Q[Retain]), precomputed) } else subcomputed <- NULL if(clip.interaction) { ## normal V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., splitInf=splitInf, precomputed=subcomputed, savecomputed=savecomputed) } else { ## ignore window when calculating interaction ## by setting 'W=NULL' (currently detected only by AreaInter) V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., W=NULL, splitInf=splitInf, precomputed=subcomputed, savecomputed=savecomputed) } if(savecomputed) { computed$Usub <- Psub computed$Esub <- Esub computed$Retain <- Retain } } if(!is.matrix(V)) stop("interaction evaluator did not return a matrix") ## extract information about offsets IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- FALSE if(splitInf) { ## extract information about hard core terms forbid <- attr(V, "-Inf") %orifnull% logical(nrow(V)) } if(skip.border) { ## fill in the values in the border region with zeroes. Vnew <- matrix(0, nrow=npoints(P), ncol=ncol(V)) colnames(Vnew) <- colnames(V) Vnew[Retain, ] <- V ## retain attributes attr(Vnew, "IsOffset") <- IsOffset attr(Vnew, "computed") <- attr(V, "computed") attr(Vnew, "POT") <- attr(V, "POT") V <- Vnew if(splitInf) { fnew <- logical(nrow(Vnew)) fnew[Retain] <- forbid forbid <- fnew } } ## extract intermediate computation results if(savecomputed) computed <- resolve.defaults(attr(V, "computed"), computed) ## Augment data frame by appending the regression variables ## for interactions. ## ## First determine the names of the variables ## Vnames <- dimnames(V)[[2]] if(is.null(Vnames)) { ## No names were provided for the columns of V. ## Give them default names. ## In ppm the names will be "Interaction" ## or "Interact.1", "Interact.2", ... ## In mppm an alternative tag will be specified by vnamebase. 
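## The interaction columns assembled here appear in the fitted model
## matrix under the default name "Interaction". A minimal sketch (the
## radius is illustrative):
fit <- ppm(cells ~ 1, Strauss(0.1))
colnames(model.matrix(fit))   # "(Intercept)" "Interaction"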
nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1] else paste0(vnamebase[2], 1:nc) dimnames(V) <- list(dimnames(V)[[1]], Vnames) } else if(!is.null(vnameprefix)) { ## Variable names were provided by the evaluator (e.g. MultiStrauss). ## Prefix the variable names by a string ## (typically required by mppm) Vnames <- paste(vnameprefix, Vnames, sep="") dimnames(V) <- list(dimnames(V)[[1]], Vnames) } ## Check the names are valid as column names in a dataframe okVnames <- make.names(Vnames, unique=TRUE) if(any(Vnames != okVnames)) { warning(paste("Names of interaction terms", "contained illegal characters;", "names have been repaired.")) Vnames <- okVnames } ## Check for name clashes between the interaction variables ## and the formula cc <- check.clashes(Vnames, termsinformula(trend), "model formula") if(cc != "") stop(cc) ## and with the variables in 'covariates' if(!is.null(covariates)) { cc <- check.clashes(Vnames, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) } ## OK. append variables. glmdata <- data.frame(glmdata, V) ## check IsOffset matches Vnames if(length(IsOffset) != length(Vnames)) { if(length(IsOffset) == 1) IsOffset <- rep.int(IsOffset, length(Vnames)) else stop("Internal error: IsOffset has wrong length", call.=FALSE) } ## Keep only those quadrature points for which the ## conditional intensity is nonzero. ##KEEP <- apply(V != -Inf, 1, all) .mpl$KEEP <- matrowall(V != -Inf) .mpl$SUBSET <- .mpl$SUBSET & .mpl$KEEP ## Check that there are at least some data and dummy points remaining datremain <- .mpl$Z[.mpl$SUBSET] somedat <- any(datremain) somedum <- !all(datremain) if(warn.unidentifiable && !(somedat && somedum)) { ## Model would be unidentifiable if it were fitted. ## Register problem is.identifiable <- FALSE if(ndata == 0) { complaint <- "model is unidentifiable: data pattern is empty" } else { offending <- !c(somedat, somedum) offending <- c("all data points", "all dummy points")[offending] offending <- paste(offending, collapse=" and ") complaint <- paste("model is unidentifiable:", offending, "have zero conditional intensity") } details <- list(data=!somedat, dummy=!somedum, print=complaint) problems <- append(problems, list(unidentifiable=details)) } ## check whether the model has zero likelihood: ## check whether ANY data points have zero conditional intensity if(any(.mpl$Z & !.mpl$KEEP)) { howmany <- sum(.mpl$Z & !.mpl$KEEP) complaint <- paste(howmany, "data point(s) are illegal", "(zero conditional intensity under the model)") details <- list(illegal=howmany, print=complaint) problems <- append(problems, list(zerolikelihood=details)) if(warn.illegal && is.identifiable) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. 
= FALSE) likelihood.is.zero <- TRUE } } ################## S u b s e t ################### if(correction == "border") .mpl$SUBSET <- .mpl$SUBSET & .mpl$DOMAIN if(!is.null(subsetexpr)) { ## user-defined subset expression USER.SUBSET <- eval(subsetexpr, glmdata, environment(trend)) if(is.owin(USER.SUBSET)) { USER.SUBSET <- inside.owin(P$x, P$y, USER.SUBSET) } else if(is.im(USER.SUBSET)) { USER.SUBSET <- as.logical(USER.SUBSET[P, drop=FALSE]) if(anyNA(USER.SUBSET)) USER.SUBSET[is.na(USER.SUBSET)] <- FALSE } if(!(is.logical(USER.SUBSET) || is.numeric(USER.SUBSET))) stop("Argument 'subset' should yield logical values", call.=FALSE) if(anyNA(USER.SUBSET)) { USER.SUBSET[is.na(USER.SUBSET)] <- FALSE warning("NA values in argument 'subset' were changed to FALSE", call.=FALSE) } .mpl$SUBSET <- .mpl$SUBSET & USER.SUBSET } glmdata <- cbind(glmdata, data.frame(.mpl.SUBSET=.mpl$SUBSET, stringsAsFactors=FALSE)) ################# F o r m u l a ################################## if(!want.trend) trend <- ~1 trendpart <- paste(as.character(trend), collapse=" ") if(!want.inter) rhs <- trendpart else { VN <- Vnames ## enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") rhs <- paste(c(trendpart, VN), collapse= "+") } fmla <- paste(".mpl.Y ", rhs) fmla <- as.formula(fmla) ## character string of trend formula (without Vnames) trendfmla <- paste(".mpl.Y ", trendpart) #### result <- list(fmla=fmla, trendfmla=trendfmla, covariates=if(updatecovariates) covariates else NULL, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, subsetexpr=subsetexpr, problems=problems, likelihood.is.zero=likelihood.is.zero, is.identifiable=is.identifiable, computed=computed, vnamebase=vnamebase, vnameprefix=vnameprefix, forbid=forbid) return(result) } check.clashes <- function(forbidden, offered, where) { name.match <- outer(forbidden, offered, "==") if(any(name.match)) { is.matched <- apply(name.match, 2, any) matched.names <- (offered)[is.matched] if(sum(is.matched) == 1) { return(paste("The variable",sQuote(matched.names), "in", where, "is a reserved name")) } else { return(paste("The variables", paste(sQuote(matched.names), collapse=", "), "in", where, "are reserved names")) } } return("") } mpl.prepare }) #################################################################### #################################################################### mpl.usable <- function(x) { ## silently remove covariates that don't have recognised format if(length(x) == 0 || is.data.frame(x)) return(x) isim <- sapply(x, is.im) isfun <- sapply(x, is.function) iswin <- sapply(x, is.owin) istess <- sapply(x, is.tess) isnum <- sapply(x, is.numeric) & (lengths(x) == 1) recognised <- isim | isfun | iswin | istess | isnum if(!all(recognised)) x <- x[recognised] return(x) } mpl.get.covariates <- local({ mpl.get.covariates <- function(covariates, locations, type="locations", covfunargs=list(), need.deriv=FALSE) { covargname <- sQuote(short.deparse(substitute(covariates))) locargname <- sQuote(short.deparse(substitute(locations))) if(is.null(covfunargs)) covfunargs <- list() ## extract spatial coordinates x <- locations$x y <- locations$y if(is.null(x) || is.null(y)) { xy <- xy.coords(locations) x <- xy$x y <- xy$y } if(is.null(x) || is.null(y)) stop(paste("Can't interpret", locargname, "as x,y coordinates")) ## extract marks if any m <- locations$marks markinfo <- if(is.null(m)) NULL else list(marks=m) ## validate covariates and extract values n <- length(x) if(is.data.frame(covariates)) { 
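## Covariates may be images, functions of (x, y), windows, tessellations
## or single numbers; each format is looked up at the quadrature points
## by the code below. A minimal sketch with an image and a function
## (the covariate names 'Z' and 'D' are arbitrary):
Z <- density(cells)
fit1 <- ppm(cells ~ Z, data = list(Z = Z))
fit2 <- ppm(cells ~ D, data = list(D = function(x, y) { x + y }))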
if(nrow(covariates) != n) stop(paste("Number of rows in", covargname, "does not equal the number of", type)) return(covariates) } else if(is.list(covariates)) { if(length(covariates) == 0) return(as.data.frame(matrix(, n, 0))) isim <- unlist(lapply(covariates, is.im)) isfun <- unlist(lapply(covariates, is.function)) iswin <- unlist(lapply(covariates, is.owin)) istess <- unlist(lapply(covariates, is.tess)) isnum <- unlist(lapply(covariates, is.number)) if(!all(isim | isfun | isnum | iswin | istess)) stop(paste("Each entry in the list", covargname, "should be an image, a function,", "a window, a tessellation or a single number")) if(sum(nzchar(names(covariates))) < length(covariates)) stop(paste("Some entries in the list", covargname, "are un-named")) ## look up values of each covariate at the quadrature points values <- unclass(covariates) values[isim] <- lapply(covariates[isim], lookup.im, x=x, y=y, naok=TRUE, strict=FALSE) values[isfun] <- vf <- lapply(covariates[isfun], evalfxy, x=x, y=y, extra=append(covfunargs, markinfo)) values[isnum] <- lapply(covariates[isnum], rep, length(x)) values[iswin] <- lapply(covariates[iswin], insidexy, x=x, y=y) values[istess] <- lapply(covariates[istess], tileindex, x=x, y=y) result <- as.data.frame(values) if(need.deriv && any(isfun)) { ## check for gradient/hessian attributes of function values grad <- lapply(vf, attr, which="gradient") hess <- lapply(vf, attr, which="hessian") grad <- grad[!unlist(lapply(grad, is.null))] hess <- hess[!unlist(lapply(hess, is.null))] if(length(grad) > 0 || length(hess) > 0) attr(result, "derivatives") <- list(gradient=grad, hessian=hess) } return(result) } stop(paste(covargname, "must be either a data frame or a list")) } ## functions for 'apply' evalfxy <- function(f, x, y, extra) { if(length(extra) == 0) return(f(x,y)) ## extra arguments must be matched explicitly by name ok <- names(extra) %in% names(formals(f)) z <- do.call(f, append(list(x,y), extra[ok])) return(z) } insidexy <- function(w, x, y) { inside.owin(x, y, w) } is.number <- function(x) { is.numeric(x) && (length(x) == 1) } mpl.get.covariates }) bt.frame <- function(Q, trend=~1, interaction=NULL, ..., covariates=NULL, correction="border", rbord=0, use.gam=FALSE, allcovar=FALSE) { prep <- mpl.engine(Q, trend=trend, interaction=interaction, ..., covariates=covariates, correction=correction, rbord=rbord, use.gam=use.gam, allcovar=allcovar, preponly=TRUE, forcefit=TRUE) class(prep) <- c("bt.frame", class(prep)) return(prep) } print.bt.frame <- function(x, ...) 
{ cat("Model frame for Berman-Turner device\n") df <- x$glmdata cat(paste("$glmdata: Data frame with", nrow(df), "rows and", ncol(df), "columns\n")) cat(" Column names:\t") cat(paste(paste(names(df),collapse="\t"), "\n")) cat("Complete model formula ($fmla):\t") print(x$fmla) info <- x$info if(info$want.trend) { cat("Trend:\tyes\nTrend formula string ($trendfmla):\t") cat(paste(x$trendfmla, "\n")) } else cat("Trend:\tno\n") cat("Interaction ($info$interaction):\t") inte <- info$interaction if(is.null(inte)) inte <- Poisson() print(inte, family=FALSE, brief=TRUE) if(!is.poisson.interact(inte)) { cat("Internal names of interaction variables ($Vnames):\t") cat(paste(x$Vnames, collapse="\t")) cat("\n") } edge <- info$correction cat(paste("Edge correction ($info$correction):\t", sQuote(edge), "\n")) if(edge == "border") cat(paste("\tBorder width ($info$rbord):\t", info$rbord, "\n")) if(length(x$problems) > 0) { cat("Problems:\n") print(x$problems) } if(length(x$computed) > 0) cat(paste("Frame contains saved computations for", commasep(dQuote(names(x$computed))))) return(invisible(NULL)) } partialModelMatrix <- function(X, D, model, callstring="", ...) { ## X = 'data' ## D = 'dummy' Q <- quad(X,D) P <- union.quad(Q) trend <- model$trend inter <- model$interaction covar <- model$covariates prep <- mpl.prepare(Q, X, P, trend, inter, covar, correction=model$correction, rbord=model$rbord, Pname="data points", callstring=callstring, warn.unidentifiable=FALSE, ...) fmla <- prep$fmla glmdata <- prep$glmdata mof <- model.frame(fmla, glmdata) mom <- model.matrix(fmla, mof) modelnames <- names(coef(model)) modelnames <- sub("log(lambda)", "(Intercept)", modelnames, fixed=TRUE) if(!isTRUE(all.equal(colnames(mom), modelnames))) warning(paste("Internal error: mismatch between", "column names of model matrix", "and names of coefficient vector in fitted model")) attr(mom, "mplsubset") <- glmdata$.mpl.SUBSET attr(mom, "-Inf") <- prep$forbid return(mom) } oversize.quad <- function(Q, ..., nU, nX, p=1) { ## Determine whether the quadrature scheme is ## too large to handle in one piece (in mpl) ## for a generic interaction ## nU = number of quadrature points ## nX = number of data points ## p = dimension of statistic if(missing(nU)) nU <- n.quad(Q) if(missing(nX)) nX <- npoints(Q$data) nmat <- as.double(nU) * nX nMAX <- spatstat.options("maxmatrix")/p needsplit <- (nmat > nMAX) return(needsplit) } quadBlockSizes <- function(nX, nD, p=1, nMAX=spatstat.options("maxmatrix")/p, announce=TRUE) { if(is.quad(nX) && missing(nD)) { nD <- npoints(nX$dummy) nX <- npoints(nX$data) } ## Calculate number of dummy points in largest permissible X * (X+D) matrix nperblock <- max(1, floor(nMAX/nX - nX)) ## determine number of such blocks nblocks <- ceiling(nD/nperblock) ## make blocks roughly equal (except for the last one) nperblock <- min(nperblock, ceiling(nD/nblocks)) ## announce if(announce && nblocks > 1) { msg <- paste("Large quadrature scheme", "split into blocks to avoid memory size limits;", nD, "dummy points split into", nblocks, "blocks,") nfull <- nblocks - 1 nlastblock <- nD - nperblock * nfull if(nlastblock == nperblock) { msg <- paste(msg, "each containing", nperblock, "dummy points") } else { msg <- paste(msg, "the first", ngettext(nfull, "block", paste(nfull, "blocks")), "containing", nperblock, ngettext(nperblock, "dummy point", "dummy points"), "and the last block containing", nlastblock, ngettext(nlastblock, "dummy point", "dummy points")) } message(msg) } else nlastblock <- nperblock 
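## Worked example of the block-size arithmetic above (illustrative;
## assumes the usual default spatstat.options("maxmatrix") = 2^24):
##   nX = 1000 data points, nD = 50000 dummy points, p = 1
##   nperblock = max(1, floor(2^24/1000 - 1000)) = 15777
##   nblocks   = ceiling(50000/15777)            = 4
##   nperblock = min(15777, ceiling(50000/4))    = 12500
## so the 50000 dummy points are processed in 4 blocks of 12500 each.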
return(list(nblocks=nblocks, nperblock=nperblock, nlastblock=nlastblock)) } ## function that should be called to evaluate interaction terms ## between quadrature points and data points evalInteraction <- function(X, P, E = equalpairs(P, X), interaction, correction, splitInf=FALSE, ..., precomputed=NULL, savecomputed=FALSE) { ## evaluate the interaction potential ## (does not assign/touch the variable names) verifyclass(interaction, "interact") ## handle Poisson case if(is.poisson(interaction)) { out <- matrix(numeric(0), nrow=npoints(P), ncol=0) attr(out, "IsOffset") <- logical(0) if(splitInf) attr(out, "-Inf") <- logical(nrow(out)) return(out) } ## determine whether to use fast evaluation in C dofast <- (spatstat.options("fasteval") %in% c("on", "test")) && !is.null(cando <- interaction$can.do.fast) && cando(X, correction, interaction$par) && !is.null(interaction$fasteval) ## determine whether to split quadscheme into blocks if(dofast) { dosplit <- FALSE } else { ## decide whether the quadrature scheme is too large to handle in one piece needsplit <- oversize.quad(nU=npoints(P), nX=npoints(X)) ## not implemented when savecomputed=TRUE dosplit <- needsplit && !savecomputed if(needsplit && savecomputed) warning(paste("Oversize quadscheme cannot be split into blocks", "because savecomputed=TRUE;", "memory allocation error may occur")) } if(!dosplit) { ## normal case V <- evalInterEngine(X=X, P=P, E=E, interaction=interaction, correction=correction, splitInf=splitInf, ..., precomputed=precomputed, savecomputed=savecomputed) } else { ## Too many quadrature points: split into blocks nX <- npoints(X) nP <- npoints(P) ## Determine which evaluation points are data points Pdata <- E[,2] ## hence which are dummy points Pall <- seq_len(nP) Pdummy <- if(length(Pdata) > 0) Pall[-Pdata] else Pall nD <- length(Pdummy) ## calculate block sizes bls <- quadBlockSizes(nX, nD, announce=TRUE) nblocks <- bls$nblocks nperblock <- bls$nperblock ## seqX <- seq_len(nX) EX <- cbind(seqX, seqX) ## for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) ## extract dummy points Di <- P[Pdummy[first:last]] Pi <- superimpose(X, Di, check=FALSE, W=X$window) ## evaluate potential Vi <- evalInterEngine(X=X, P=Pi, E=EX, interaction=interaction, correction=correction, splitInf=splitInf, ..., savecomputed=FALSE) Mi <- attr(Vi, "-Inf") if(iblock == 1) { V <- Vi M <- Mi } else { ## tack on the glm variables for the extra DUMMY points only V <- rbind(V, Vi[-seqX, , drop=FALSE]) if(splitInf && !is.null(M)) M <- c(M, Mi[-seqX]) } } ## The first 'nX' rows of V contain values for X. ## The remaining rows of V contain values for dummy points. 
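## Illustrative example of the reassembly below (not executed):
## with nX = 2 data points and nP = 5 quadrature points, where
## Pdata = c(2,4) and Pdummy = c(1,3,5), the rows of V are currently
## ordered (X[1], X[2], D[1], D[2], D[3]).  The permutation 'ii'
## constructed below has ii[Pdata] = 1:2 and ii[Pdummy] = 3:5, so
## V[ii, ] has rows (D[1], X[1], D[2], X[2], D[3]), i.e. the original
## ordering of the quadrature points P is restored.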
if(length(Pdata) == 0) { ## simply discard rows corresponding to data V <- V[-seqX, , drop=FALSE] if(splitInf && !is.null(M)) M <- M[-seqX] } else { ## replace data in correct position ii <- integer(nP) ii[Pdata] <- seqX ii[Pdummy] <- (nX+1):nrow(V) V <- V[ii, , drop=FALSE] if(splitInf && !is.null(M)) M <- M[ii] } attr(V, "-Inf") <- M } return(V) } ## workhorse function that actually calls relevant code to evaluate interaction evalInterEngine <- function(X, P, E, interaction, correction, splitInf=FALSE, ..., Reach = NULL, precomputed=NULL, savecomputed=FALSE) { ## fast evaluator (C code) may exist fasteval <- interaction$fasteval cando <- interaction$can.do.fast par <- interaction$par feopt <- spatstat.options("fasteval") dofast <- !is.null(fasteval) && (is.null(cando) || cando(X, correction,par)) && (feopt %in% c("on", "test")) && (!splitInf || ("splitInf" %in% names(formals(fasteval)))) V <- NULL if(dofast) { if(feopt == "test") message("Calling fasteval") V <- fasteval(X, P, E, interaction$pot, interaction$par, correction, splitInf=splitInf, ...) } if(is.null(V)) { ## use generic evaluator for family evaluate <- interaction$family$eval evalargs <- names(formals(evaluate)) if(splitInf && !("splitInf" %in% evalargs)) stop("Sorry, the", interaction$family$name, "interaction family", "does not support calculation of the positive part", call.=FALSE) if(is.null(Reach)) Reach <- reach(interaction) if("precomputed" %in% evalargs) { ## Use precomputed data ## version 1.9-3 onward (pairwise and pairsat families) V <- evaluate(X, P, E, interaction$pot, interaction$par, correction=correction, splitInf=splitInf, ..., Reach=Reach, precomputed=precomputed, savecomputed=savecomputed) } else { ## Cannot use precomputed data ## Object created by earlier version of ppm ## or not pairwise/pairsat interaction V <- evaluate(X, P, E, interaction$pot, interaction$par, correction=correction, splitInf=splitInf, ..., Reach=Reach) } } return(V) } deltasuffstat <- local({ deltasuffstat <- function(model, ..., restrict=c("pairs", "first", "none"), dataonly=TRUE, sparseOK=TRUE, quadsub=NULL, force=FALSE, warn.forced=FALSE, verbose=warn.forced, use.special=TRUE) { stopifnot(is.ppm(model)) sparseOK <- !isFALSE(sparseOK) # NULL -> TRUE restrict <- match.arg(restrict) if(dataonly) { X <- data.ppm(model) nX <- npoints(X) } else { X <- quad.ppm(model) if(!is.null(quadsub)) { z <- is.data(X) z[quadsub] <- FALSE if(any(z)) stop("subset 'quadsub' must include all data points", call.=FALSE) X <- X[quadsub] } nX <- n.quad(X) } ncoef <- length(coef(model)) inte <- as.interact(model) if(!sparseOK && exceedsMaxArraySize(nX, nX, ncoef)) stop(paste("Array dimensions too large", paren(paste(c(nX, nX, ncoef), collapse=" x ")), "for non-sparse calculation of variance terms"), call.=FALSE) zeroes <- if(!sparseOK) array(0, dim=c(nX, nX, ncoef)) else sparse3Darray(dims=c(nX, nX, ncoef)) if(is.poisson(inte)) return(zeroes) ## Get names of interaction terms in model (including offsets) f <- fitin(model) Inames <- f$Vnames IsOffset <- f$IsOffset hasInf <- !identical(inte$hasInf, FALSE) ## Offset terms do not contribute to sufficient statistic if(all(IsOffset) && !hasInf) return(zeroes) ## Nontrivial interaction terms must be computed. 
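## Contract assumed for the $delta2 members used below (a sketch,
## inferred from how the result is consumed in this file, not an
## authoritative specification): delta2(X, inte, correction, sparseOK)
## should return 'v', typically a 3D array or sparse3Darray (a matrix
## is also accepted and promoted), with v[i,j,k] giving the change in
## the k-th canonical interaction statistic at point i when point j is
## deleted (data) or added (dummy), optionally carrying an attribute
## "deltaInf" that flags pairs whose potential jumps to -Inf
## (hard-core conflicts).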
## Look for member function $delta2 in the interaction v <- NULL v.is.full <- FALSE if(use.special) { ## Use specialised $delta2 for interaction,if available if(is.function(delta2 <- inte$delta2)) v <- delta2(X, inte, model$correction, sparseOK=sparseOK) ## Use generic $delta2 for the family, if available if(is.null(v) && is.function(delta2 <- inte$family$delta2)) v <- delta2(X, inte, model$correction, sparseOK=sparseOK) } ## no luck? if(is.null(v)) { if(!force) return(NULL) ## use brute force algorithm if(warn.forced) warning("Reverting to brute force to compute interaction terms", call.=FALSE) v <- if(dataonly) deltasufX(model, sparseOK, verbose=verbose) else deltasufQ(model, quadsub, sparseOK, verbose=verbose) v.is.full <- TRUE } ## extract hard core information deltaInf <- attr(v, "deltaInf") ## ensure 'v' is a 3D array if(length(dim(v)) != 3) { if(is.matrix(v)) { v <- array(v, dim=c(dim(v), 1)) } else if(inherits(v, "sparseMatrix")) { v <- as.sparse3Darray(v) } } if(!sparseOK) { if(inherits(v, "sparse3Darray")) v <- as.array(v) if(inherits(deltaInf, "sparseMatrix")) deltaInf <- as.matrix(deltaInf) } if(restrict != "none") { ## kill contributions from points outside the domain of pseudolikelihood ## (e.g. points in the border region) use <- if(dataonly) getppmdatasubset(model) else if(is.null(quadsub)) getglmsubset(model) else getglmsubset(model)[quadsub] if(any(kill <- !use)) { switch(restrict, pairs = { v[kill,kill,] <- 0 }, first = { v[kill,,] <- 0 }, none = {}) if(!is.null(deltaInf)) { switch(restrict, pairs = { deltaInf[kill,kill] <- FALSE }, first = { deltaInf[kill,] <- FALSE }, none = {}) } } } ## Make output array, with planes corresponding to model coefficients if(v.is.full) { ## Planes of 'v' already correspond to coefficients of model cnames <- names(coef(model)) ## Remove any offset interaction terms ## (e.g. Hardcore interaction): these do not contribute to suff stat if(any(IsOffset)) { retain <- is.na(match(cnames, Inames[IsOffset])) v <- v[ , , retain, drop=FALSE] Inames <- Inames[!IsOffset] } result <- v } else { ## Planes of 'v' correspond to interaction terms only. 
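## Illustrative example of the coefficient mapping performed below
## (hypothetical coefficient names, not executed): if
##   names(coef(model)) == c("(Intercept)", "x", "Interaction")
## and Inames == "Interaction", then
##   Imap <- match(Inames, names(coef(model)))   # == 3
## so the single interaction plane of 'v' is copied into plane 3 of
## 'result', while planes 1 and 2 (the first-order terms) stay zero.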
## Fill out the first order terms with zeroes result <- zeroes if(length(Inames) != dim(v)[3]) stop(paste("Internal error: deltasuffstat:", "number of planes of v =", dim(v)[3], "!= number of interaction terms =", length(Inames)), call.=FALSE) ## Offset terms do not contribute to sufficient statistic if(any(IsOffset)) { v <- v[ , , !IsOffset, drop=FALSE] Inames <- Inames[!IsOffset] } ## Map planes of 'v' into coefficients Imap <- match(Inames, names(coef(model))) if(anyNA(Imap)) stop(paste("Internal error: deltasuffstat:", "cannot match interaction coefficients")) if(length(Imap) > 0) { ## insert 'v' into array result[ , , Imap] <- v } } ## pack up attr(result, "deltaInf") <- deltaInf return(result) } ## compute deltasuffstat using partialModelMatrix deltasufX <- function(model, sparseOK=TRUE, verbose=FALSE) { stopifnot(is.ppm(model)) X <- data.ppm(model) hasInf <- !identical(model$interaction$hasInf, FALSE) nX <- npoints(X) p <- length(coef(model)) m <- model.matrix(model, splitInf=hasInf) if(hasInf) { isInf <- attr(m, "-Inf") hasInf <- !is.null(isInf) } isdata <- is.data(quad.ppm(model)) m <- m[isdata, ,drop=FALSE] if(hasInf) isInf <- isInf[isdata] ok <- getppmdatasubset(model) ## canonical statistic before and after deleting X[j] ## mbefore[ , i, j] = h(X[i] | X) ## mafter[ , i, j] = h(X[i] | X[-j]) ## where h(u|x) is the canonical statistic of the *positive* cif dimwork <- c(p, nX, nX) if(!sparseOK) { mafter <- mbefore <- array(t(m), dim=dimwork) isInfafter <- isInfbefore <- if(!hasInf) NULL else matrix(isInf, dim=dimwork[-1]) } else { ## make empty arrays; fill in values later ## (but only where they might change) mafter <- mbefore <- sparse3Darray(dims=dimwork) isInfafter <- isInfbefore <- if(!hasInf) NULL else sparseMatrix(i=integer(0), j=integer(0), x=logical(0), dims=dimwork[-1]) } ## identify close pairs R <- reach(model) if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## DO NOT RESTRICT - THIS IS NOW DONE IN deltasuffstat ## filter: I and J must both belong to the nominated subset ## okIJ <- ok[I] & ok[J] ## I <- I[okIJ] ## J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................ 
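## Why only close pairs are visited (commentary on the loop below):
## if R = reach(model) is finite, deleting X[j] can only change the
## statistic at points within distance R of X[j], so it suffices to
## loop over the pairs returned by
##   cl <- closepairs(X, R, what="indices")   # list(i=..., j=...)
## The pairs within 2R are used to trim X[-i] to a neighbourhood large
## enough to contain everything that interacts with the neighbours of
## X[i]; all other entries keep their 'mbefore' values (zero
## difference in the sparse representation).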
uniqueI <- unique(I) npairs <- length(uniqueI) pstate <- list() if(verbose) splat("Examining", npairs, "pairs of data points...") ## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## ## Run through pairs for(iter in seq_len(npairs)) { i <- uniqueI[iter] ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- snipModelMatrix(X.i, empty, model, J.i, hasInf) if(hasInf) zzj <- attr(pmj, "-Inf") ## sufficient statistic in reverse order ## h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)] ## for all j pmi <- matrix(, nJi, p) zzi <- logical(nJi) for(k in 1:nJi) { ## j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] pmik <- snipModelMatrix(X.ij, Xi, model, nX.i, hasInf) pmi[k, ] <- pmik if(hasInf) zzi[k] <- attr(pmik, "-Inf") } ## if(!sparseOK) { mafter[ , Ji, i] <- t(pmj) mafter[ , i, Ji] <- t(pmi) if(hasInf) { isInfafter[Ji, i] <- zzj isInfafter[i, Ji] <- zzi } } else { mafter[ , Ji, i] <- array(t(pmj), dim=c(p, nJi, 1)) mafter[ , i, Ji] <- array(t(pmi), dim=c(p, 1, nJi)) mbefore[ , Ji, i] <- array(t(m[Ji,]), dim=c(p, nJi, 1)) mbefore[ , i, Ji] <- array(m[i,], dim=c(p, 1, nJi)) if(hasInf) { isInfafter[Ji, i] <- zzj isInfafter[i, Ji] <- zzi isInfbefore[Ji, i] <- isInf[Ji] isInfbefore[i, Ji] <- isInf[i] } } } if(verbose) pstate <- progressreport(iter, npairs, state=pstate) } } ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta <- mbefore - mafter ## delta[i, j, ] = h(X[i] | X) - h(X[i] | X[-j]) delta <- aperm(delta, c(2,3,1)) ## if(hasInf) { deltaInf <- isInfbefore - isInfafter attr(delta, "deltaInf") <- deltaInf } return(delta) } deltasufQ <- function(model, quadsub, sparseOK, verbose=FALSE) { stopifnot(is.ppm(model)) hasInf <- !identical(model$interaction$hasInf, FALSE) p <- length(coef(model)) Q <- quad.ppm(model) ok <- getglmsubset(model) m <- model.matrix(model, splitInf=hasInf) if(hasInf) { isInf <- attr(m, "-Inf") hasInf <- !is.null(isInf) } if(!is.null(quadsub)) { Q <- Q[quadsub] m <- m[quadsub, , drop=FALSE] ok <- ok[quadsub] if(hasInf) isInf <- isInf[quadsub] } X <- Q$data U <- union.quad(Q) nU <- npoints(U) nX <- npoints(X) isdata <- is.data(Q) isdummy <- !isdata ## canonical statistic before and after adding/deleting U[j] dimwork <- c(p, nU, nU) if(!sparseOK) { mafter <- mbefore <- array(t(m), dim=dimwork) delta <- array(0, dim=dimwork) isInfafter <- isInfbefore <- deltaInf <- if(!hasInf) NULL else matrix(isInf, dim=dimwork[-1]) } else { ## make empty arrays; fill in values later ## [but only where they might change] mafter <- mbefore <- delta <- sparse3Darray(dims=dimwork) isInfafter <- isInfbefore <- deltaInf <- if(!hasInf) NULL else sparseMatrix(i=integer(0), j=integer(0), x=logical(0), dims=dimwork[-1]) } ## mbefore[ , i, j] = h(U[i] | X) ## For data points X[j] ## mafter[ , i, j] = h(U[i] | X[-j]) ## delta[ , i, j] = h(U[i] | X) - h(U[i] | X[-j]) ## For dummy points X[j] ## mafter[ , i, j] = h(U[i] | X \cup U[j]) ## delta[ , i, j] = h(U[i] | X \cup U[j]) - h(U[i] | X) changesign <- ifelseAB(isdata, -1, 1) ## identify close pairs of quadrature points R <- reach(model) if(is.finite(R)) { cl <- closepairs(U, R, what="indices") I <- cl$i J <- cl$j cl2 <- 
closepairs(U, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nU, J=1:nX) IJ <- IJ[ with(IJ, I != J), ] I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs of quadrature points ............... ## Run through pairs uI <- unique(I) zI <- isdata[uI] uIdata <- uI[zI] uIdummy <- uI[!zI] nuIdata <- length(uIdata) nuIdummy <- length(uIdummy) if(verbose) splat("Examining", nuIdata, "+", nuIdummy, "=", nuIdata + nuIdummy, "pairs of points") ## Run through pairs i, j where 'i' is a data point pstate <- list() for(iter in seq_len(nuIdata)) { i <- uIdata[iter] ## all DATA points within 2R of X[i] ## This represents X[-i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] ## all QUADRATURE points within R of X[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] ## data points which are neighbours of X[i] XJi <- X[Ji[isd]] ## dummy points which are neighbours of X[i] DJi <- U[Ji[!isd]] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji[isd], J2i) if(anyNA(J.i)) stop("Internal error: Ji[isd] not a subset of J2i") ## index of DJi in superimpose(X.i, DJi) JDi <- nX.i + seq_len(sum(!isd)) ## values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- snipModelMatrix(X.i, DJi, model, c(J.i, JDi), hasInf) ## mafter[ , Ji, i] <- t(pmj) if(hasInf) isInfafter[Ji, i] <- attr(pmj, "-Inf") if(sparseOK) { mbefore[ , Ji, i] <- array(t(m[Ji,]), dim=c(p, nJi, 1)) if(hasInf) isInfbefore[Ji, i] <- isInf[Ji] } } if(verbose) pstate <- progressreport(iter, nuIdata, state=pstate) } ## Run through pairs i, j where 'i' is a dummy point pstate <- list() for(iter in seq_len(nuIdummy)) { i <- uIdummy[iter] ## all DATA points within 2R of U[i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] ## all QUADRATURE points within R of U[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] JiData <- Ji[isd] JiDummy <- Ji[!isd] ## data points which are neighbours of U[i] XJi <- X[JiData] ## dummy points which are neighbours of U[i] DJi <- U[JiDummy] ## replace X \cup U[i] by (X \cap b(0, 2R)) \cup U[i] J2Ui <- c(J2i, i) XUi <- U[J2Ui] nXUi <- length(J2Ui) ## index of XJi in X.i J.i <- match(JiData, J2Ui) if(anyNA(J.i)) stop("Internal error: Ji[isd] not a subset of J2i") ## index of DJi in superimpose(X.i, DJi) JDi <- nXUi + seq_len(length(JiDummy)) ## values of sufficient statistic ## h(X[j] | X \cup U[i]) ## for all j pmj <- snipModelMatrix(XUi, DJi, model, c(J.i, JDi), hasInf) ## JiSort <- c(JiData, JiDummy) if(!sparseOK) { mafter[ , JiSort, i] <- t(pmj) if(hasInf) isInfafter[JiSort, i] <- attr(pmj, "-Inf") } else { mafter[ , JiSort, i] <- array(t(pmj), dim=c(p, nJi, 1)) mbefore[ , JiSort, i] <- array(t(m[JiSort,]), dim=c(p, nJi, 1)) if(hasInf) { isInfafter[JiSort, i] <- attr(pmj, "-Inf") isInfbefore[JiSort, i] <- isInf[JiSort] } } } if(verbose) pstate <- progressreport(iter, nuIdummy, state=pstate) } } ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta[ , , isdata] <- mbefore[, , isdata] - mafter[ , , isdata] ## delta[ ,i,j] = h(X[i] | X \cup U[j]) - h(X[i] | X) delta[ , , isdummy] <- mafter[, , isdummy] - mbefore[ , , isdummy] ## rearrange: new delta[i,j,] = old delta[, i, j] delta <- aperm(delta, c(2,3,1)) ## if(hasInf) { deltaInf[ , isdata] <- isInfbefore[ , 
isdata] - isInfafter[ , isdata] deltaInf[ , isdummy] <- isInfafter[ , isdummy] - isInfbefore[ , isdummy] attr(delta, "deltaInf") <- deltaInf } return(delta) } snipModelMatrix <- function(X, D, model, retain, splitInf=FALSE) { M <- partialModelMatrix(X, D, model, splitInf=splitInf) if(splitInf) isInf <- attr(M, "-Inf") M <- M[retain, , drop=FALSE] if(splitInf) attr(M, "-Inf") <- isInf[retain] return(M) } deltasuffstat }) spatstat.core/R/rknn.R0000644000176200001440000000203214144333463014342 0ustar liggesusers# # rknn.R # # Distribution of distance to k-th nearest point in d dimensions # (Poisson process of intensity lambda) # # $Revision: 1.2 $ $Date: 2009/12/31 01:33:44 $ # dknn <- function(x, k=1, d=2, lambda=1) { validposint(k, "dknn") validposint(d, "dknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- dgamma(x^d, shape=k, rate=lambda * alpha.d) y <- y * d * x^(d-1) return(y) } pknn <- function(q, k=1, d=2, lambda=1) { validposint(k, "pknn") validposint(d, "pknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) p <- pgamma(q^d, shape=k, rate=lambda * alpha.d) return(p) } qknn <- function(p, k=1, d=2, lambda=1) { validposint(k, "qknn") validposint(d, "qknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- qgamma(p, shape=k, rate=lambda * alpha.d) z <- y^(1/d) return(z) } rknn <- function(n, k=1, d=2, lambda=1) { validposint(k, "rknn") validposint(d, "rknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- rgamma(n, shape=k, rate=lambda * alpha.d) x <- y^(1/d) return(x) } spatstat.core/R/relrisk.R0000644000176200001440000004553514144333463015064 0ustar liggesusers# # relrisk.R # # Estimation of relative risk # # $Revision: 1.50 $ $Date: 2020/05/09 05:29:13 $ # relrisk <- function(X, ...) UseMethod("relrisk") relrisk.ppp <- local({ relrisk.ppp <- function(X, sigma=NULL, ..., at=c("pixels", "points"), weights = NULL, varcov=NULL, relative=FALSE, adjust=1, edge=TRUE, diggle=FALSE, se=FALSE, casecontrol=TRUE, control=1, case) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) ## evaluate numerical weights (multiple columns not allowed) weights <- pointweights(X, weights=weights, parent=parent.frame()) weighted <- !is.null(weights) ## npts <- npoints(X) marx <- marks(X) imarks <- as.integer(marx) types <- levels(marx) ntypes <- length(types) if(ntypes == 1) stop("Data contains only one type of points") ## casecontrol <- casecontrol && (ntypes == 2) if((control.given || case.given) && !(casecontrol || relative)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE and", if(ntypes==2) "casecontrol=FALSE" else "there are more than 2 types of points")) } ## prepare for analysis Y <- split(X) splitweights <- if(weighted) split(weights, marx) else rep(list(NULL), ntypes) uX <- unmark(X) ## compute bandwidth (default bandwidth selector is bw.relrisk) ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, adjust=adjust, bwfun=bw.relrisk, x=X) sigma <- ker$sigma varcov <- ker$varcov ## determine smoothing parameters if(bandwidth.is.infinite(sigma)) edge <- FALSE SmoothPars <- resolve.defaults(list(sigma=sigma, varcov=varcov, at=at, edge=edge, diggle=diggle), list(...)) ## if(se) { ## determine other bandwidth for variance estimation VarPars <- SmoothPars if(bandwidth.is.infinite(sigma)) { varconst <- 1 } else if(is.null(varcov)) { varconst <- 
1/(4 * pi * prod(sigma)) VarPars$sigma <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) VarPars$varcov <- varcov/2 } if(edge) { ## evaluate edge correction weights edgeim <- do.call(second.moment.calc, append(list(x=uX, what="edge"), SmoothPars)) if(diggle || at == "points") { edgeX <- safelookup(edgeim, uX, warn=FALSE) diggleX <- 1/edgeX diggleX[!is.finite(diggleX)] <- 0 } edgeim <- edgeim[Window(X), drop=FALSE] } } ## ......................................... ## compute intensity estimates for each type ## ......................................... switch(at, pixels = { ## intensity estimates of each type Deach <- do.call(density.splitppp, append(list(x=Y, weights=splitweights), SmoothPars)) ## compute intensity estimate for unmarked pattern Dall <- im.apply(Deach, sum, check=FALSE) ## WAS: Dall <- Reduce("+", Deach) ## variance terms if(se) { if(!edge) { ## no edge correction Veach <- do.call(density.splitppp, append(list(x=Y, weights=splitweights), VarPars)) } else if(!diggle) { ## edge correction e(u) Veach <- do.call(density.splitppp, append(list(x=Y, weights=splitweights), VarPars)) #' Ops.imlist not yet working Veach <- imagelistOp(Veach, edgeim, "/") } else { ## Diggle edge correction e(x_i) diggweights <- if(weighted) { diggleX * weights } else diggleX Veach <- as.solist(mapply(density.ppp, x=Y, weights=split(diggweights, marx), MoreArgs=VarPars, SIMPLIFY=FALSE)) } #' Ops.imlist not yet working Veach <- imagelistOp(Veach, varconst, "*") Vall <- im.apply(Veach, sum, check=FALSE) ## WAS: Vall <- Reduce("+", Veach) } }, points = { ## intensity estimates of each type **at each data point** ## dummy variable matrix dumm <- matrix(0, npts, ntypes) dumm[cbind(seq_len(npts), imarks)] <- 1 colnames(dumm) <- types if(weighted) dumm <- dumm * weights Deach <- do.call(density.ppp, append(list(x=uX, weights=dumm), SmoothPars)) ## compute intensity estimate for unmarked pattern Dall <- rowSums(Deach) ## variance terms if(se) { if(!edge) { ## no edge correction Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm), VarPars)) } else if(!diggle) { ## edge correction e(u) Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm), VarPars)) Veach <- Veach * diggleX } else { ## Diggle edge correction e(x_i) Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm * diggleX), VarPars)) } Veach <- Veach * varconst Vall <- rowSums(Veach) } }) ## ......................................... ## compute probabilities/risks ## ......................................... if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } ## compute ...... 
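## Illustrative usage (not executed; 'amacrine' is a standard bivariate
## dataset from spatstat.data, used here as an assumed example):
##   p  <- relrisk(amacrine)                      # probability of the case type
##   ps <- relrisk(amacrine, se=TRUE)             # list(estimate=..., SE=...)
##   r  <- relrisk(amacrine, relative=TRUE, control="off")
## The switch below branches on at="pixels" (images over the window)
## versus at="points" (values at the data points only).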
switch(at,
       pixels = {
         ## compute probability of case
         pcase <- Deach[[icase]]/Dall
         ## correct small numerical errors
         pcase <- clamp01(pcase)
         ## trap NaN values
         nbg <- badvalues(pcase)
         if(any(nbg)) {
           ## apply l'Hopital's rule:
           ##     p(case) = 1{nearest neighbour is case}
           distcase <- distmap(Y[[icase]], xy=pcase)
           distcontrol <- distmap(Y[[icontrol]], xy=pcase)
           closecase <- eval.im(as.integer(distcase < distcontrol))
           pcase[nbg] <- closecase[nbg]
         }
         if(!relative) {
           if(!se) {
             result <- pcase
           } else {
             Vcase <- Veach[[icase]]
             NUM <- eval.im(Vcase * (1-2*pcase) + Vall * pcase^2)
             SE <- eval.im(sqrt(pmax(NUM, 0))/Dall)
             result <- solist(estimate=pcase, SE=SE)
           }
         } else {
           rcase <- eval.im(ifelse(pcase < 1, pcase/(1-pcase), NA))
           if(!se) {
             result <- rcase
           } else {
             Vcase <- Veach[[icase]]
             Vctrl <- Veach[[icontrol]]
             Dctrl <- Deach[[icontrol]]
             NUM <- eval.im(Vcase + Vctrl * rcase^2)
             SE <- eval.im(sqrt(pmax(NUM, 0))/Dctrl)
             result <- solist(estimate=rcase, SE=SE)
           }
         }
       },
       points = {
         ## compute probability of case
         pcase <- Deach[,icase]/Dall
         ## correct small numerical errors
         pcase <- clamp01(pcase)
         ## trap NaN values
         if(any(nbg <- badvalues(pcase))) {
           ## apply l'Hopital's rule
           nntype <- imarks[nnwhich(X)]
           pcase[nbg] <- as.integer(nntype[nbg] == icase)
         }
         if(!relative) {
           if(!se) {
             result <- pcase
           } else {
             NUM <- Veach[,icase] * (1-2*pcase) + Vall * pcase^2
             SE <- sqrt(pmax(NUM, 0))/Dall
             result <- list(estimate=pcase, SE=SE)
           }
         } else {
           rcase <- ifelse(pcase < 1, pcase/(1-pcase), NA)
           if(!se) {
             result <- rcase
           } else {
             NUM <- Veach[,icase] + Veach[,icontrol] * rcase^2
             SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol]
             result <- list(estimate=rcase, SE=SE)
           }
         }
       })
  } else {
    ## several types
    if(relative) {
      ## need 'control' type
      stopifnot(length(control) == 1)
      if(is.numeric(control)) {
        icontrol <- control <- as.integer(control)
        stopifnot(control %in% 1:ntypes)
      } else if(is.character(control)) {
        icontrol <- match(control, types)
        if(is.na(icontrol)) stop(paste("No points have mark =", control))
      } else
        stop(paste("Unrecognised format for argument", sQuote("control")))
    }
    switch(at,
           pixels={
             #' Ops.imagelist not yet working
             probs <- imagelistOp(Deach, Dall, "/")
             ## correct small numerical errors
             probs <- as.solist(lapply(probs, clamp01))
             ## trap NaN values
             nbg <- lapply(probs, badvalues)
             nbg <- Reduce("|", nbg)
             if(any(nbg)) {
               ## apply l'Hopital's rule
               distX <- distmap(X, xy=Dall)
               whichnn <- attr(distX, "index")
               typenn <- eval.im(imarks[whichnn])
               typennsub <- as.matrix(typenn)[nbg]
               for(k in seq_along(probs))
                 probs[[k]][nbg] <- (typennsub == k)
             }
             if(!relative) {
               if(!se) {
                 result <- probs
               } else {
                 SE <- list()
                 for(i in 1:ntypes) {
                   NUM <- (Veach[[i]] * (1 - 2 * probs[[i]])
                           + Vall * probs[[i]]^2)
                   SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dall)
                 }
                 SE <- as.solist(SE)
                 names(SE) <- types
                 result <- list(estimate=probs, SE=SE)
               }
             } else {
               risks <- as.solist(lapply(probs, divideifpositive,
                                         d = probs[[icontrol]]))
               if(!se) {
                 result <- risks
               } else {
                 Vctrl <- Veach[[icontrol]]
                 Dctrl <- Deach[[icontrol]]
                 SE <- list()
                 for(i in 1:ntypes) {
                   NUM <- Veach[[i]] + Vctrl * risks[[i]]^2
                   SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dctrl)
                 }
                 SE <- as.solist(SE)
                 names(SE) <- types
                 result <- list(estimate=risks, SE=SE)
               }
             }
           },
           points = {
             probs <- Deach/Dall
             ## correct small numerical errors
             probs <- clamp01(probs)
             ## trap NaN values
             bad <- badvalues(probs)
             badrow <- matrowany(bad)
             if(any(badrow)) {
               ## apply l'Hopital's rule
               typenn <- imarks[nnwhich(X)]
               probs[badrow, ] <- (typenn == col(probs))[badrow, ]
             }
             if(!relative) {
               if(!se) {
                 result <- probs
               } else {
                 NUM <- Veach * (1-2*probs)
+ Vall * probs^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=probs, SE=SE) } } else { risks <- probs/probs[,icontrol] if(!se) { result <- risks } else { NUM <- Veach + Veach[,icontrol] * risks^2 NUM[,icontrol] <- 0 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=risks, SE=SE) } } }) } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } clamp01 <- function(x) { if(is.im(x)) return(eval.im(pmin(pmax(x, 0), 1))) return(pmin(pmax(x, 0), 1)) } badvalues <- function(x) { if(is.im(x)) x <- as.matrix(x) return(!(is.finite(x) | is.na(x))) } reciprocal <- function(x) 1/x divideifpositive <- function(z, d) { eval.im(ifelse(d > 0, z/d, NA)) } relrisk.ppp }) bw.stoyan <- function(X, co=0.15) { ## Stoyan's rule of thumb stopifnot(is.ppp(X)) n <- npoints(X) W <- Window(X) a <- area(W) stoyan <- co/sqrt(5 * max(1,n)/a) return(stoyan) } bw.relrisk <- function(X, method="likelihood", nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) ## rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] ## Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") n <- npoints(X) marx <- marks(X) method <- pickoption("method", method, c(likelihood="likelihood", leastsquares="leastsquares", ls="leastsquares", LS="leastsquares", weightedleastsquares="weightedleastsquares", wls="weightedleastsquares", WLS="weightedleastsquares")) ## if(method != "likelihood") { ## dummy variables for each type imarks <- as.integer(marx) if(ntypes == 2) { ## 1 = control, 2 = case indic <- (imarks == 2) y01 <- as.integer(indic) } else { indic <- matrix(FALSE, n, ntypes) indic[cbind(seq_len(n), imarks)] <- TRUE y01 <- indic * 1 } X01 <- X %mark% y01 } ## cross-validated bandwidth selection ## determine a range of bandwidth values if(is.null(hmin) || is.null(hmax)) { W <- Window(X) a <- area(W) d <- diameter(as.rectangle(W)) ## Stoyan's rule of thumb applied to the least and most common types mcount <- table(marx) nmin <- max(1, min(mcount)) nmax <- max(1, max(mcount)) stoyan.low <- 0.15/sqrt(nmax/a) stoyan.high <- 0.15/sqrt(nmin/a) if(is.null(hmin)) hmin <- max(minnndist(unique(X)), stoyan.low/5) if(is.null(hmax)) { hmax <- min(d/4, stoyan.high * 20) hmax <- max(hmax, hmin * 2) } } else stopifnot(hmin < hmax) ## h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) ## ## compute cross-validation criterion switch(method, likelihood={ methodname <- "Likelihood" ## for efficiency, only compute the estimate of p_j(x_i) ## when j = m_i = mark of x_i. 
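## The likelihood cross-validation criterion computed below is
##   CV(h) = -(1/n) * sum_i log( phat[m_i](x_i) )
## where phat[j](u) = Dhat_j(u)/Dhat(u) is the leave-one-out kernel
## estimate, with bandwidth h, of the probability that a point at u
## has type j; the optimal h minimises CV(h).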
Dthis <- numeric(n) for(i in seq_len(nh)) { Dall <- density.ppp(X, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) Deach <- density.splitppp(Y, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) split(Dthis, marx) <- Deach pthis <- Dthis/Dall cv[i] <- -mean(log(pthis)) } }, leastsquares={ methodname <- "Least Squares" for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) phat <- as.matrix(phat) cv[i] <- mean((y01 - phat)^2) } }, weightedleastsquares={ methodname <- "Weighted Least Squares" ## need initial value of h from least squares h0 <- bw.relrisk(X, "leastsquares", nh=ceiling(nh/4)) phat0 <- Smooth(X01, sigma=h0, at="points", leaveoneout=TRUE, sorted=TRUE) phat0 <- as.matrix(phat0) var0 <- phat0 * (1-phat0) var0 <- pmax.int(var0, 1e-6) for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) phat <- as.matrix(phat) cv[i] <- mean((y01 - phat)^2/var0) } }) ## optimize result <- bw.optim(cv, h, hname="sigma", creator="bw.relrisk", criterion=paste(methodname, "Cross-Validation"), warnextreme=warn, hargnames=c("hmin", "hmax"), unitname=unitname(X)) return(result) } which.max.im <- function(x) { .Deprecated("im.apply", "spatstat.geom", "which.max.im(x) is deprecated: use im.apply(x, which.max)") ans <- im.apply(x, which.max) return(ans) } spatstat.core/R/rmhResolveTypes.R0000644000176200001440000000612414144333463016553 0ustar liggesusers# # # rmhResolveTypes.R # # $Revision: 1.10 $ $Date: 2019/02/20 03:34:50 $ # # rmhResolveTypes <- function(model, start, control) { # Decide whether a multitype point process is to be simulated. # If so, determine the vector of types. verifyclass(model, "rmhmodel") verifyclass(start, "rmhstart") verifyclass(control, "rmhcontrol") # Different ways of specifying types directly types.model <- model$types types.start <- if(start$given=="x" && is.marked(x.start <- start$x.start)) levels(marks(x.start, dfok=FALSE)) else NULL # Check for inconsistencies if(!is.null(types.model) && !is.null(types.start)) if(!isTRUE(all.equal(types.model, types.start))) stop("marks in start$x.start do not match model$types") types.given <- if(!is.null(types.model)) types.model else types.start types.given.source <- if(!is.null(types.model)) "model$types" else "marks of x.start" # Different ways of implying the number of types ntypes.beta <- length(model$par[["beta"]]) ntypes.ptypes <- length(control$ptypes) ntypes.nstart <- if(start$given == "n") length(start$n.start) else 0 mot <- model$trend ntypes.trend <- if(is.null(mot)) 0 else if(is.im(mot)) 1 else if(is.list(mot) && all(unlist(lapply(mot, is.im)))) length(mot) else 0 # Check for inconsistencies in implied number of types (only for numbers > 1) nty <- c(ntypes.beta, ntypes.ptypes, ntypes.nstart, ntypes.trend) nam <- c("model$par$beta", "control$ptypes", "start$n.start", "model$trend") implied <- (nty > 1) if(!any(implied)) ntypes.implied <- 1 else { if(length(unique(nty[implied])) > 1) stop(paste("Mismatch in numbers of types implied by", commasep(sQuote(nam[implied])))) ntypes.implied <- unique(nty[implied]) ntypes.implied.source <- (nam[implied])[1] } # Check consistency between types.given and ntypes.implied if(!is.null(types.given) && ntypes.implied > 1) if(length(types.given) != ntypes.implied) stop(paste("Mismatch between number of types in", types.given.source, "and length of", ntypes.implied.source)) # Finally determine the types if(model$multitype.interact) { # There MUST be a types vector types <- if(!is.null(types.given)) 
types.given else if(ntypes.implied > 1) 1:ntypes.implied else stop("Cannot determine types for multitype process") } else { types <- if(!is.null(types.given)) types.given else if(ntypes.implied > 1) 1:ntypes.implied else 1 } ntypes <- length(types) # If we are conditioning on the number of points of each type, # make sure the starting state is appropriate if(control$fixing == "n.each.type") { if(start$given == "n" && ntypes.nstart != ntypes) stop("Length of start$n.start not equal to number of types.\n") else if(start$given == "x" && length(types.given) != ntypes) stop("Marks of start$x.start do not match number of types.\n") } return(types) } spatstat.core/R/predict.ppm.R0000644000176200001440000006661314144333463015636 0ustar liggesusers# # predict.ppm.S # # $Revision: 1.111 $ $Date: 2019/04/27 07:22:19 $ # # predict.ppm() # From fitted model obtained by ppm(), # evaluate the fitted trend or conditional intensity # at a grid/list of other locations # # # ------------------------------------------------------------------- predict.ppm <- local({ ## ## extract undocumented/outdated arguments, and trap others ## xtract <- function(..., newdata=NULL, sumobj=NULL, E=NULL, total=NULL, getoutofjail=FALSE) { if(!is.null(newdata)) warning(paste("The use of the argument", sQuote("newdata"), "is out-of-date. See help(predict.ppm)")) if(!is.null(total)) message(paste("The use of the argument", sQuote("total"), "is out-of-date. See help(predict.ppm)")) trap.extra.arguments(..., .Context="In predict.ppm") return(list(sumobj=sumobj, E=E, total=total, getoutofjail=getoutofjail)) } ## ## confidence/prediction intervals for number of points predconfPois <- function(region, object, level, what=c("estimate", "se", "confidence", "prediction")) { what <- match.arg(what) stopifnot(0 < level && level < 1) lam <- predict(object, window=region) mu.hat <- integral.im(lam) if(what == "estimate") return(mu.hat) mo <- model.images(object, W=as.owin(lam)) ZL <- unlist(lapply(mo, function(z, w) integral.im(eval.im(z * w)), w = lam)) ZL <- matrix(ZL, nrow=1) var.muhat <- as.numeric(ZL %*% vcov(object) %*% t(ZL)) sd.muhat <- sqrt(var.muhat) if(what == "se") return(sd.muhat) alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) out <- switch(what, confidence = mu.hat + qnorm(pp) * sd.muhat, prediction = qmixpois(pp, mu.hat, sd.muhat, I)) names(out) <- paste0(signif(100 * pp, 3), "%") out } typepublic <- c("trend", "cif", "intensity", "count") typeaccept <- c(typepublic, "lambda", "se", "SE", "covariates") typeuse <- c(typepublic, "cif", "se", "se", "covariates") predict.ppm <- function(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ignore.hardcore=FALSE, ..., dimyx=NULL, eps=NULL, new.coef=NULL, check=TRUE, repair=TRUE) { interval <- match.arg(interval) ## extract undocumented arguments xarg <- xtract(...) 
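## Commentary on predconfPois() above (a sketch of the mathematics):
## for a fitted Poisson model the expected count in a region B is
##   mu = integral of lambda.hat over B,
## and by the delta method var(mu.hat) ~ Z %*% vcov(object) %*% t(Z),
## where Z[k] = integral over B of Z_k(u) * lambda.hat(u) du and Z_k
## is the k-th canonical covariate (from model.images).  A "confidence"
## interval is mu.hat + qnorm(c(a/2, 1-a/2)) * sd; a "prediction"
## interval uses qmixpois(), the quantiles of a Poisson count whose
## mean carries this normal estimation error.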
sumobj <- xarg$sumobj E <- xarg$E total <- xarg$total getoutofjail <- xarg$getoutofjail ## match 'type' argument including 'legacy' options seonly <- FALSE if(missing(type)) type <- type[1] else { if(length(type) > 1) stop("Argument 'type' should be a single value") mt <- pmatch(type, typeaccept) if(is.na(mt)) stop("Argument 'type' should be one of", commasep(sQuote(typepublic), " or ")) type <- typeuse[mt] if(type == "se") { if(!getoutofjail) message(paste("Outdated syntax:", "type='se' should be replaced by se=TRUE;", "then the standard error is predict(...)$se")) type <- "trend" se <- TRUE seonly <- TRUE } } if(!is.null(total)) { message("Outdated argument 'total': use 'window' and set type='count'") type <- "count" if(!is.logical(total)) window <- if(is.tess(total)) total else as.owin(total) } ## model <- object verifyclass(model, "ppm") ## if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(missing(correction) || is.null(correction)) correction <- object$correction fitcoef <- coef(object) if(!is.null(new.coef)) { ## validate coefs if(length(new.coef) != length(fitcoef)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef))) coeffs <- new.coef } else { coeffs <- fitcoef } ## find out what kind of model it is if(is.null(sumobj)) sumobj <- summary(model, quick="entries") # undocumented hack! # stationary <- sumobj$stationary poisson <- sumobj$poisson marked <- sumobj$marked multitype <- sumobj$multitype notrend <- sumobj$no.trend changedcoef <- sumobj$changedcoef || !is.null(new.coef) trivial <- poisson && notrend need.covariates <- sumobj$uses.covars covnames.needed <- sumobj$covars.used if(sumobj$antiquated) warning("The model was fitted by an out-of-date version of spatstat") ## determine mark space if(marked) { if(!multitype) stop("Prediction not yet implemented for general marked point processes") else types <- levels(marks(sumobj$entries$data)) } ## For Poisson models cif=intensity=trend if(poisson && type %in% c("cif", "intensity")) type <- "trend" ## ............. trap un-implemented cases ................... ## Standard errors not yet available for cif, intensity if(se && type %in% c("cif", "intensity")) stop(paste("Standard error for", type, "is not yet implemented"), call.=FALSE) ## Intervals are only available for unmarked Poisson models if(type == "count" && interval != "none" && (marked || !poisson)) { stop(paste0(interval, " intervals for counts are only implemented for", if(marked) " unmarked" else "", if(!poisson) " Poisson", " models"), call.=FALSE) } if(interval == "prediction" && type != "count") stop("Prediction intervals are only available for type='count'", call.=FALSE) if(interval == "confidence" && type %in% c("intensity", "cif")) stop(paste("Confidence intervals are not yet available for", type), call.=FALSE) estimatename <- if(interval == "none") "estimate" else interval ## ............. start computing ............................. 
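## Illustrative calls for type="count" (not executed; 'cells' is a
## standard dataset and the model formula is an assumed example):
##   fit <- ppm(cells ~ x)
##   predict(fit, type="count")                        # expected total count
##   predict(fit, type="count", se=TRUE)               # estimate and SE
##   predict(fit, type="count", interval="prediction") # 95% prediction interval
##   predict(fit, window=quadrats(Window(cells), 2, 2),
##           type="count", interval="confidence")      # one interval per quadrat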
## Total count in a region
if(type == "count") {
  ## point or interval estimate, optionally with SE
  if(is.null(window)) {
    ## domain of the original data
    if(!seonly) est <- predconfPois(NULL, model, level, estimatename)
    if(se) sem <- predconfPois(NULL, model, level, "se")
  } else if(is.tess(window)) {
    ## quadrats
    tilz <- tiles(window)
    if(!seonly) {
      est <- lapply(tilz, predconfPois,
                    object=model, level=level, what=estimatename)
      est <- switch(interval,
                    none = unlist(est),
                    confidence = ,
                    prediction = t(simplify2array(est)))
    }
    if(se) sem <- sapply(tilz, predconfPois,
                         object=model, level=level, what="se")
  } else {
    ## window
    if(!seonly) est <- predconfPois(window, model, level, estimatename)
    if(se) sem <- predconfPois(window, model, level, "se")
  }
  if(!se) return(est)
  if(seonly) return(sem)
  result <- list(est, sem)
  names(result) <- c(estimatename, "se")
  return(result)
}
## ..... Predict a spatial function .......
if(interval != "none") {
  ## Prepare for confidence interval
  alpha2 <- (1-level)/2
  pp <- sort(c(alpha2, 1-alpha2))
  ci.names <- paste0(signif(100 * pp, 3), "%")
  ci.q <- qnorm(pp)
}
## determine what kind of output is required:
## (arguments present)         (output)
##   window, ngrid          -> image
##   locations (mask)       -> image
##   locations (image)      -> image
##   locations (rectangle)  -> treat locations as 'window'
##   locations (polygonal)  -> treat locations as 'window'
##   locations (other)      -> data frame
##
if(is.im(locations)) locations <- as.owin(locations)
if(is.null(window) && is.owin(locations) && !is.mask(locations)) {
  window <- locations
  locations <- NULL
}
#' incompatible:
if(!is.null(locations)) {
  #' other arguments are incompatible
  offending <- c(!is.null(ngrid), !is.null(dimyx), !is.null(eps))
  if(any(offending)) {
    offenders <- c("ngrid", "dimyx", "eps")[offending]
    nbad <- sum(offending)
    stop(paste(ngettext(nbad, "The argument", "The arguments"),
               commasep(sQuote(offenders)),
               ngettext(nbad, "is", "are"),
               "incompatible with", sQuote("locations")),
         call.=FALSE)
  }
}
#' equivalent:
if(!is.null(ngrid) && !is.null(dimyx))
  warning(paste("The arguments", sQuote("ngrid"), "and", sQuote("dimyx"),
                "are equivalent: only one should be given"),
          call.=FALSE)
ngrid <- ngrid %orifnull% dimyx
if(is.null(ngrid) && is.null(locations))
  ## use regular grid
  ngrid <- rev(spatstat.options("npixel"))
want.image <- is.null(locations) || is.mask(locations)
make.grid <- !is.null(ngrid)
## ############## Determine prediction points  #####################
if(!want.image) {
  ## (A) list of (x,y) coordinates given by `locations'
  xpredict <- locations$x
  ypredict <- locations$y
  if(is.null(xpredict) || is.null(ypredict)) {
    xy <- xy.coords(locations)
    xpredict <- xy$x
    ypredict <- xy$y
  }
  if(is.null(xpredict) || is.null(ypredict))
    stop(paste("Don't know how to extract x,y coordinates from",
               sQuote("locations")))
  ## marks if required
  if(marked) {
    ## extract marks from data frame `locations'
    mpredict <- locations$marks
    if(is.null(mpredict))
      stop(paste("The argument", sQuote("locations"),
                 "does not contain a column of marks",
                 "(required since the fitted model",
                 "is a marked point process)"))
    if(is.factor(mpredict)) {
      ## verify mark levels match those in model
      if(!isTRUE(all.equal(levels(mpredict), types))) {
        if(all(levels(mpredict) %in% types))
          mpredict <- factor(mpredict, levels=types)
        else
          stop(paste("The marks in", sQuote("locations"),
                     "do not have the same levels as",
                     "the marks in the model"))
      }
    } else {
      ## coerce to factor if possible
      if(all(mpredict %in% types))
        mpredict <- factor(mpredict, levels=types)
      else
        stop(paste("The marks in",
sQuote("locations"), "do not have the same values as the marks in the model")) } } } else { ## (B) pixel grid of points if(!make.grid) ## (B)(i) The grid is given in `locations' masque <- locations else { ## (B)(ii) We have to make the grid ourselves ## Validate ngrid if(!is.null(ngrid)) { if(!is.numeric(ngrid)) stop("ngrid should be a numeric vector") ngrid <- ensure2vector(ngrid) } if(is.null(window)) window <- sumobj$entries$data$window masque <- as.mask(window, dimyx=ngrid, eps=eps) } ## Hack ----------------------------------------------- ## gam with lo() will not allow extrapolation beyond the range of x,y ## values actually used for the fit. Check this: tums <- termsinformula(model$trend) if(any( tums == "lo(x)" | tums == "lo(y)" | tums == "lo(x,y)" | tums == "lo(y,x)") ) { ## determine range of x,y used for fit gg <- model$internal$glmdata gxr <- range(gg$x[gg$SUBSET]) gyr <- range(gg$y[gg$SUBSET]) ## trim window to this range masque <- intersect.owin(masque, owin(gxr, gyr)) } ## ------------------------------------ End Hack ## ## Finally, determine x and y vectors for grid rxy <- rasterxy.mask(masque, drop=TRUE) xpredict <- rxy$x ypredict <- rxy$y } ## ################ CREATE DATA FRAME ########################## ## ... to be passed to predict.glm() ## ## First the x, y coordinates if(!marked) newdata <- data.frame(x=xpredict, y=ypredict) else if(!want.image) newdata <- data.frame(x=xpredict, y=ypredict, marks=mpredict) else { ## replicate nt <- length(types) np <- length(xpredict) xpredict <- rep.int(xpredict,nt) ypredict <- rep.int(ypredict,nt) mpredict <- rep.int(types, rep.int(np, nt)) mpredict <- factor(mpredict, levels=types) newdata <- data.frame(x = xpredict, y = ypredict, marks=mpredict) } ## ## Next the external covariates, if any ## if(need.covariates) { if(is.null(covariates)) { ## Extract covariates from fitted model object ## They have to be images. 
oldcov <- model$covariates if(is.null(oldcov)) stop("External covariates are required, and are not available") if(is.data.frame(oldcov)) stop(paste("External covariates are required.", "Prediction is not possible at new locations")) covariates <- oldcov } ## restrict to covariates actually required for formula covariates <- if(is.data.frame(covariates)) { covariates[,covnames.needed, drop=FALSE] } else covariates[covnames.needed] covfunargs <- model$covfunargs covariates.df <- mpl.get.covariates(covariates, list(x=xpredict, y=ypredict), "prediction points", covfunargs) newdata <- cbind(newdata, covariates.df) } ## ###### Set up prediction variables ################################ ## ## Provide SUBSET variable ## if(is.null(newdata$SUBSET)) newdata$SUBSET <- rep.int(TRUE, nrow(newdata)) ## ## Dig out information used in Berman-Turner device ## Vnames: the names for the ``interaction variables'' ## glmdata: the data frame used for the glm fit ## glmfit: the fitted glm object ## if(!trivial) { Vnames <- model$internal$Vnames vnameprefix <- model$internal$vnameprefix glmdata <- getglmdata(model) glmfit <- getglmfit(model) if(object$method=="logi") newdata$.logi.B <- rep(glmdata$.logi.B[1], nrow(newdata)) } ## Undocumented secret exit if(type == "covariates") return(list(newdata=newdata, mask=if(want.image) masque else NULL)) ## ########## COMPUTE PREDICTION ############################## ## ## Compute the predicted value z[i] for each row of 'newdata' ## Store in a vector z and reshape it later ## ## ## ############################################################# needSE <- se || (interval != "none") attribeauts <- list() if(trivial) { ## ########### UNIFORM POISSON PROCESS ##################### lambda <- exp(coeffs[[1]]) if(needSE) { npts <- nobs(model) se.lambda <- lambda/sqrt(npts) } switch(interval, none = { z <- rep.int(lambda, nrow(newdata)) }, confidence = { z <- matrix(lambda + se.lambda * ci.q, byrow=TRUE, nrow=nrow(newdata), ncol=2, dimnames=list(NULL, ci.names)) }, stop("Internal error: unreached")) if(se) zse <- rep.int(se.lambda, nrow(newdata)) ## ############################################################## } else if((type %in% c("trend", "intensity")) || poisson) { ## ## ########## COMPUTE TREND ################################### ## ## set explanatory variables to zero ## zeroes <- numeric(nrow(newdata)) for(vn in Vnames) newdata[[vn]] <- zeroes ## ## predict trend ## z <- lambda <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## if(type == "intensity") z <- PoisSaddle(z, fitin(model)) ## if(needSE) { ## extract variance-covariance matrix of parameters vc <- vcov(model) ## compute model matrix fmla <- rhs.of.formula(formula(glmfit)) # mf <- model.frame(fmla, newdata, ..., na.action=na.pass) # mm <- model.matrix(fmla, mf, ..., na.action=na.pass) mf <- model.frame(fmla, newdata, na.action=na.pass) mm <- model.matrix(fmla, mf, na.action=na.pass) if(nrow(mm) != nrow(newdata)) stop("Internal error: row mismatch in SE calculation") ## compute relative variance = diagonal of quadratic form if(ncol(mm) != ncol(vc)) stop("Internal error: column mismatch in SE calculation") vv <- quadform(mm, vc) ## standard error SE <- lambda * sqrt(vv) if(se) zse <- SE if(interval == "confidence") { z <- lambda + outer(SE, ci.q, "*") colnames(z) <- ci.names } } ## ############################################################ } else if(type == "cif" || type =="lambda") { ## ####### COMPUTE FITTED CONDITIONAL INTENSITY ################ ## ## set up arguments inter <- model$interaction 
if(!missing(X)) stopifnot(is.ppp(X)) W <- as.owin(data.ppm(model)) U <- ppp(newdata$x, y=newdata$y, window=W, check=FALSE) if(marked) marks(U) <- newdata$marks ## determine which prediction points are data points if(is.null(E)) E <- equalpairs(U, X, marked) ## evaluate interaction Vnew <- evalInteraction(X, U, E, inter, correction=correction, splitInf=ignore.hardcore, check=check) if(!ignore.hardcore) { ## Negative infinite values of potential signify cif = zero cif.equals.zero <- matrowany(Vnew == -Inf) } else { ## returned as attribute, unless vacuous cif.equals.zero <- attr(Vnew, "-Inf") %orifnull% logical(nrow(Vnew)) } attribeauts <- c(attribeauts, list(isZero=cif.equals.zero)) ## Insert the potential into the relevant column(s) of `newdata' if(ncol(Vnew) == 1) { ## Potential is real valued (Vnew is a column vector) ## Assign values to a column of the same name in newdata newdata[[Vnames]] <- as.vector(Vnew) ## } else if(is.null(avail <- colnames(Vnew))) { ## Potential is vector-valued (Vnew is a matrix) ## with unnamed components. ## Assign the components, in order of their appearance, ## to the columns of newdata labelled Vnames[1], Vnames[2],... for(i in seq_along(Vnames)) newdata[[Vnames[i] ]] <- Vnew[,i] ## } else { ## Potential is vector-valued (Vnew is a matrix) ## with named components. ## Match variables by name if(all(Vnames %in% avail)) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , vn] } else if(all(Vnames %in% (Pavail <- paste0(vnameprefix, avail)))) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , match(vn, Pavail)] } else stop(paste( "Internal error: unable to match names", "of available interaction terms", commasep(sQuote(avail)), "to required interaction terms", commasep(sQuote(Vnames)) ), call.=FALSE) } ## invoke predict.glm or compute prediction z <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## reset to zero if potential was zero if(!ignore.hardcore && any(cif.equals.zero)) z[cif.equals.zero] <- 0 ## ############################################################### } else stop(paste("Unrecognised type", sQuote(type))) ## ############################################################### ## ## reshape the result ## if(!want.image) { if(!se) { z <- as.vector(z) attributes(z) <- c(attributes(z), attribeauts) out <- z } else if(seonly) { out <- as.vector(zse) } else { z <- as.vector(z) attributes(z) <- c(attributes(z), attribeauts) out <- list(z, as.vector(zse)) names(out) <- c(estimatename, "se") } } else { ## make an image of the right shape and value imago <- as.im(masque, value=1.0) if(!marked && interval=="none") { ## single image if(!se) { out <- imago ## set entries out[] <- z } else if(seonly) { out <- imago out[] <- zse } else { est <- std <- imago est[] <- z std[] <- zse out <- list(est, std) names(out) <- c(estimatename, "se") } } else if(interval != "none") { ## list of 2 images for CI if(!seonly) { hi <- lo <- imago hi[] <- z[,1] lo[] <- z[,2] est <- solist(hi, lo) names(est) <- ci.names } if(se) { std <- imago std[] <- zse } if(!se) { out <- est } else if(seonly) { out <- std } else { out <- list(est, std) names(out) <- c(estimatename, "se") } } else { ## list of images, one for each level of marks out <- list() for(i in seq_along(types)) { outi <- imago ## set entries outi[] <- z[newdata$marks == types[i]] out[[i]] <- outi } out <- as.solist(out) names(out) <- as.character(types) } } ## ## FINISHED ## return(out) } predict.ppm }) #################################################################### # # compute pointwise uncertainty of fitted 
intensity # model.se.image <- function(fit, W=as.owin(fit), ..., what="sd") { if(!is.poisson.ppm(fit)) stop("Only implemented for Poisson point process models", call.=FALSE) what <- pickoption("option", what, c(sd="sd", var="var", cv="cv", CV="cv", ce="ce", CE="ce")) W <- as.mask(as.owin(W)) # variance-covariance matrix of coefficients vc <- vcov(fit) np <- dim(vc)[1] # extract sufficient statistic for each coefficient mm <- model.images(fit, W, ...) # compute fitted intensity lam <- predict(fit, locations=W) # initialise resulting image U <- as.im(W) U[] <- 0 # compute pointwise matrix product, assuming vc is symmetric for(i in 1:np) { Si <- mm[[i]] aii <- vc[i,i] U <- eval.im(U + aii * Si^2) if(i > 1) { for(j in 1:(i-1)) { Sj <- mm[[j]] aij <- vc[i,j] twoaij <- 2 * aij U <- eval.im(U + twoaij * Si * Sj) } } } # the matrix product is the relative variance (CV) if(what=="cv") return(U) # relative sd if(what=="ce") { U <- eval.im(sqrt(U)) return(U) } # multiply by squared intensity to obtain variance U <- eval.im(U * lam^2) # variance if(what=="var") return(U) # compute SD and return U <- eval.im(sqrt(U)) return(U) } GLMpredict <- function(fit, data, coefs, changecoef=TRUE, type=c("response", "link")) { ok <- is.finite(coefs) type <- match.arg(type) if(!changecoef && all(ok)) { answer <- predict(fit, newdata=data, type=type) } else { if(inherits(fit, "gam")) stop("This calculation is not supported for GAM fits", call.=FALSE) # do it by hand fmla <- formula(fit) data$.mpl.Y <- 1 fram <- model.frame(fmla, data=data, na.action=NULL) # linear predictor mm <- model.matrix(fmla, data=fram) # ensure all required coefficients are present coefs <- fill.coefs(coefs, colnames(mm)) ok <- is.finite(coefs) # if(all(ok)) { eta <- as.vector(mm %*% coefs) } else { #' ensure 0 * anything = 0 eta <- as.vector(mm[ , ok, drop=FALSE] %*% coefs[ok]) for(j in which(!ok)) { mmj <- mm[, j] nonzero <- is.na(mmj) | (mmj != 0) if(any(nonzero)) eta[nonzero] <- eta[nonzero] + mmj[nonzero] * coefs[j] } } # offset mo <- model.offset(fram) if(!is.null(mo)) { if(is.matrix(mo)) mo <- apply(mo, 1, sum) eta <- mo + eta } switch(type, link = { answer <- eta }, response = { linkinv <- family(fit)$linkinv answer <- linkinv(eta) }) } # Convert from fitted logistic prob. to lambda for logistic fit if(type == "response" && family(fit)$family=="binomial") answer <- fit$data$.logi.B[1] * answer/(1-answer) return(answer) } # An 'equalpairs' matrix E is needed in the ppm class # to determine which quadrature points and data points are identical # (not just which quadrature points are data points). # It is a two-column matrix specifying all the identical pairs. # The first column gives the index of a data point (in the data pattern X) # and the second column gives the corresponding index in U. # The following function determines the equal pair information # from the coordinates (and marks) of U and X alone; # it should be used only if we can't figure out this information otherwise. 
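## A minimal sketch of the expected format (for illustration only;
## 'cells' is a standard point pattern dataset, and U is constructed
## here purely for demonstration):
##
##   X <- cells
##   U <- superimpose(X, runifpoint(20, Window(X)))
##   E <- equalpairs(U, X)
##
## E is a two-column matrix; each row pairs a data point X[E[,1]]
## with the identical quadrature point U[E[,2]].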
equalpairs <- function(U, X, marked=FALSE) { nn <- nncross(U, X) coincides <- (nn$dist == 0) Xind <- nn$which[coincides] Uind <- which(coincides) if(marked) { samemarks <- (marks(X)[Xind] == marks(U)[Uind]) Xind <- Xind[samemarks] Uind <- Uind[samemarks] } return(cbind(Xind, Uind)) } fill.coefs <- function(coefs, required) { # 'coefs' should contain all the 'required' values coefsname <- deparse(substitute(coefs)) nama <- names(coefs) if(is.null(nama)) { #' names cannot be matched if(length(coefs) != length(required)) stop(paste("The unnamed argument", sQuote(coefsname), "has", length(coefs), "entries, but", length(required), "are required"), call.=FALSE) # blithely assume they match 1-1 names(coefs) <- required return(coefs) } stopifnot(is.character(required)) if(identical(nama, required)) return(coefs) inject <- match(nama, required) if(any(notneeded <- is.na(inject))) { warning(paste("Internal glitch: some coefficients were not required:", commasep(sQuote(nama[notneeded]))), call.=FALSE) coefs <- coefs[!notneeded] nama <- names(coefs) inject <- match(nama, required) } y <- numeric(length(required)) names(y) <- required y[inject] <- coefs return(y) } spatstat.core/R/suffstat.R0000644000176200001440000000633114144333464015240 0ustar liggesusers# # suffstat.R # # calculate sufficient statistic # # $Revision: 1.17 $ $Date: 2013/04/25 06:37:43 $ # # suffstat <- function(model, X=data.ppm(model)) { cl <- sys.call() callstring <- short.deparse(cl) verifyclass(model, "ppm") if(!missing(X)) verifyclass(X, "ppp") else X <- NULL inter <- model$interaction func <- if(is.null(inter) || is.poisson(inter)) suffstat.poisson else if(!is.null(ssinter <- inter$suffstat)) ssinter else if(!is.null(ssfamily <- inter$family$suffstat)) ssfamily else suffstat.generic return(func(model, X, callstring)) } suffstat.generic <- function(model, X=NULL, callstring="suffstat.generic") { # This should work for an arbitrary ppm # since it uses the fundamental relation between # conditional intensity and likelihood. # But it is computationally intensive. 
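# In outline: the density of a Gibbs process factorises into a product of
# conditional intensities, so for an exponential family model with
# lambda(u, x) = exp(theta . S(u, x)) the sufficient statistic can be
# accumulated by adding the contributing data points one at a time and
# summing the canonical statistics S(x[i], {x[1], ..., x[i-1]}).
# Each term is extracted below as the last row of the matrix returned by
# partialModelMatrix().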
verifyclass(model, "ppm") coefnames <- names(coef(model)) if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") # refit the model to determine which points are used in pseudolikelihood modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] if(!any(contribute)) # result is zero vector return(0 * coef(model)) # Add points one-by-one # If there are points which don't contribute, condition on them use <- which(contribute) dontuse <- which(!contribute) for(i in seq_along(use)) { prior <- if(i == 1) c() else use[1:(i-1)] prior <- c(dontuse, prior) Xprior <- X[prior] Xcurrent <- X[use[i]] mom <- partialModelMatrix(Xprior, Xcurrent, model, "suffstat") lastrow <- length(prior) + 1 momrow <- mom[lastrow, ] if(i == 1) result <- momrow else result <- momrow + result } names(result) <- coefnames attr(result, "mplsubset") <- NULL return(result) } killinteraction <- function(model) { verifyclass(model, "ppm") ispoisson <- summary(model, quick=TRUE)$poisson if(ispoisson) return(model) # surgery required newmodel <- model newmodel$interaction <- NULL if(!is.null(Vnames <- model$internal$Vnames)) { matches <- names(model$coef) %in% Vnames newmodel$coef <- model$coef[!matches] newmodel$internal$Vnames <- NULL } # the other 'internal' stuff may still be wrong (or `preserved') return(newmodel) } suffstat.poisson <- function(model, X, callstring="suffstat.poisson") { verifyclass(model, "ppm") if(is.null(X)) X <- data.ppm(model) else verifyclass(X, "ppp") if(!is.poisson(model)) stop("Model is not a Poisson process") Empty <- X[numeric(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat") nmom <- ncol(mom) ncoef <- length(coef(model)) if(nmom != ncoef) stop("Internal error: number of columns of model matrix does not match number of coefficients in fitted model") if(nmom > 1 && any(colnames(mom) != names(coef(model)))) warning("Internal error: mismatch between column names of model matrix and names of coefficient vector in fitted model") o1sum <- apply(mom, 2, sum) return(o1sum) } spatstat.core/R/anova.mppm.R0000644000176200001440000002577414144333464015471 0ustar liggesusers# # anova.mppm.R # # $Revision: 1.21 $ $Date: 2020/11/04 02:45:48 $ # anova.mppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL tests.choices <- c("Chisq", "LRT", "Rao", "score", "F", "Cp") tests.avail <- c("Chisq", "LRT", "Rao", "score") tests.random <- c("Chisq", "LRT") tests.Gibbs <- c("Chisq", "LRT") totalnquad <- function(fit) sum(sapply(quad.mppm(fit), n.quad)) totalusedquad <- function(fit) with(fit$Fit$moadf, sum(.mpl.SUBSET)) fmlaString <- function(z) { paste(as.expression(formula(z))) } ## interString <- function(z) { as.interact(z)$creator } anova.mppm <- function(object, ..., test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) { thecall <- sys.call() gripe <- if(warn) do.gripe else dont.gripe argh <- list(...) ## trap outmoded usage if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) ## Check each model is an mppm object if(!all(sapply(objex, is.mppm))) stop(paste("Arguments must all be", sQuote("mppm"), "objects")) ## are all models Poisson? 
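## (this determines which tests are permitted below, and whether the
## adjusted pseudolikelihood ratio of Pace et al (2011) is applied to
## Gibbs models at the end of this function)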
pois <- all(sapply(objex, is.poisson.mppm)) gibbs <- !pois ## single/multiple objects given singleobject <- (length(objex) == 1L) expandedfrom1 <- FALSE if(!singleobject) { ## several objects given ## require short names of models for output argnames <- names(thecall) %orifnull% rep("", length(thecall)) retain <- is.na(match(argnames, c("test", "adjust", "fine", "warn", "override"))) shortcall <- thecall[retain] modelnames <- vapply(as.list(shortcall[-1L]), short.deparse, "") } else if(gibbs) { ## single Gibbs model given. ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) envy <- environment(terms(object)) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) calln <- update(object, fmla, evaluate=FALSE) objex[[n]] <- eval(calln, envy) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## All models fitted using same method? Fits <- lapply(objex, getElement, name="Fit") fitter <- unique(unlist(lapply(Fits, getElement, name="fitter"))) if(length(fitter) > 1) stop(paste("Models are incompatible;", "they were fitted by different methods (", paste(fitter, collapse=", "), ")" )) ## Choice of test if(fitter == "glmmPQL") { # HACK <- spatstat.options("developer") # if(!HACK) # stop("Sorry, analysis of deviance is currently not supported for models with random effects, due to changes in the nlme package", call.=FALSE) ## anova.lme requires different format of `test' argument ## and does not recognise 'dispersion' if(is.null(test)) test <- FALSE else { test <- match.arg(test, tests.choices) if(!(test %in% tests.random)) stop(paste("Test", dQuote(test), "is not implemented for random effects models")) test <- TRUE } } else if(!is.null(test)) { test <- match.arg(test, tests.choices) if(!(test %in% tests.avail)) stop(paste("test=", dQuote(test), "is not yet implemented"), call.=FALSE) if(!pois && !(test %in% tests.Gibbs)) stop(paste("test=", dQuote(test), "is only implemented for Poisson models"), call.=FALSE) } ## Extract glm fit objects fitz <- lapply(Fits, getElement, name="FIT") ## Ensure all models were fitted using GLM, or all were fitted using GAM isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") objex <- lapply(objex, update, use.gam=TRUE) } ## Finally do the appropriate ANOVA opt <- list(test=test) if(fitter == "glmmPQL") { ## anova.lme does not recognise 'dispersion' argument ## Disgraceful hack: ## Modify object to conform to requirements of anova.lme fitz <- lapply(fitz, stripGLMM) for(i in seq_along(fitz)) { call.i <- getCall(objex[[i]]) names(call.i) <- sub("formula", "fixed", names(call.i)) fitz[[i]]$call <- call.i } warning("anova is not strictly valid for penalised quasi-likelihood fits") } else { ## normal case opt <- append(opt, list(dispersion=1)) } result <- try(do.call(anova, append(fitz, opt))) if(inherits(result, "try-error")) stop("anova failed") if(fitter == "glmmPQL" && !singleobject && length(modelnames) == nrow(result)) row.names(result) <- modelnames ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. 
Df" %in% names(result)) { ## count number of quadrature points used in each model nq <- totalusedquad(objex[[1L]]) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) ## change names of models fmlae <- unlist(lapply(objex, fmlaString)) # intrx <- unlist(lapply(objex, interString)) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, # "\t", # intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && !pois) { ## issue warning, if not already given if(warn) warn.once("anovaMppmAdjust", "anova.mppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="all", new.coef=thetaDot, fine=fine) # J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) # H <- if(!logi) JH$A1 else JH$Slog J <- JH$fisher H <- JH$internals$A1 G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- lapply(subfits(bigger), residuals, type="score", drop=TRUE, new.coef=thetaDot, dropcoef=TRUE) U <- sumcompatible(lapply(Res, integral.msr), names(thetaDot)) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ## ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo HooUo <- Hoo %*% Uo ScoreStat <- t(HooUo) %*% solve(Goo) %*% HooUo ## cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) cfac[i] <- ScoreStat/(t(HooUo) %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- 
attr(oldresult, "heading") } } return(result) } sumcompatible <- function(xlist, required) { result <- numeric(length(required)) names(result) <- required for(i in seq_along(xlist)) { x <- xlist[[i]] namx <- names(x) if(!all(ok <- (namx %in% required))) stop(paste("Internal error in sumcompatible:", "list entry", i, "contains unrecognised", ngettext(sum(!ok), "value", "values"), commasep(sQuote(namx[!ok]))), call.=FALSE) inject <- match(namx, required) result[inject] <- result[inject] + x } return(result) } stripGLMM <- function(object) { oldClass(object) <- setdiff(oldClass(object), "glmmPQL") return(object) } anova.mppm }) spatstat.core/MD50000644000176200001440000013302514150203345013355 0ustar liggesusersb562ea578c8a84255c46466faa596425 *DESCRIPTION 5ddfcd4af05df158b8b14102429290eb *NAMESPACE e188baeef35fb93cdc9a37cacba563f6 *NEWS 3fca2c82032a04c4234f8dcbc6811215 *R/FGmultiInhom.R f85ebd958263c02958f6bc1339b3509a *R/Fest.R 9dab1bc42bcd089cc03dc4741e86c0b5 *R/First.R f010f8e0f7b596cfaa0762a5e502a7fb *R/GJfox.R ec11370ccf4fe15a8379d672738b061d *R/Gcom.R 1d7bb030834c482936de898c037b3b14 *R/Gest.R 19e03fe8145c8911acc7ee94230790b6 *R/Gmulti.R 1101d753c8f35af184bfa8ff36a64486 *R/Gres.R 329b5c34b765bf1c99b3a9c9935d574d *R/Hest.R a50201e1887e8d1ee49bd00d99f7f870 *R/Iest.R 74c04764ad8be8db745ba9399c01b6b1 *R/Jest.R 04a04cd3b09338d9385fbd00f7451ffd *R/Jinhom.R 07a08c1e5b5f1e72d43615e07ded63fd *R/Jmulti.R 2b9b60b66e387026fcb0776b1a6a33b5 *R/Kcom.R f140d594542cbaeed17a221e0ee0a01a *R/Kest.R d154eb2c8fd9f312e9842a8ff213e71b *R/Kinhom.R 2537e15c9c84dd90680d521c885f2493 *R/Kmeasure.R a8742620874e75312563d5844b76ac3d *R/Kmodel.R b23c0a00a75c7db5dc94479a9f66486e *R/Kmulti.R c7664376634320971f45c22a98542877 *R/Kmulti.inhom.R abbf6db440f53371a4f757cb17274de6 *R/Kres.R e02e33a946af3db753bc986a091c6bb9 *R/Kscaled.R 94033aebde371087c4c246cb9fc7ef16 *R/Ksector.R 15200464f18f33ee9e035c9121666bb4 *R/Tstat.R b58a1f9410f53357b581a729e8e2fadb *R/aaaa.R 6bfaccaa0b449d11076b688e550f23c7 *R/adaptive.density.R 2fa5f6e6a977cf98dd054fcf9af98774 *R/addvar.R 948e229cfe013701e32cbf3690856c2d *R/allstats.R 9bcbadd067e91185938599e5bca1650e *R/alltypes.R b1e84c3ea4d011c5e1cd76ab2f34e3bc *R/anova.mppm.R 1b6568b2e73859b71afc07d02d5e596a *R/anova.ppm.R 1a4cdd73107a936709d5c83f3b39783d *R/areainter.R 67df572a7188ea3700e6aea0893262c6 *R/auc.R d4db6751867578ce0bcb408cb1284ec7 *R/augment.msr.R 7270a0b79a1555829f4a7b6417d31d33 *R/badgey.R 85b90545fe7081b0deef6d0b2ed2481a *R/bc.R 1645e57c9a2da31108b5d0b273b117cd *R/bermantest.R edde9119c366c2e54fc6ecbf212d89cc *R/blur.R 9e05d38aa26b0d4d58eab7df3abcf7b6 *R/bw.CvL.R 257ba38586dc7288bc2422ef471e4946 *R/bw.CvLHeat.R 3ea771d435305f127425775d8719122b *R/bw.abram.R 6345f06bb1619891022709eabc9512c6 *R/bw.diggle.R 15bb45e5bff910b091d22a085ef52bb1 *R/bw.frac.R c91f49d53642448c7fb002e3854bf078 *R/bw.optim.R ee83e610fc062b34f9a0b171475e477d *R/bw.pcf.R bec4040fb4a1ace18101597b42d10954 *R/bw.ppl.R c13e3ad08d78e1f397e769bd13bb0393 *R/bw.pplHeat.R 7458e40ffda1e5eda31f1fcf78548f17 *R/bw.scott.R 8be0fcaa6b54ab8452154d1b6edad38e *R/cdf.test.mppm.R ab67511805e8cf58ea433a5fb32fa18e *R/cdftest.R ab416fd2e977a6192d5764c5dce2eda9 *R/circdensity.R c17c44e7db04f6205bbcc061b9bdf7fe *R/clarkevans.R b8bbd4171f899c3564c91d995fb7a85c *R/clusterfunctions.R 39621e25ce86244ce7a10efa12234b94 *R/clusterinfo.R b2a666ee329acd511ee4f5f10dad5c79 *R/clusterset.R 8b56cc31365f5a6fa4fa148894eb0a67 *R/compareFit.R 49649c7b2fa2c3789f7392dd31bb534f *R/compileK.R a00723d970d1312c47c47dd2ada3ba37 *R/concom.R 507c554ad23eb96d9f5e7dde076fb80e *R/covariates.R
93e6478f97c6e79a0d56f717d1cde65c *R/dclftest.R 53e0fb6d1af23e321722eea5e595653a *R/defaultwin.R 7c3d398e400b4d4040b0ed24366460ba *R/density.ppp.R 66e287c90a8297f594e7bb222f4c01a3 *R/density.psp.R 49c5fb62b9372124b348b7c3ae670a06 *R/densityAdaptiveKernel.R 1660cc62e20d361d9645ce34b37b7213 *R/densityHeat.ppp.R 8b2eabdfa0be0e04f5f621dacca45a16 *R/densityVoronoi.R 27295ba2cde048cea8cf3498d1a3eb0a *R/densityfun.R 6706d26fc720c429660657026da0ccac *R/derivfv.R 7f9ddd392a19a4c4a0ac2601bb086905 *R/detPPF-class.R a9e236eec70c2542bb8a64743f3c2fc9 *R/detpointprocfamilyfun.R 19f251b81e75d58d5d7445baca29bfb3 *R/dffit.R dd0e83246ae7dfeb316758bf44834c2d *R/dg.R 5e66af20667edc1ef3bc6d67024a1964 *R/dgs.R bed9220a5fa073e11ec9d3b4a2029d84 *R/diagnoseppm.R eef012a6deaa2493e4229326d3cced70 *R/digestCovariates.R e655325ba277b30b0f41bb8c3e52e7e1 *R/distcdf.R 8f729c84eb13478ab4afd3cc070172ca *R/dppm.R 3e07b7fd60e16cd4fe992e5f9a13c54b *R/dppmclass.R ed7beb4930906f00a1d7b9f0fa47f18c *R/dummify.R bb43d5912b671ced5a4054b97aac7a97 *R/edgeRipley.R b769a2c2a0e7da802acb15c9b0eaec4e *R/edgeTrans.R deb500eb5d874c0415fb4416c8fe4948 *R/eem.R a816a44b139137b878fb2bcc08c5e4b8 *R/effectfun.R 8a99321ade6784a5288e6d1345727bc3 *R/envelope.R 87e73cf8b09b29048114ce7892d5f0c6 *R/envelope3.R 02451a3ecbd36a9539349db272257058 *R/envelopeArray.R e330bb6fa7a99e028497cf61aa37905c *R/eval.fasp.R 571362947a7935e6409074cceac8ef5f *R/eval.fv.R 204710fbf1017b2e58fe1a818d28c7cd *R/evalcovar.R 5517c54a9ef09df7f90b027b9e491e99 *R/evalcovarslrm.R cf259b89afe368a0b167a6c58c615648 *R/exactMPLEstrauss.R 91ec945d73992ab05d1c4bcf988e41b9 *R/fasp.R 960ed7399c372c33ed40499f836ea52d *R/fgk3.R b8c11669d029f443d369266aa4334f17 *R/fii.R 506817a9c8e55a5531d015cb26ebc753 *R/fiksel.R 087b82c7dcc47cbd94b449b8a3c2e5e3 *R/fitted.mppm.R 291bb8b3370e56318147e57573f0ab6c *R/fitted.ppm.R ab7804845d8482aa1398fe8b26941365 *R/fryplot.R 00eb6f24f34b7f0c3bdfb1909964c3f5 *R/fv.R 05ee38c50c092dd9251d489c7049e79e *R/geyer.R 72b9c77c0ba7a28d99e777a85687b4a9 *R/hackglmm.R 9b3c9727fdceeab5be3ed00dc639ddce *R/hardcore.R f1e16ee9c975eda27002f44980d1ea57 *R/harmonic.R 7576337da12b9579db9130c9e609822f *R/hasenvelope.R 123085aba3859361ca3f643e7c956c6f *R/hermite.R 0fb7e0bb6cdf5a5a43a68bf3f63b04c4 *R/hierarchy.R 145717c89dabd410a2a3debb5ae312a9 *R/hierhard.R 50d8974e5876fe5588c01c5a68ebc176 *R/hierpair.family.R 3c1a28af26ec7ce13f8b8176dc74b64f *R/hierstrauss.R 8403d327d788f178e589a59ff3e0a0a5 *R/hierstrhard.R cb789e61235a77960c6b51d880da4c8d *R/ho.R e3447ff115c6062ebb3c5d3f30b68f1b *R/hopskel.R 9e92c403cd74fa47cb4237f76136ef32 *R/hotbox.R bef6d9101f9409d566d76059ed9f3993 *R/hybrid.R 51f886a93b22602995481e91600bc048 *R/hybrid.family.R 0427fe10b8680200e8590c7e5e854d8b *R/ic.kppm.R 5173a3015b8500eeb7466101e521aa9f *R/idw.R b5d86a0ef4b0631df4e32747acd13171 *R/inforder.family.R 61c0cc46b0fd68eed5b351f58e0d2333 *R/intensity.ppm.R 6a01dae445ef5025d3ded7d8885d9163 *R/interact.R 4f4186f0bf17ce0709633256331d939f *R/interactions.R 4ecfc8a655d6c827da464741771443d2 *R/ippm.R 28efc8c34c3e37e4c4f8dd456549f069 *R/is.R 51c369d401fb72499fb7d75b051a63ad *R/is.cadlag.R 849cacf0b093f2199ac0b79bc517f0e6 *R/kernel2d.R 3bcee3e4bca044d4e7fe19b5d25809fc *R/kernels.R 79abe3ad5c54621a3dc219c8051ad102 *R/kmrs.R 5e70dc812326d7743778ef99d939f567 *R/kppm.R 27d52190551ff19e3409d6f4234ce34a *R/laslett.R 74171b1aeb5177def1360974a23a385f *R/lennard.R 105ccad643da567feb3c05a34c55aacc *R/leverage.R 67c395118252398fc41b5e2c547ae19d *R/localK.R ebae15a6845b61339816a4e6f834b154 *R/localKcross.R a292787819d4a714dbcc1913fbc14251 
*R/localpcf.R ef71edad32656c539a8828b26e598cf9 *R/logistic.R e399296061428b6a64778d0c403d0be9 *R/lohboot.R e18a44e4b02a7a978175912e01f71a7d *R/lurking.R 41c1e28d14fdde91a9f25ec1a8c6a273 *R/lurkmppm.R e2d6aae303fcfa0b2d026508e4595092 *R/lurkslrm.R 3560c758da89961b5286e6aeba152fb8 *R/markcorr.R 95cda2e43a41b86f31c6b8d68d7a62b0 *R/markmark.R 6a3d9ee234a765226a04a6c3c23663eb *R/marktable.R 279eac892afad97c66bc183506ec23fa *R/measures.R 2cf10f6981f784cbc2752dbaed7b3c57 *R/metriccontact.R a19c01e253d85bfda2d97185a1cf680b *R/mincontrast.R 9649ecae60b096f55b55ac25acc84674 *R/model.depends.R 37b2bde043a9b1540160c76895ac67ac *R/morisita.R 4e6e720c365e56d93a0b0739ab06420a *R/mpl.R 33f5816e7ed5ac74b92e41047e24c1f9 *R/mppm.R 2317e1ee00079f7ae2a7c7015b7089ce *R/multihard.R 23ade743ff6f39f31ff1bf52ee73f088 *R/multipair.util.R 21dee90242243823e6504b70762e3135 *R/multistrauss.R 641a9b05a22e90e13c6e62a73f1bffa3 *R/multistrhard.R 2573fd696ecc7d7e1507f1f59fe4c98f *R/newformula.R 756e005fb50a6e5636a8e695f2ddc6ee *R/nnclean.R aba4829868abf5130b72f8f5eaee0da8 *R/nncorr.R eba47f8399e92f6bdd92ad6504973280 *R/nndensity.R 1ddc55fe21a5f54976da16fb1c691de4 *R/nnorient.R 4739735bb8a28d2e36f8c2b79b7e06cc *R/objsurf.R 0579ac687f57e918145495b397827a03 *R/ord.R 351116d5be6f0c962874e927ccf6c416 *R/ord.family.R f48d82982ecb34d893bfce2ef96a8a09 *R/ordthresh.R 718a11138e5e35a7916dd260f0d052e7 *R/pairorient.R a205ebe3d826cce02e8199aec096e857 *R/pairpiece.R a09a11b9c4ca4f128a81cb787214fa8a *R/pairs.im.R 466b544caf507d4c55ab60882d0b7945 *R/pairsat.family.R b9643bbc4c4e7a336fcc535afc002c58 *R/pairwise.R d9268f4e14abc147ec97bf2136f167af *R/pairwise.family.R 7e219c0e44487ff76d3c06bb86a48984 *R/parameters.R 213e9441cdf895b84d9e5caaaa33bf48 *R/parres.R c89b46a4985e54bc70da276599d958c6 *R/pcf.R 5df27bc7376ec960329dd06e811dfda7 *R/pcfinhom.R c76e896661080247e9b5e95e3d1cab0b *R/pcfmulti.R 8dfd5c29c944568943c7144165e91bab *R/pcfmulti.inhom.R 7e1de57c9c386631f2ef7313259883d8 *R/penttinen.R dfb884749a05ca2792358f6d1ff35f0e *R/percy.R 99a1c34b91d40b26b39e3d57901cfedf *R/pkgRandomFields.R 1ebc98fb317174aceaf0f56a1b2e14a2 *R/plot.fasp.R b3e67e84cb7f5496ec80e0b202e6d5a6 *R/plot.fv.R aaee58845ed4bc789341c903588d2226 *R/plot.mppm.R 92821efba1ccc7d45900904b4b0e2078 *R/plot.plotppm.R 125840a6a0920802ff82d6989a807046 *R/plot.ppm.R 1a0c9f29d3c585dd7bbb9c9872188e05 *R/poisson.R eff91a3e3e11682e74644666801c4fc9 *R/polynom.R bf4b506454fb3eb819ac1400d2828fa8 *R/pool.R b8db4e3b8b463718a25ef53acfa4e6de *R/ppm.R 1130473d1ff342ad8421fb6164fa7e1d *R/ppmclass.R d06c9bdab82a9d64ccabe3154e91bcb9 *R/ppqq.R 8a0e6c4ea34206bd479f2fb8fc558a16 *R/predict.ppm.R c4404cdb19297d33a5b2d3104364fb33 *R/predictmppm.R 45c0c1f40e4ab8eb773e6b6da329918d *R/profilepl.R f1a20af4200e23511ecbc06c75be2e33 *R/progress.R 4c2f7d4fd5d10218bd0fdd3f7395ba9f *R/psst.R 264fe31fba6ceb798e8782eb3b1108eb *R/psstA.R 858b91662669b087811e8ceb212c7ea8 *R/psstG.R d6ce4fc25a5f9e8912c17043b73c8bb0 *R/qqplotppm.R 99aa240d18a1c29ee5156bc18017f63f *R/quadratmtest.R cc7fc0270683fcf28f0f85df7986c0cf *R/quadratresample.R c2fabbd39a7f1fea34c1ff96f63eb692 *R/quadrattest.R 24f39fe4ae6c36437f7fb5caa8cab62c *R/quantiledensity.R 8b23cbbc5a4c642e60296cc4c63d045a *R/rLGCP.R 701c360c61c6aa85286ce476d3ca819c *R/rPSNCP.R 68a4cab446af74eb7e258931cea960c5 *R/rPerfect.R 6e0b2255bf8b42f5a236c391b10306de *R/rags.R a180395651623b697546cc40bf6b2bb1 *R/random.R 73b70afa74d324923fd971bc1a4f8bbc *R/randomImage.R e1bbcc1547d522193bc73e1bcd67b4c4 *R/randomNS.R 6b06764b43e80727f9a488c406c12774 *R/randommk.R 
84f9d77208727481e01f7469e955156b *R/randomonlines.R df69dce97b235e1542c7baf23bc0efba *R/randompp3.R dfd3c48e670e9b50da2e21122785a169 *R/randomppx.R 84fc4d1a466962c01ab5467e94e63055 *R/randomseg.R 6639370480c1a574a374d926f2ac2fba *R/randomsets.R 24972883102c87da259be0da8e982db7 *R/randomtess.R 1426e453c87adeefddb5c0fb15592a84 *R/rat.R a8a6f895acc18aa94be66b546be6c17f *R/reach.R 511b88658d51796d9a0daf71b44b9cb4 *R/reduceformula.R 4d75122e5616b6b6b04efc70e01978ff *R/relrisk.R c7084f7d598252e624a777b5c9e2dc1c *R/relrisk.ppm.R ac55aea892428d316f5eff49f587bc63 *R/resid4plot.R 7bed23bc9e49020838f350679afff371 *R/residppm.R 15f1ea6eff30e3b2b696ca6e9a3f5d4f *R/residuals.mppm.R 33df985bd31bccf01ee715c7c4d30854 *R/response.R 4605ed571ba2d76243cfb73f23a05cee *R/rho2hat.R 0a84e0f0cc0c7cb9570346c785dda025 *R/rhohat.R ff1dca8b425a42d0e400f7f06b22a5e8 *R/rhohat.slrm.R f47c0eccaa94d6655967d988635109f5 *R/rknn.R 46885b3952522553234694e60b4bd8e5 *R/rlabel.R 35f1e78c4ec615ebdf02bf522d15dbe7 *R/rmh.R 16f7cf9d8333f9ec391ae32a500ee0ec *R/rmh.default.R ccbf08161a553bb637228e9fc1907e4e *R/rmh.ppm.R 0605d8f7db7997e78130f55a254d025c *R/rmhResolveTypes.R f38b36afeb02d76d09e32dbe5f7d368e *R/rmhcontrol.R 15998f271ee8c97b82a16a551a524cf4 *R/rmhexpand.R ebe0a003a809c854489a9fe926ddcaf8 *R/rmhmodel.R e9b62883c43b618c2eed5ad3729c0a23 *R/rmhmodel.ppm.R 7a33011a8c7aeacbbb495bab033ab723 *R/rmhsnoop.R 112482932314aa9b1dba3ec93d6d0892 *R/rmhstart.R 6378d22817e69ed8dec33950baa86f63 *R/rmhtemper.R 9f1c01b9d7c7325ffba3856909dc44af *R/rose.R bc2341bd181171d63a0df581954e08fd *R/rotmean.R 1d702e818ff73cc0ac5d4ec61d362f47 *R/rppm.R f369866b700d69d132d1735b5ffa68f4 *R/rshift.R cbcc8cf0330e9d44f0524af3e3eff341 *R/rshift.psp.R d6c9955f67b6ba632995629e59fcbea3 *R/satpiece.R 06ef1d6ad3f5b348e3c584f4e5411644 *R/saturated.R 56271cd63234ac950d3681f72a4d9a49 *R/scanstat.R f1550c8f9df98785134b847f36820e70 *R/scriptUtils.R f91342c5f456f5533b80836918f60120 *R/sdr.R 2696737b6ed582332d8dd760c798a6b0 *R/segtest.R 048ef0497b1b7b5b715a35c0d88bd4f9 *R/sharpen.R c0e7adf01137747788fad043b581c8e7 *R/sigtrace.R 046946fe4ed4d6a687c4a2d68b113a22 *R/simulate.detPPF.R f625d144c07da0f4dd364dcfc1bedf33 *R/simulatekppm.R 863ba046928203d12c626f15e8811939 *R/slrm.R 859e817a8e16f5911414ae61d40fdce0 *R/smooth.ppp.R 93488a21301a5817abe392632bb72b12 *R/smoothfun.R 728a43d4f8efb168c4578f4f54123ed2 *R/smoothfv.R 682554e524030a7b60c83f394bb11f01 *R/softcore.R 562a5f049aa85ee2a7c08e55d4d4c867 *R/spatcov.R cfe29d37dc0dfe03b5f1c10b9ddb51ce *R/spatialcdf.R e1f576d06393f8f0c4ecfd88aee54d1a *R/ssf.R 59c7fff4ecd9e618764ba05c7dc838dc *R/stienen.R 8687289dd7e55f57240c3fffadebca77 *R/strauss.R 35f9016037fe08d24060b7e6b0dbadaa *R/strausshard.R 7076c6ca78c74265417f2072818e4cb0 *R/studpermutest.R a23a14156c1972a2c349988106525ec4 *R/subfits.R 5ce5c7351ab113de0450a01fa495759d *R/suffstat.R 3f5d54ba7d7b1d18abedb5e0bda342dc *R/summary.dppm.R b96a8acee06a646b7eb83c1bcb94e9fe *R/summary.kppm.R a381e5857f3e6f8901f17f49f0ea83a6 *R/summary.mppm.R 820d0d994480844474ed2b4dd45f8bc1 *R/summary.ppm.R 441dcb3b4974efb7d81a02bc9fbdb60e *R/sysdata.rda a6ab82dfc4c3abea768dc4a8db66947e *R/thresholding.R f0e499074542e294c6653d82749dda5c *R/transect.R 6852ab624a8f0e62a8e6a362efb8e055 *R/triplet.family.R 954f26a72a4d5e59c68dbe379bbffe26 *R/triplets.R 29609c1886e80eaa12a24182c7e4f580 *R/twostage.R 600d317510863cfba04bd16968bdaabb *R/update.ppm.R 1bf6fbfa1a72ca6f079394b5b94a5b7c *R/varblock.R 3fa341c1e6a2abffa33b52ac3760aa47 *R/varcount.R 3d605462069dd037c0b07cae6b128bcf *R/vblogistic.R 
13de3993f910683b44865cb8b40c3dc5 *R/vcov.kppm.R ec83d35f1589344499f47e423f501ea2 *R/vcov.mppm.R 7d573b993b014d81ff31704a0c58ce52 *R/vcov.ppm.R 689e26f91d6b5d6af48352f284e32eb4 *R/zclustermodel.R 152bbf4b15996c2c583ad5578f14d45b *inst/CITATION d7017434eccc3e432d34a8cd435d8eb5 *inst/doc/packagesizes.txt 0e93004588373429f22a4270bc08ad08 *man/AreaInter.Rd 8fcff68694d6b7af7f927891b608b734 *man/BadGey.Rd 1d8d9afeacbb8e67e56abf455ebfe6d6 *man/CDF.Rd 7366180af6bc59fc6d2cf8d78d1221f3 *man/Concom.Rd 8e29ad13274d5ff6b30740a3abd5730a *man/DiggleGatesStibbard.Rd c59392fc5fa60782bf91c8c0675de144 *man/DiggleGratton.Rd 7035cca796f4e64b0b5089f25351b070 *man/Emark.Rd 7b09687b285ee8d809969ab06106a5f4 *man/Extract.fasp.Rd e50f40eb875c124095941fc99a486f36 *man/Extract.fv.Rd 62c8609253cd6cca6b78e76ead3864f0 *man/Extract.influence.ppm.Rd c165f89bfa18ceb0aafb57ba7c3977f9 *man/Extract.leverage.ppm.Rd 91e440f304e3e4a4e021236bcce45108 *man/Extract.msr.Rd 279ecfbecb82ff618333037b97bb953b *man/Extract.ssf.Rd a97cc15689364ab9c08a8df2b0938bf7 *man/F3est.Rd 54a55d398d1c8fbd315fbcea21d3cdcc *man/Fest.Rd 21ddb0ef529a342599d30a4ed85da941 *man/Fiksel.Rd 5a32cfa81cf904182e58bc52bb7e8fff *man/Finhom.Rd 2cde77506f7625f97adcda2d2056a7d2 *man/FmultiInhom.Rd 19f0bd97c665eaed8b331332d26ca7a7 *man/G3est.Rd 5915cbb8979fa986cf75db47798b1258 *man/Gcom.Rd 540b00c210f51c30aebd8620f0f85826 *man/Gcross.Rd 0f7fb9b0fb9c1a658f34bf706cb1cc73 *man/Gdot.Rd d01b4f83e6feb001f3a60602013e9b0e *man/Gest.Rd 48f800b52cb48c1f124a2dfeba305f29 *man/Geyer.Rd f843643f021473ab058adb8fcc0e77b7 *man/Gfox.Rd b44a7a3ed16775ee3d62fd8eaa950aaa *man/Ginhom.Rd fc20837f195b91fff99f485ff9403fe2 *man/Gmulti.Rd e18298340f5bf46c56b6a0cc4aad6c1a *man/GmultiInhom.Rd b52f37ca1f6a6740a497ca1df561f2f0 *man/Gres.Rd 1e74e44ffe123408322d4e1e3ef967ba *man/Hardcore.Rd 4fb5a69f38eb1906bcc34f9450ea0fdd *man/Hest.Rd 77c49a32e912ecaced766cadad6476ee *man/HierHard.Rd ebcb391ba5dcf25006f76797e8140278 *man/HierStrauss.Rd 1234e600c429e1b4e513e6aafa007cec *man/HierStraussHard.Rd 812ccdb0e684213c08a7638b28a30753 *man/Hybrid.Rd 857f637abeb713f20381e6ad5277852c *man/Iest.Rd 6cad866725a9ff6d151e090ea371be95 *man/Jcross.Rd b81fc316b39f16bd8744184eee832f7d *man/Jdot.Rd a4b9764604f960f6ba23305ba69332f5 *man/Jest.Rd 7b913efdad9b9cb87a1c9426e32678ba *man/Jinhom.Rd c4c794997556f644475e44bfeaaab636 *man/Jmulti.Rd 50515d16ac7ff32a3c890280d725d3bc *man/K3est.Rd e5a3d2d980667e1f6484f12e25bf0332 *man/Kcom.Rd a6773420b4c25a25d66748575914fbb4 *man/Kcross.Rd b6eeeaae285c6563fce6fe7d87d84e68 *man/Kcross.inhom.Rd beef6636fa5f9544799b3bda2b99ab97 *man/Kdot.Rd a41bbc4a12a8e2616cdaa3cc61535114 *man/Kdot.inhom.Rd d4a45032ad50a9daeb149771390b4c53 *man/Kest.Rd e8604ed0e68a6c592107645cb6555b62 *man/Kest.fft.Rd 197f934ef5d34df5fef8cc518d872291 *man/Kinhom.Rd 1a6b32af2b8f01a02c072267551d29be *man/Kmark.Rd 60072daf709b4e8a5e7617942bc06fad *man/Kmeasure.Rd c396e17ccc63be7995b7902317b7f3e6 *man/Kmodel.Rd b334b6e417a927af6c3ce4fd6f5a49b9 *man/Kmodel.dppm.Rd 421fcb36cf31cd17e9514bea3346fed8 *man/Kmodel.kppm.Rd 95d34521ae4303bf01c8b58eecefe2ef *man/Kmodel.ppm.Rd e7c7aee0c9d15e04e124866fe39bb0b7 *man/Kmulti.Rd 9260fa2d2918b899aaf83632bdf2ddcc *man/Kmulti.inhom.Rd 46679770ac593328b1e0f60c769cbdb6 *man/Kres.Rd 2409f9d2191ef5ef6e05b5655c9d094e *man/Kscaled.Rd c5987d1db8f0582adf5d742e70cd7377 *man/Ksector.Rd 9115a22a373040ef2d7209718e4fbe29 *man/LambertW.Rd 340e1092c0ee8f6688db527737f55c64 *man/Lcross.Rd 714a137b2871c12ad00392f6b72a8ee2 *man/Lcross.inhom.Rd a123cc24d96d315daf0aed097a9d136b *man/Ldot.Rd 
d767a56998e421da4fd7fd0b1534189e *man/Ldot.inhom.Rd dd9b87f89f595e1396d0d6d43cfd39b1 *man/LennardJones.Rd 3520190a05bc88aa8e63f5c8ec6fc066 *man/Lest.Rd d7473e640476fb8b4a064acd9c62ee76 *man/Linhom.Rd 26a9db71cd8fa55fdc1eb42afaa2907f *man/MultiHard.Rd b494da5f58cd07afb6c9d04f357bde61 *man/MultiStrauss.Rd bf2dcf70457431c00a3049bb814dbb33 *man/MultiStraussHard.Rd 176bbee178c7111abc5d6a0fe97ba0fd *man/Ops.msr.Rd e61d4cfd0d9bacea2346f5c064f28fe6 *man/Ord.Rd 37b2dff8a8916eea7e7927961b3c86bc *man/OrdThresh.Rd 3856350ef8ce867f1b9fa855082b74f4 *man/PPversion.Rd 42efd403238647f6034e500db374ed5d *man/PairPiece.Rd 404b13dc8185a43d0206f2e54e3878a0 *man/Pairwise.Rd 084575ea7ae835815f09f0f3db1824f4 *man/Penttinen.Rd 8fea21d9d4a6660b661ea55527e85e7c *man/Poisson.Rd 4aa0d6e2647b4e9e5b74fa687b5aee64 *man/SatPiece.Rd 586b157510810340fd0b1f34adba6819 *man/Saturated.Rd 2be113b3b5f5760af7ff4b365799210c *man/Smooth.Rd 466dcdc6cc4b3995e072f9ff9c958ccf *man/Smooth.fv.Rd aa84079654b3b4ed64afb903ca1b71b6 *man/Smooth.msr.Rd cd4d56a4d3ac116acae31ee56df5652b *man/Smooth.ppp.Rd 24591b04699f1b5ebdcfe20b587a7772 *man/Smooth.ssf.Rd 17dc82c299fef9191b2224d0f66cce9a *man/Smoothfun.ppp.Rd 07cfbc769f9ea63f4108bb3081942a03 *man/Softcore.Rd 596d52e68af89e9e40a6f34e58c24a5e *man/Strauss.Rd 533946e6ad1414afaf96a905f959c382 *man/StraussHard.Rd 33d57471f6b4da4fade48b4666838559 *man/Triplets.Rd 207c090d4efc85174fc0d99a0464f89d *man/Tstat.Rd c55ded2c38fea1cf1e3559d59f4852b0 *man/WindowOnly.Rd 4c4ad622e5ebf6861a77732435fd738a *man/adaptive.density.Rd 96d3635cd31ad5fa40a142d03ebf11a6 *man/addvar.Rd b78ff7e8cd5cd682c8f4ac86206b636b *man/allstats.Rd ab143e2d6829d5c50a6579974fb1a02e *man/alltypes.Rd e16e79fc2263b3edacc1ae7116c93fa5 *man/anova.mppm.Rd d30797ca2a0b728a7ece2cb01daf07b1 *man/anova.ppm.Rd d8d90d340989a5c40405dc5a97f5487d *man/anova.slrm.Rd 2dd8d36757c24a29aaed2c837229539f *man/as.data.frame.envelope.Rd 810416aebeb0a7e9a835d057e029debf *man/as.function.fv.Rd ac2273a069a3ce20d19cd8e5a1c4bcb6 *man/as.function.leverage.ppm.Rd 08d940dc470bb44f7e7cfff90770cd1e *man/as.function.rhohat.Rd 7446f065ac754f3e86cd2f48d527fc13 *man/as.fv.Rd 5e20b5864259039bd8ca272aee68027f *man/as.interact.Rd 8958109033ee84063a9046e7ac0cc0e8 *man/as.layered.msr.Rd 35fe0a74c3fd2d727b1d82019e267943 *man/as.owin.Rd dc165e3f06b729b80bd883fb226f700a *man/as.ppm.Rd fe363e94357cd4a0993bd3d784ba6b35 *man/auc.Rd 3df7907e462038f9d2f410983c4be948 *man/bc.ppm.Rd 3e823423aa111229d22e2316db4144a6 *man/berman.test.Rd ae4867fa6019163c598b742de672e4ab *man/bind.fv.Rd 8b83d9212b3eec63678d967432473528 *man/bits.envelope.Rd 930cf60e872e994abf9f6ceae0301142 *man/bits.test.Rd 5fd192ba5b32e2cbb37234429c64d23e *man/blur.Rd 8d1b685083f05c6909591c9c9e85135d *man/bw.CvL.Rd 44f274354e3da4219fe1f78bcc5e8fa7 *man/bw.CvLHeat.Rd 9ba4a417616ce177f390e4f36bc4eb9a *man/bw.abram.Rd 61dbc41fefdd529be0c1db782c08a394 *man/bw.diggle.Rd 44660ec80af7743495e4338c3e31bda5 *man/bw.frac.Rd 7b5aaa582d52175320515f4247c0bc89 *man/bw.pcf.Rd 25856f6538153f1b4545c0e1efa8f972 *man/bw.ppl.Rd cec1256712d6c0037f9cbde52b86ab31 *man/bw.pplHeat.Rd c1ca1a597b98297e0edadfcd3593a071 *man/bw.relrisk.Rd e0dbf3a799e1e83d417740e11fd7a0e5 *man/bw.scott.Rd 5f1ec5269f7d2bbed37da9fc82a564ba *man/bw.smoothppp.Rd c4c09b8ed66a469a7428c9df69157da5 *man/bw.stoyan.Rd 28994ed5410d8bd3381dd6720c33d140 *man/cauchy.estK.Rd 51eee97456b592fe07b4ce325c43abc4 *man/cauchy.estpcf.Rd 984276e53625e36d2389779280324764 *man/cdf.test.Rd c26dbdef9aac99899e3eb0ce15c0a047 *man/cdf.test.mppm.Rd c9d72cc2b707dc4cdc8a091441c0330e *man/circdensity.Rd 
3b351b5f30e29753d670d961d8415c17 *man/clarkevans.Rd 82568157a5cda9a70d0995bed08a850c *man/clarkevans.test.Rd 2e371442c54b15af5deaeb81badf3821 *man/closepaircounts.Rd 0bf2b0d4c4b9a1a5db900de856efcb7e *man/clusterfield.Rd 5ed7e3428163d94a583877715851f082 *man/clusterfit.Rd 738b9a0663126d39af5d30eb50b815fc *man/clusterkernel.Rd b51c47fc2c637f95e7db966e3c34421d *man/clusterradius.Rd 9969967ef0c3cb82ce73a0be6f08fe39 *man/clusterset.Rd dd5b0370ff9308b4ff96985941b94cd7 *man/coef.mppm.Rd 0c3bbbf66c63e7ff5c00665c2d6692dc *man/coef.ppm.Rd 8e1270ae95df370c3c9ef1ec6ec8d3bd *man/coef.slrm.Rd cf6b5cd9e51695212ab5879474032b23 *man/collapse.fv.Rd 94b0f846eb376a7d73f4f08a8430d01e *man/compareFit.Rd 5d8a62e473c5121cec55c4ca1228d44e *man/compatible.fasp.Rd 5adeaf75a5e32133395edf2268406869 *man/compatible.fv.Rd 86f39d6bbc2448fa0a8ea7c8f5405c1b *man/compileK.Rd d42866e7843fe04827c1f8f74c96e9ae *man/cov.im.Rd 6e0c8912ceae9ee89bec670c9de6f135 *man/data.ppm.Rd 5a937a4d7cd2da5b2babdd2066816ae6 *man/dclf.progress.Rd cfe58cc740d905ec11772f662a1115a2 *man/dclf.sigtrace.Rd b32494aa80c8a1a1b28b967e4f3de1b3 *man/dclf.test.Rd 0ec93174356b4c09b9e90c5886dd50b8 *man/default.expand.Rd abb5e748d59b40a306e411526a5b2b17 *man/default.rmhcontrol.Rd 1df1c81d15962fae2faf20d06cdb658b *man/density.ppp.Rd 7fb043b95eb7eb4abb2c0545b46e9aee *man/density.psp.Rd cbda599ab9aac2710d6524721736b59d *man/density.splitppp.Rd 3662820cda030a758c37ebd79016adba *man/densityAdaptiveKernel.Rd fb3da5ac3c01ee30ed13b66a6a31a0e0 *man/densityHeat.Rd 6810e15708a2f408716e9e33245a3ba6 *man/densityHeat.ppp.Rd 2c86350b8b57058e7ea658a068fe21e9 *man/densityVoronoi.Rd 90ba244bb4129c5dc0e17358e7ad56b9 *man/densityfun.Rd 50fca06c24aac752c750d95c8f56f7f6 *man/deriv.fv.Rd 2d3c93bd98b836b4d9e36fbdee0bd596 *man/detpointprocfamilyfun.Rd 0a0b26263084e4f120a8d91c4a53bd72 *man/dfbetas.ppm.Rd dc84b805cd54c511675e9427adf28391 *man/dffit.ppm.Rd ff2cfac07040c5637e8c86d3cbbab741 *man/dg.envelope.Rd d81c8d8e2470d6081243c61dd0829a14 *man/dg.progress.Rd 869ceab579b8674d6f7a686c01d3197b *man/dg.sigtrace.Rd 5edbb7cfbde253a31a72d93c3659126b *man/dg.test.Rd 06e79fa6ccd0342f62e31f27bc7d2a1a *man/diagnose.ppm.Rd 382a56f92a804d52582cf716cdf79b09 *man/dim.detpointprocfamily.Rd b72e48220d7edbac9fc1686c28abd50f *man/dimhat.Rd cb14c5eaad68f99a4ae6c63609a01c19 *man/distcdf.Rd 80cc6cd76ccc874db32f2df71e16435b *man/dkernel.Rd 8203fba31ada9ac91ebc681f14b3ab27 *man/dmixpois.Rd 5c66ef28fc24bc784bc7ea76f48e8805 *man/domain.Rd e93b2a5629b7d0568ee47583179a89eb *man/dppBessel.Rd 80e324406c3b304135aaacfbc5965428 *man/dppCauchy.Rd 417e2a934a63afe19d76cacdddc90775 *man/dppGauss.Rd f7a4425077102442130d630aff76f500 *man/dppMatern.Rd 4c762a8765980d7d7dacfee820d913ca *man/dppPowerExp.Rd 43fca14b1c64f1a91fbdd8ec2e56b40f *man/dppapproxkernel.Rd cd8c645ef7166114d397f3132e73d3b3 *man/dppapproxpcf.Rd edb8c34118a8284a8946cddfda3392d6 *man/dppeigen.Rd 96abd9ffe6be8d538ddffd6bcab72001 *man/dppkernel.Rd 39406cfbc87e71562c06a1c35f05116b *man/dppm.Rd 435c26403f233030ea066d2135f121c8 *man/dppparbounds.Rd 976360c41648d086a5d572c80803ee46 *man/dppspecden.Rd 1f7ad57545508cbaf3ebdf359509c96f *man/dppspecdenrange.Rd cfe1652069012d2187f56f99553015aa *man/dummify.Rd c24c8a8fa5eb8b63614db81c18feb432 *man/dummy.ppm.Rd 623a912c3f9f26c4e122c3c3932a5b48 *man/edge.Ripley.Rd 57d514e98cfdcf5106306495284b167f *man/edge.Trans.Rd 82a0d6dc2235a33aefe920aac74405d5 *man/eem.Rd 92f555a540e6030d41a3ef67b6aa5519 *man/effectfun.Rd eaf551ffc6fe85878440efe55327409b *man/emend.Rd c0fd21d2e0fa25cb2a538abed97caa31 *man/emend.ppm.Rd 
e46e37c463079dbc7b7011bab1e56448 *man/emend.slrm.Rd 69e3e46f5424b9f5fe11eb7ffda2ba48 *man/envelope.Rd 0cc8bc4984ea85d93d847009e5777e48 *man/envelope.envelope.Rd 9a3ab59545d42fd46d3d6358aefd1e2a *man/envelope.pp3.Rd 417ab6fe29f16cd10ec714b550af8374 *man/envelopeArray.Rd d48fda4933f95954a0d91e7e5b5d518a *man/eval.fasp.Rd 106585642012ab76a03f5def23b1a353 *man/eval.fv.Rd 5185efc1b4872c1a16505cea8d895f87 *man/exactMPLEstrauss.Rd 6e1d4f4674976dcd96b752dcf9063a90 *man/expand.owin.Rd a8b1d15c579c805a5481e636c58951f1 *man/fasp.object.Rd 0daf1da7e9646e66c05f1202aac28638 *man/fitin.Rd 786b1dd29f8c2ef4ac4dcb377d273734 *man/fitted.mppm.Rd 259c85984dc95363e1ada09018756be6 *man/fitted.ppm.Rd 9f6a06b4a7c477ca26c6bcc62a345213 *man/fitted.slrm.Rd ced7b79616a49227632d3d793f3fbeb1 *man/fixef.mppm.Rd c751fce47d2bb11ac2c8181492054851 *man/formula.fv.Rd da28ccd90714621c7ba25c37b8297431 *man/formula.ppm.Rd e411490a37b7ee3668c8ce893aa27a51 *man/fryplot.Rd cd5826c148618e9ff20da78773361299 *man/fv.Rd 9bad27140b9b4eb6ac9c22855c12e11e *man/fv.object.Rd cdf23623d05c9cfabf2c13a29496b728 *man/fvnames.Rd 8786b2679753de57cff66911fd4822a9 *man/gauss.hermite.Rd 8d0a6f7848c01cdaabd5fc1b424c9e31 *man/harmonic.Rd 9d99102f5a989af5d24abc00d5d599c6 *man/harmonise.fv.Rd b2477cde55321142f298c95657f38e34 *man/harmonise.msr.Rd 63542aed06ba7cc630711a415b13b15d *man/hierpair.family.Rd 4cbcb79d39be55de64a34143cdde90f8 *man/hopskel.Rd 6aafa11d5968504243fcbe3c0c97f91d *man/hotbox.Rd f810f76144f2264bfb9c93ba7da0eaf1 *man/hybrid.family.Rd 2339410f62cf4165667269b5479cc7f2 *man/ic.kppm.Rd c06420b4d1ce545a3c93d96ef02223a0 *man/idw.Rd 4247b7dde78daaaf7aed382865eb0302 *man/improve.kppm.Rd 15c74c0a3626c007f606b73c46c658a0 *man/increment.fv.Rd c01cc984d9dacc9347669d5b30556a2c *man/influence.ppm.Rd a42d4e31bcb6e5c0f2e605e57abd12f4 *man/inforder.family.Rd cf21d9560e5a2ebc614866c7d7088f0d *man/integral.msr.Rd 753620f42fe3e253ec926fc3a250add3 *man/intensity.dppm.Rd 9c387cea5f98560ef9087de37eac385f *man/intensity.ppm.Rd 60aba05382729cf1a5a8ba0e426a82b8 *man/intensity.slrm.Rd d6a061df58310496ea224a9c31ce65de *man/ippm.Rd 342ef62d8db2ccc867de36df82a4cec6 *man/is.dppm.Rd 4f878fa63ecb6452b27d51a4bcdb35fb *man/is.hybrid.Rd 9a248a8c22b8ae9d0b9aa8f22633c70e *man/is.marked.ppm.Rd 446c5d72134417b38a7b3491d0daca16 *man/is.multitype.ppm.Rd da965936384d55138f38bd22464a0b93 *man/is.ppm.Rd 1c62a636f92767984c7f4481b401475a *man/is.stationary.Rd 4251149a436d1bf150f48f0158677bac *man/isf.object.Rd 55a3c5b55340b4242fe64a2e244235f9 *man/kaplan.meier.Rd 4baac51cd91d96502fc48a9953680066 *man/kernel.factor.Rd f7a962f81e7673b46c915b521042a39e *man/kernel.moment.Rd 549a8a0da9ff9822fed5d908f2c602bd *man/kernel.squint.Rd 1efcd196df498043d360a07bacdda65e *man/km.rs.Rd 3f51ead3980aab7df18a24b9e2fb550d *man/kppm.Rd c7d7782b860f8a3e0898fa2e07d1395b *man/laslett.Rd 36669f570fd0ae922ea1282e46a956e6 *man/leverage.ppm.Rd d48fbdffa00d8f3953de6079c28d3ac4 *man/leverage.slrm.Rd 6980772b4b01e25e9facb587759c1544 *man/lgcp.estK.Rd 2db86eb90b4c7dde2912ba74abf133d8 *man/lgcp.estpcf.Rd 40cd5716edc2e6ba7e4a5a4d9fe0b362 *man/localK.Rd 0cf23cfc1764077bc3b058c0c6c570c7 *man/localKcross.Rd 1dde27dd7df9a22e845cdca5c63461f4 *man/localKcross.inhom.Rd 75063d93f0bc38698d5295141f4f37c6 *man/localKdot.Rd ea8feb05f131ef72f96c3b02ae9c4dac *man/localKinhom.Rd 1b78c3ea70828337bab6388454c470b1 *man/localpcf.Rd 7caeac313765d2713f998e992babcd1b *man/logLik.dppm.Rd ec6b9c54f62b790559987bb0e9a84149 *man/logLik.kppm.Rd de9a9690e5ef55aaddd586b960f3a0a5 *man/logLik.mppm.Rd cf09a3bff6acfe00690b9682a6ba6ee9 
*man/logLik.ppm.Rd ca7223a4495046290195eadc2f265c6f *man/logLik.slrm.Rd eeec119d489733971c6edca5f3f92514 *man/lohboot.Rd e6f9da6f717d13561c0436d75bf8745f *man/lurking.Rd 275fc51666be46726ae9f7687bfa7017 *man/lurking.mppm.Rd 51d0092b9f685051abaf0d1d4157ba45 *man/lurking.slrm.Rd 390c43841b92ba3277f25852a55d2cc9 *man/macros/defns.Rd 4348adcb8a990fa0fe18a7178c3683aa *man/markconnect.Rd b6239c254d5283717bbfb931af8d26f6 *man/markcorr.Rd bb18727cba02e6e22e104c57e95382e3 *man/markcrosscorr.Rd 3fbdd940f172e21cf0d13f94328c538e *man/markmarkscatter.Rd 6001c3ed60cf32729c2272a2411ee82a *man/marktable.Rd 45c3349058d7d736cbacaa383d0bf325 *man/markvario.Rd 03ba2efe317cef14e1f21e8ce796dbc1 *man/matclust.estK.Rd 8dc0a512d3e7e7118b4cd361dab4ab1b *man/matclust.estpcf.Rd eb7dd4a1eedec9dc245c35a321495e7f *man/measureContinuous.Rd ead8b17a0c7f48cbac4bbcf65f0346c9 *man/measureVariation.Rd 25a08058fd0e3acc74c7a7cf2a9ba83b *man/methods.dppm.Rd 42495c3b43ec06a025587dbbbc45c833 *man/methods.fii.Rd 50cd4702a40ef69d02ed0622bc6b211d *man/methods.influence.ppm.Rd 8a584ba69dc28ef1c9d3b8f4e60896aa *man/methods.kppm.Rd 992c7b5df4e24f73150f856497867ab6 *man/methods.leverage.ppm.Rd 96c4e8083fa5f17ae4ecbf665fdf4241 *man/methods.objsurf.Rd 4af538fd19d3df6cbf3e5594fef186c0 *man/methods.rho2hat.Rd 07479c50b6c25452ceed595a27321a33 *man/methods.rhohat.Rd 9ee3d1c5bbca9b2766fa02d999b8c416 *man/methods.slrm.Rd 2466a29b192167fb6c33d6c1b1af140b *man/methods.ssf.Rd cc83f970d148b8a2178e7c7b649207c5 *man/methods.zclustermodel.Rd f9cc1fd2fad58f5959f68b792cacbc15 *man/mincontrast.Rd aa070c264b489fd2cf5873bc3cd8a7b4 *man/miplot.Rd bc47733e01454abe17ed482062b87989 *man/model.depends.Rd 6652aa4bbcc732174dc98f7bf45af982 *man/model.frame.ppm.Rd 0cf20410063f83d0d76757eb0f91bd3d *man/model.images.Rd fc52408f38c7ba88f9a29fc21a865ebb *man/model.matrix.mppm.Rd 05ac435e3d1d92523320e729cfc5245e *man/model.matrix.ppm.Rd 2a367d19222418868a904b9379310b6a *man/model.matrix.slrm.Rd 89d6ce449309fa62417942829f19212d *man/mppm.Rd cfdb64ecdb9135d5aa82dc7a8f7f5302 *man/msr.Rd 20d47e94f17803ad6af2adf5214766b6 *man/nnclean.Rd 26183ba751f095c25c68c741c7163093 *man/nncorr.Rd 50ece9f5db4cda508c95aa459fe1a38b *man/nndensity.Rd 418a896aa7d1c53313e7022184ea350a *man/nnorient.Rd 0ac08ae5b07184e0f102d8be4902207d *man/npfun.Rd 33900a81652b0105ad0726a3adf36756 *man/objsurf.Rd 1dec8ec3fdb5d8a1261e0e5e009779eb *man/ord.family.Rd 130dee0c963cd94c864cfb978225b276 *man/pairMean.Rd ae6a17d8b47bc103cfc21d5ccb2f9fb2 *man/pairorient.Rd f5e9ee2439d59d962c79e3dcb9bdf677 *man/pairs.im.Rd 6c62a61638bb61fe61df3b0de17a1ac6 *man/pairsat.family.Rd 40f0043072bbeeceb6624ab10b135daa *man/pairwise.family.Rd d80f08886b5ba53b2460411d07c5ed22 *man/panel.contour.Rd 2c8ca1a2061b8cb04b0a66f027b1061e *man/parameters.Rd 0e0f84e091064f49882023a5e5e85ea1 *man/parres.Rd 9b06494a831f88b18f8196c687770fa4 *man/pcf.Rd dfadd4ff1016669603491af6a4362311 *man/pcf.fasp.Rd aeb5cdc38dbcd2acefb53b9b192eb5a5 *man/pcf.fv.Rd 27f95ee9147c3c0a0cb26fa140921642 *man/pcf.ppp.Rd ca9261b2b1f5fe1f08be5a3fc6a9c0e6 *man/pcf3est.Rd 35e9c91309a30d2ed93ea6ceecf41442 *man/pcfcross.Rd f9f8dbf05c3a13aa2d765b14a84f6865 *man/pcfcross.inhom.Rd c745bbb2ee3919ce065ea50628f8e657 *man/pcfdot.Rd b84a4cb9f3aa18d23a8c8e34a452a240 *man/pcfdot.inhom.Rd 49c7c4a8778492b88487f68904cf98c2 *man/pcfinhom.Rd 5bcf6eda621e887fdcb11d5b309a97ef *man/pcfmulti.Rd 9ff682b1593457aa4003c2b041400b96 *man/plot.bermantest.Rd d308f7035d40be415c0a1d4959e2bd80 *man/plot.cdftest.Rd 57cf07ec4c547ff8053d8cd431c9223f *man/plot.dppm.Rd d3467a14b7475b1bd3d345be2413305e 
*man/plot.envelope.Rd 9cf99903b6d5147617988c756e84ad49 *man/plot.fasp.Rd 9902c71db22f3adc13160c1fe89f0df9 *man/plot.fv.Rd 7b833d3f4991ea3ac2b66dc6a2e2f105 *man/plot.influence.ppm.Rd 2b31e001e7b3cae5affd64006249ea77 *man/plot.kppm.Rd 3ef61cef6dcb869e4bdfa7de9b14ba78 *man/plot.laslett.Rd e1c36fc98cbcaec98227bf6f8fff7360 *man/plot.leverage.ppm.Rd 7480127473564ad5c3a57efdf68d9d36 *man/plot.mppm.Rd 3bcbfcd3b88b35277c8d1d8531cc5dfb *man/plot.msr.Rd 717553512b9781dac73bcaf0d8c65a97 *man/plot.plotppm.Rd 423654fd5bb7167f57d9044cad307ca7 *man/plot.ppm.Rd deae3dd7410d3ec4c907e612e5e2f0a1 *man/plot.profilepl.Rd 4be5e426df9bf6b93ab71ac270e35417 *man/plot.quadrattest.Rd 29a48bdc9d2be508ee8f66afaf0f475d *man/plot.rppm.Rd 623d09d5790ab06711fbdbc9e72b145c *man/plot.scan.test.Rd 8c87c3c115a50447499916049d547a01 *man/plot.slrm.Rd 4668103de381eae315945900da7ac473 *man/plot.ssf.Rd 479505294dc35e7b2d4bfdaa8e55c9a0 *man/plot.studpermutest.Rd daf959532330f2c700243ef0693ffa37 *man/polynom.Rd 1e4ffe51385b95fa44f17d5ebbc1e023 *man/pool.Rd 7b0c3d7a78fc8ff459956f54d439c066 *man/pool.anylist.Rd ab8ac452b1a923e9577b138e7d4be21b *man/pool.envelope.Rd 22838536f87dc87f6bb34bd5761f1966 *man/pool.fasp.Rd a0059ae9ec4770fc7f7760eb11e28344 *man/pool.fv.Rd 29a3c5eb8f035c5c972e7bc8f5b25ae4 *man/pool.quadrattest.Rd a0cf222f8b437868440d320d8febb4b4 *man/pool.rat.Rd 5c36f7906308a124d1827f6936ffbb7d *man/ppm.Rd 171a33219c229e821bc051850ae69357 *man/ppm.object.Rd 71a8439bd0d7830d7bbdeb21839fdef6 *man/ppm.ppp.Rd 8cefc7e95a4f2333f1045bfed055e37c *man/ppmInfluence.Rd b09507146205e71a0951ccb14a0a8e6b *man/predict.dppm.Rd 3136a25f701450a7b2ed5c0caf25b3f6 *man/predict.kppm.Rd cee1384adf751c4afa42a8eeabc73eb5 *man/predict.mppm.Rd e7a02a95611f8a12dd87e362fd25ecf3 *man/predict.ppm.Rd baf7a18910afda5c22e97a8b394f35ec *man/predict.rppm.Rd cfb7e1f07e6b80ba77bdaa92e2fcc486 *man/predict.slrm.Rd ac69fc88597ebb97f42fbe6f34be4df6 *man/print.ppm.Rd 7d0ef00ba0ef42f544170699b53f0afc *man/profilepl.Rd a8d0bb1cb4832789478c2fd9fdff494c *man/prune.rppm.Rd b59f5f0d6b7edb61d6a0c5b1cf51da23 *man/pseudoR2.Rd 727cb700153852b0227c094b34c1b70e *man/psib.Rd 8393091907a08c1285428119595f56cb *man/psst.Rd 4b24314c026ba459ee96680b866dbee3 *man/psstA.Rd eddfcfc48ae61f70a6674d01c14df077 *man/psstG.Rd 23bd960eb8f08c2bb87d4b81d1594719 *man/qqplot.ppm.Rd 7a1937101d5f23be1e209ec7a5e5cc19 *man/quad.ppm.Rd 114351b597d720aeeb2888b5282a7845 *man/quadrat.test.Rd e5e8567142ba69f23a29231576d8a6c0 *man/quadrat.test.mppm.Rd ea895b1d7a9679c3c48123012a7e01e0 *man/quadrat.test.splitppp.Rd 399b5a89c244df698c3c85b7c620d388 *man/quadratresample.Rd f74a00203731aed9b80c02c66ff765a1 *man/quantile.density.Rd e10e5d6e7d8fbd709e9562445bd65293 *man/rCauchy.Rd 7c870ec50946ddc32f8dce58805b3957 *man/rDGS.Rd 940c3a5f14c5ded933607832fcc642b2 *man/rDiggleGratton.Rd 08e89870e624574222db2a21bd3cb9b7 *man/rGaussPoisson.Rd 7fc670c9eb80c97014c8efe1a51659ae *man/rHardcore.Rd 741c2851958d8a0fefb2a41075769797 *man/rLGCP.Rd ee27e5e6188742cd4dbcbed5e9f889f4 *man/rMatClust.Rd add9d75ec3e07cf63da3152bc7394790 *man/rMaternI.Rd 197cd3def617904dd5e1674df6277029 *man/rMaternII.Rd 40d40454aa82ff81249c9d31e8b930a6 *man/rMosaicField.Rd 168e3c311208ef80aebb1b8fa91a1010 *man/rMosaicSet.Rd bbbe71903aabcf8ceecfef2706a0f8c2 *man/rNeymanScott.Rd 83427220159b392d30fe532526bdaf75 *man/rPSNCP.Rd f002870b905894af77fa36f1bdc0eb4d *man/rPenttinen.Rd 958b981db668a82a6e9f119302584b10 *man/rPoissonCluster.Rd 946044fbcef67d750f2a19149852d447 *man/rSSI.Rd 9b2719db270f33bdfe9f37eef41016dd *man/rStrauss.Rd 402edf66306db5e4703087dbcceddb27 
*man/rStraussHard.Rd 945e082e1dfc1677d07440e78737d41a *man/rThomas.Rd 1e3830535c87494f824cfc8afe448078 *man/rVarGamma.Rd 4aa8c9349b680a3eae25a4ef140a0338 *man/rags.Rd d799247ffbfec55868bfcb7ba832bcef *man/ragsAreaInter.Rd ba10e2653bf0888cecb2e0cc2d7005e1 *man/ragsMultiHard.Rd 94ccf22fc4d72433a17890fabb98cf4a *man/ranef.mppm.Rd f5859cdb173e22e9551ab30f0c78f1d0 *man/range.fv.Rd 989818640710af91db70be08f28244c3 *man/rat.Rd 76dbaf15ad5654a5da034551b883f194 *man/rcell.Rd 55aeb0c742804dd2fd18971d10ebdce1 *man/rcellnumber.Rd 886acf818af35dbd8def4913c1818718 *man/rdpp.Rd 8263beba126b1c37e440ce3cec96f64b *man/reach.Rd 759ba7977ab1b8e0a8dec1d606541e17 *man/reach.dppm.Rd 10f5a1a3d3c655d971399a31763aaf89 *man/reach.kppm.Rd f9d5416f5570ca8436655a84dcb46d93 *man/rectcontact.Rd 396ba365547cdcad60faa9d6210ece8c *man/reduced.sample.Rd 81d6a1bfcbd128e3271e9780f928c27d *man/reload.or.compute.Rd 3c00fc0b16e988740b321f6c5355ea51 *man/relrisk.Rd f79b65f13e8b2eda544cd6f9a85be22c *man/relrisk.ppm.Rd 1812cd061e71ce6636d7e14481e58e47 *man/relrisk.ppp.Rd ac347300d2bc3a23f9bfe4cb89aac5d9 *man/repul.Rd ff399f335cf2f573c0553b44dbe50572 *man/residuals.dppm.Rd 0418405470085449656f5fc8d4d87999 *man/residuals.kppm.Rd f3eb92ee605e655747524d3ea982330c *man/residuals.mppm.Rd 05f5ee7d9e1c701a608524da5abca13f *man/residuals.ppm.Rd 8b78fdfb7fd19dbfd2473e2fa927622a *man/residuals.slrm.Rd d2811710981089b7e46a52452528b29b *man/response.Rd 627fcde54f29940a08a9c1def1673bfc *man/rex.Rd d118f2f188c0d1129e4cbb0a695d2f2c *man/rho2hat.Rd 2cf898eee688ce1d6a767fc14f3adfd0 *man/rhohat.Rd 0db3350330da05f76e6f5cf7cef51613 *man/rjitter.psp.Rd 6dc4bbb5b1b2e45f381673a7488bbd44 *man/rknn.Rd 7742b613ad127d747aca21db1001bab5 *man/rlabel.Rd 3a88872dff11d1c5d3ce1e2414fce8ce *man/rmh.Rd 39fb4990a0a81612cdfdf00bd39316f8 *man/rmh.default.Rd ad1a5993e70cad9b5720da73ff9192d4 *man/rmh.ppm.Rd fdaddf3b950e9b7e871b06f3f656d303 *man/rmhcontrol.Rd 7fb92fafe4152451c5f54116faae6d69 *man/rmhexpand.Rd 585ef0f1c7e45d290e1f7c0d693eed47 *man/rmhmodel.Rd 703524691ebcba6e7afb4812ec4c597a *man/rmhmodel.default.Rd 754d31bbe18493634e1fd5021d3bc163 *man/rmhmodel.list.Rd b74fce12c103d1c45c14c78ebf3e4495 *man/rmhmodel.ppm.Rd c90b65188f256e0148e9b4152756a244 *man/rmhstart.Rd 6daa23722b901914bfec1925fe57ec22 *man/rmpoint.Rd 5b656b479bf85f0f5846165093cc8d38 *man/rmpoispp.Rd 00b9cb8b6413301c0182c77f3c7180d6 *man/rnoise.Rd 54ae09d2eb7fd0c5f4384ce7045ddd68 *man/roc.Rd b062a825c0b76bc5292d540ff065b8bf *man/rose.Rd f46cbce6f2c7cda2e670de3246fbe252 *man/rotmean.Rd 65521c4f418bdb4f2937902755168d04 *man/rpoint.Rd b6a91ef76fbc45e3cb1bef941d8e4b83 *man/rpoisline.Rd c7a03bb1f0e2e57e0fe02e29d9e5c935 *man/rpoislinetess.Rd 1267b0b52b75d4626575776dabc3e18c *man/rpoispp.Rd 5a98dd78a76b9d187fa5cc2fce68d8e5 *man/rpoispp3.Rd c0c57551015e5587fae41ec41d2b56bc *man/rpoisppOnLines.Rd a6b80bce2cc88f746bf34ad4e7048d6f *man/rpoisppx.Rd 2071b7797faa3874b4cafa424d149b3c *man/rppm.Rd df2d3a4e251d836e48a93416afc150ce *man/rshift.Rd bbb92658bb476c82b3332fb96991bdd5 *man/rshift.ppp.Rd 7025e64603cca3771c59a17930a9d413 *man/rshift.psp.Rd c6bd993eba045315b1fbbd796062bafe *man/rshift.splitppp.Rd af9052ff2629fa829c737a30e8d2b1fb *man/rstrat.Rd e846ff04cbf9038ae51986a3e99a6c26 *man/rtemper.Rd bfe74e8bbf78cbf76f290ca6d57e7748 *man/rthin.Rd e5f0ef76ed15fe38919f8eaac90df411 *man/rthinclumps.Rd 0f58540ffbc0d6b01fc785934fde788c *man/runifdisc.Rd f00c10fda16472141dae745742629b39 *man/runifpoint.Rd 2de1693c1362e6e987c05312d0f8a150 *man/runifpoint3.Rd dd5048dab20cece81901d32fc828845b *man/runifpointOnLines.Rd 
a9273f2fccb179783c06c7ff39ec6492 *man/runifpointx.Rd be7df2e3d96dd962d36880cb3c21d655 *man/scan.test.Rd b9fab8b1b77760071c342225e9d34130 *man/scanLRTS.Rd 7cfcd24d528d6a4427766e3a6a5c2ce0 *man/sdr.Rd 20d7ec0572c3f2faa5b0916ae5f5398b *man/sdrPredict.Rd 48a871d57cc9acb8e759d08fbdab41eb *man/segregation.test.Rd 1072ec85cf03b2046765d9b449371fb9 *man/sharpen.Rd 6d5dc98c63b345f6c10e62a5673f7296 *man/simulate.dppm.Rd d68fccf6ba9ca871e911116c95c2d22d *man/simulate.kppm.Rd 33b7b6d9e3c230b14c9197929c78676d *man/simulate.mppm.Rd 2cc959f26817dfd0b0d679f761d24aef *man/simulate.ppm.Rd 4e92e07224d7111e24cadf4b88a12b6b *man/simulate.slrm.Rd 5157ddadee55022c9355ede200ec191c *man/slrm.Rd 74ca9c56e2d77784cf84ca534cc1d99e *man/spatcov.Rd 3d7f68fccea4b7a01c87c7b1aaa7b287 *man/spatialcdf.Rd d709f6aefe64ec3c89a9befa08886143 *man/spatstat.core-deprecated.Rd d7cd4c90d913b663536413a87657491e *man/spatstat.core-internal.Rd a5f41ee81cca1a4824e894ca697b1f73 *man/spatstat.core-package.Rd a85f30741ee936b330a7aba10aa312d9 *man/split.msr.Rd 0fb48031f42c2c7f16150fcb0b093dea *man/ssf.Rd 9b089b494612e391a0400b8080191e15 *man/stieltjes.Rd 4badd1f6e4c47d32dadaac43970b5422 *man/stienen.Rd 68696bcaa5a5ec54331b008e84b4e1e0 *man/studpermu.test.Rd 68ff5b1d8fdf22dcc9b8e7b18411bef0 *man/subfits.Rd f1c7accea61aea9da256092d74cce2aa *man/subspaceDistance.Rd a733bb706605ce612aca29b0401cd3fe *man/suffstat.Rd 533ab659806b6e668fa64e8e4a4ef5b0 *man/summary.dppm.Rd beed1c5e94dfbb7907d0803c88b879a0 *man/summary.kppm.Rd 583bec5ab8a73b8dc26c84cd651ad1b4 *man/summary.ppm.Rd 85668ad3685e742f5f540a3269406d5d *man/thomas.estK.Rd 5083b5bec798fe2f17b0c8986d8ac24c *man/thomas.estpcf.Rd b2344788b9a6bbe94c688e452553df63 *man/thresholdCI.Rd b7319bebc72ef9966ceabba326495eee *man/thresholdSelect.Rd 701641d93dd00d503118db49baddad19 *man/transect.im.Rd a9a21b880aab4e36278cd8b3d5336ed1 *man/triplet.family.Rd 7bf87c2af4b43852fae55caa442392c1 *man/unitname.Rd d11a2ad5dd1042b6caff2a7aac7aa935 *man/unstack.msr.Rd 711574530501aeff2d9a83857ccfafc6 *man/update.detpointprocfamily.Rd 7e613050b5075767ca9d48d7070dd386 *man/update.interact.Rd 4c268178462342380d4e75a8aa4d5255 *man/update.kppm.Rd 98ce0cb16c8a4fe6980b9265881b58ea *man/update.ppm.Rd 70f976c07e44c9fe6bf41b9d55d326cc *man/update.rmhcontrol.Rd 47bd28833a40a74899d25734078640d6 *man/valid.Rd 9449cb5c1fec24621c998a506be0eac2 *man/valid.detpointprocfamily.Rd 1ed9f6e59dad62161fc6867d14156a24 *man/valid.ppm.Rd 14f272eb6e7d0267ae74fd5a691f4f53 *man/valid.slrm.Rd 9b9f643ceb5ba73627013d63dd7515d6 *man/varblock.Rd 7bd0f73463e1367d14e08ef32eda2bde *man/varcount.Rd 82c2654fe6b74ae1c9550293e3c37839 *man/vargamma.estK.Rd 20bdec51627e17637f8e487de060250e *man/vargamma.estpcf.Rd b434f6798cc2ebe70ac4172191c3d800 *man/vcov.kppm.Rd f85824c3c9ec3a4c31f04be59552caf7 *man/vcov.mppm.Rd b0791f02e9d9ea052bc6f13af6b86100 *man/vcov.ppm.Rd 06c0ef68c545c8667e0ed7245ac0e27c *man/vcov.slrm.Rd 513778fbca80df00f2ea2b710263fe3c *man/will.expand.Rd 56c6576f4265d630e55336ead48fd226 *man/with.fv.Rd 967466b70f99daef536e805505a877d6 *man/with.msr.Rd d02099194c00a636349b66309fed886e *man/with.ssf.Rd 46bdd584bf584fb298bfe431934e36cd *man/zclustermodel.Rd b27b2efe2ed6f95f76f2b9b6ca32af87 *src/Ediggatsti.c dd56326afe9822ac5e34b988f3b33ac3 *src/Ediggra.c bf1a408ebcd8d51978d0dd0e844c8346 *src/Efiksel.c 99deda1b881b1185a27906dd710ffa5c *src/Egeyer.c cc1af6b8e00a3ec1387f242f4f86ac7f *src/Estrauss.c 606313254e3a3982dae8272420c16383 *src/Kborder.c 597402bd9d35fbb6a02e8d6e994a5559 *src/Kborder.h c6533abef1f8acb43ed3bff9a044b931 *src/Knone.c 
c394c76e3bf450475cc8cc5789b8ebf5 *src/Knone.h c1937ccea286609c2306f1e373ba4ea8 *src/Krect.c 707949facbb1443a42c08d275604ce41 *src/KrectBody.h 5eef7701a7848be25a1907a03897613c *src/KrectFunDec.h f7ad99f0f44158cc2c9100e38cc1b185 *src/KrectIncrem.h 08a4f417aed6959d94da3abc8bc55d0f *src/KrectV1.h 92268e0af4222764daf4b106a12bbadc *src/KrectV2.h 263f2296e12aee28f73dff92cef5dd61 *src/KrectV3.h 4ab4852a66f7a56b52e999f326f179c2 *src/KrectV4.h 158db2ee29e3d11ee619715236d57c3c *src/Perfect.cc 03dff82c055d7d1e868e3e732102694e *src/PerfectDGS.h a8e0e937efc81c2f9035c20ca57e0bf4 *src/PerfectDiggleGratton.h 76cad4da7245795fe2ff420e8b635788 *src/PerfectHardcore.h d3512a838df380d2a448049f0000147a *src/PerfectPenttinen.h 62f1d9151646a0e7cde183356d8ff5af *src/PerfectStrauss.h 6811bea314793ed000772db2b51f24a8 *src/PerfectStraussHard.h 49cc7fd81bbad7661295a40e35a52b54 *src/areaint.c e8ed8193bc33c29418fce2aa4a834d22 *src/areapair.c 89cad006e13a81a4b793d89b2b3bb7cf *src/badgey.c 27c73a0901b9d346ba84974f7481ff3c *src/call3d.c 542e6e474340e0aac3ed97f6a92777b4 *src/chunkloop.h becea4a1ea42b8a39021c01abdc6459d *src/constants.h dd3ee2079e7446f90a4b8d18823ffcc0 *src/corrections.c dcf500827ff4c8b060819be2b274a7d7 *src/crossloop.h 8a482da6b76db84c12b938e60be34646 *src/denspt.c b68672feaccb33dd5032692c79be28f6 *src/densptcross.c fd5c0ecd545b4d9e50d0d4e1e202beb3 *src/dgs.c 1780b63b61abab575b2d855bb40e8ca8 *src/digber.c 57fcffca69c7d7deb5937acbb6955dad *src/diggra.c 057ad9a508567553f7537e203ae60a8d *src/dist2.c bcfef6be58ba5ee2fd30dd3dbd8c216f *src/dist2.h 79bdd83f960746df0bef5cea8bbaaead *src/f3.c ab7588df53688ba2bb383afaaa58d0d7 *src/fexitc.c 9ad3159a4625df6b45245dedba8b816d *src/fiksel.c 0ba81075007b6ab741f3eda829da8e99 *src/functable.h ab8eb5c8d9e04c1a67137776a64fb47d *src/g3.c 3280a084e3cdcb931801c42fcd111d2e *src/geom3.h 5e13151e750d3fedb93005afc8c67954 *src/getcif.c c4d587523e2d2e58615eb0d2084a2167 *src/geyer.c 3228576b7ca41179fe5a99fd0a4d4001 *src/hardcore.c d89458d547473b5eba7a73d9f82f2c72 *src/idw.c 3ad791c2a1e4d5b81cf7cad15d7cf804 *src/init.c 9c79e8972c24446466e9dcb30ad82217 *src/k3.c 5ca88ac5e99e094f0b91183500a4f433 *src/lennard.c e3c36533e55e56f4d33e9d91111d0428 *src/localpcf.c cc8ec72a60677de2bda193aa72878bc6 *src/localpcf.h 767ddd93b5f66825c1ed17333a89be1d *src/loccum.c ec04ffaec574081d9d2d834905ea9355 *src/loccums.h aad8df586d677c22758067ff5ec63be2 *src/loccumx.h 0e7a4d103e1c623473fb0b8e0353d763 *src/lookup.c d4f690790bb0e2585fd2a2645e0556d2 *src/looptest.h 9e0b28ecd67dd085ab596f3ae9fafa50 *src/methas.c 69d57274cda1c955d631a7c241cb9a00 *src/methas.h d4184783a5e92f36a1174f3cdfbe5aed *src/mhloop.h 86d84eb3a26f011a800acc8a41a31b12 *src/mhsnoop.c 81c1a015083476952ee14be55991f2d9 *src/mhsnoop.h cfce4c5e0f35b12efa19c0e5144fa540 *src/mhsnoopdef.h af57f00feb578ceeb59fc71b5056e27f *src/mhv1.h 30677e286f648b6f5cc6a39706fe4130 *src/mhv2.h a1cfccc17f8ec667152b7f04b69cb8e6 *src/mhv3.h d2deceb7ed8f51910ab02b2e550b0779 *src/mhv4.h 8895a12dc3a8b12e2b2fb3842bb10270 *src/mhv5.h c6a2cc088688b471abf7ea5a9bb243c0 *src/multihard.c acdf88b1cfedbbb87d81bb727761decd *src/pairloop.h 9340279b0709a21d478ae809bb67385e *src/pcf3.c 887daec80901782cc831ba2dbcd5b3be *src/penttinen.c 4b2f69c21744ac0155ef793345aa7f7f *src/proto.h 603fba27debdc456920ef3943168c8c4 *src/raster.c 51966f18266cee6238c216c5d09cf01f *src/raster.h 4ac1e356212a102be96a4a5eab91d911 *src/ripleybox.h a87b4974cf6fd61e4c49f7225478a364 *src/ripleypoly.h 574358e78217dc076352a2d21c782344 *src/rthin.c 5d8d04c3408eec3f41311d54d6b78a99 *src/scan.c b13c2357b8a58badb9bb87a3d8c60274 
*src/segdens.c 3a5e04ac4ad9fc0efd10ef39dc55f041 *src/sftcr.c 91a81ef72eabefa7b055cb686f55ff21 *src/sphefrac.c 7877dac5d832e95257442e8b7fa8f02b *src/sphevol.c 18b99b034669b76b4b9ccaef945200f0 *src/straush.c e072e3a74914a74af746481c3a3b8b3b *src/straushm.c 28d7ac41aaef4367e9d57b020ed5fb3c *src/strauss.c 0cf60fa5405e4b7f31cde35a0d390351 *src/straussm.c 03e65a27588194512db2649bec6e5277 *src/triplets.c 5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h 848f406cebdb05a33ac9ba713d7e00fd *tests/testsAtoC.R cbeb5f05e7b5ad44bb2feb40c91b6863 *tests/testsD.R 1fb2e4dd38bb67cde017153774d4db34 *tests/testsEtoF.R c9c53e1afa463ae4ff624aa9dde6514f *tests/testsGtoJ.R 53dc4210ad1c8939c0d058f5c0225514 *tests/testsK.R c73f90dd04bdc89af7c280780a1d8eb7 *tests/testsL.R 86f42a96f6fb5193affcbafcdd66cb61 *tests/testsM.R adb707a419e0f1a9661054d63737854c *tests/testsNtoO.R 5dfaea1d963681ef61e9e651733651ee *tests/testsP1.R 43d001c42511e41493c0665deb8ff991 *tests/testsP2.R 191fc1d612f2d4a0d46dfb27b077247c *tests/testsQ.R b4053b0efa8fba216e63a151d688fb7b *tests/testsR1.R 7744e6658dba09d17f8cd41778962bf9 *tests/testsR2.R 5807987ac1683775f503f58b0a0e6853 *tests/testsS.R bb067604d89f34c8914a72fb1aa212f4 *tests/testsT.R cb7e746a9d968abc090b26c67ff0ab51 *tests/testsUtoZ.R
spatstat.core/inst/0000755000176200001440000000000014141377572014024 5ustar liggesusers
spatstat.core/inst/doc/0000755000176200001440000000000014141452520014564 5ustar liggesusers
spatstat.core/inst/doc/packagesizes.txt0000755000176200001440000000056214147573213020015 0ustar liggesusers
date         version  nhelpfiles nobjects ndatasets Rlines srclines
"2020-12-13" "1.65-0"        510     1200         0  71511    14701
"2020-12-14" "1.65-0"        510     1200         0  71511    14701
"2021-01-07" "1.65-0"        510     1200         0  71609    14702
"2021-03-23" "2.0-0"         516     1206         0  72205    14748
"2021-06-16" "2.2-0"         520     1216         0  72952    14748
"2021-10-28" "2.3-1"         530     1266         0  74240    14230
"2021-11-25" "2.3-2"         531     1271         0  74653    14236
spatstat.core/inst/CITATION0000755000176200001440000000472614141377572015175 0ustar liggesusers
citHeader("To cite spatstat in publications use:")

citEntry(entry = "Book",
         title = "Spatial Point Patterns: Methodology and Applications with {R}",
         author = personList(as.person("Adrian Baddeley"),
                             as.person("Ege Rubak"),
                             as.person("Rolf Turner")),
         year = "2015",
         publisher = "Chapman and Hall/CRC Press",
         address = "London",
         url = "https://www.routledge.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/",
         textVersion = paste("Adrian Baddeley, Ege Rubak, Rolf Turner (2015).",
                             "Spatial Point Patterns: Methodology and Applications with R.",
                             "London: Chapman and Hall/CRC Press, 2015.",
                             "URL https://www.routledge.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/"))

citEntry(entry = "Article",
         title = "Hybrids of Gibbs Point Process Models and Their Implementation",
         author = personList(as.person("Adrian Baddeley"),
                             as.person("Rolf Turner"),
                             as.person("Jorge Mateu"),
                             as.person("Andrew Bevan")),
         journal = "Journal of Statistical Software",
         year = "2013",
         volume = "55",
         number = "11",
         pages = "1--43",
         doi = "10.18637/jss.v055.i11",
         textVersion = paste("Adrian Baddeley, Rolf Turner, Jorge Mateu, Andrew Bevan (2013).",
                             "Hybrids of Gibbs Point Process Models and Their Implementation.",
                             "Journal of Statistical Software, 55(11), 1-43.",
                             "DOI: 10.18637/jss.v055.i11"),
         header = "If you use hybrid models, please also cite:")

citEntry(entry = "Article",
         title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns",
         author = personList(as.person("Adrian Baddeley"),
                             as.person("Rolf Turner")),
         journal = "Journal of Statistical Software",
         year = "2005",
         volume = "12",
         number = "6",
         pages = "1--42",
         doi = "10.18637/jss.v012.i06",
         textVersion = paste("Adrian Baddeley, Rolf Turner (2005).",
                             "spatstat: An R Package for Analyzing Spatial Point Patterns.",
                             "Journal of Statistical Software 12(6), 1-42.",
                             "DOI: 10.18637/jss.v012.i06"),
         header = "In survey articles, please cite the original paper on spatstat:")
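The metadata files above are not read by spatstat.core itself; they are consumed by standard R tooling. As a minimal sketch (assuming only that the package is installed locally; nothing here is specific to spatstat.core), the following shows how each file is exercised:

## Verify the installed files against the package's MD5 manifest;
## tools::checkMD5sums() returns TRUE when every checksum matches.
tools::checkMD5sums("spatstat.core")

## Parse and pretty-print the inst/CITATION entries,
## then convert them to BibTeX for use in a manuscript.
cit <- citation("spatstat.core")
print(cit)
toBibtex(cit)

A side note on the CITATION code itself: citEntry() and personList() were the standard interface when this version was released; later R releases (4.3 and up) prefer bibentry() with vectors of person objects, though CITATION files written in the older style are still read correctly by citation().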