spatstat.model/NAMESPACE

## spatstat.model NAMESPACE file
## ................ Import packages ..................
import(stats,graphics,grDevices,utils,methods)
import(spatstat.utils,spatstat.data,spatstat.sparse,
       spatstat.geom,spatstat.random,spatstat.explore)
import(goftest)
import(Matrix,nlme,rpart)
importFrom(abind,abind)
importFrom(tensor,tensor)
importFrom(mgcv, gam,gam.control,anova.gam,formula.gam,predict.gam,
           print.gam,summary.gam,vcov.gam,s)
## ................ Load dynamic library ..............
## (native routines are registered in init.c)
## (entry points are symbols with prefix "SM_")
useDynLib(spatstat.model, .registration=TRUE, .fixes="SM_")
## ////////// DO NOT EDIT THE FOLLOWING ///////////////
## //////// it is generated automatically /////////////
# ..................................................
# Automatically-generated list of documented objects
# ..................................................
export("accumulateStatus") export("active.interactions") export("adaptcoef") export("addvar") export("affine.msr") export("AIC.dppm") export("AIC.kppm") export("AIC.mppm") export("AIC.ppm") export("anova.mppm") export("anova.ppm") export("anova.slrm") export("areadelta2") export("AreaInter") export("as.function.leverage.ppm") export("as.fv.dppm") export("as.fv.kppm") export("as.fv.minconfit") export("as.im.leverage.ppm") export("as.interact") export("as.interact.fii") export("as.interact.interact") export("as.interact.ppm") export("as.interact.zgibbsmodel") export("as.isf") export("as.isf.zgibbsmodel") export("as.layered.msr") export("as.owin.dppm") export("as.owin.influence.ppm") export("as.owin.kppm") export("as.owin.leverage.ppm") export("as.owin.msr") export("as.owin.ppm") export("as.owin.slrm") export("as.ppm") export("as.ppm.dppm") export("as.ppm.kppm") export("as.ppm.ppm") export("as.ppm.profilepl") export("as.ppm.rppm") export("as.ppp.influence.ppm") export("auc.kppm") export("auc.ppm") export("auc.slrm") export("augment.msr") export("BadGey") export("bc") export("bc.ppm") export("berman.test.ppm") export("bigvaluerule") export("blankcoefnames") export("bt.frame") export("cannot.update") export("cauchy.estK") export("cauchy.estpcf") export("cdf.test.mppm") export("cdf.test.ppm") export("cdf.test.slrm") export("check.separable") export("closepaircounts") export("clusterfield.kppm") export("clusterfit") export("clusterkernel.kppm") export("clusterradius.kppm") export("clusterradius.zclustermodel") export("coef.dppm") export("coef.fii") export("coef<-.fii") export("coef.kppm") export("coef.mppm") export("coef.ppm") export("coef.slrm") export("coef.summary.fii") export("coef.summary.kppm") export("coef.summary.ppm") export("coef.summary.slrm") export("coef.vblogit") export("compareFit") export("Concom") export("condSimCox") export("contour.leverage.ppm") export("contour.objsurf") export("crosspaircounts") export("damaged.ppm") export("data.mppm") export("data.ppm") export("deltasuffstat") export("detpointprocfamilyfun") export("deviance.ppm") export("deviance.slrm") export("dfbetas.ppm") export("dfbetas.ppmInfluence") export("dfbetas.slrm") export("dffit") export("dffit.ppm") export("dffit.slrm") export("diagnose.ppm") export("diagnose.ppm.engine") export("DiggleGatesStibbard") export("DiggleGratton") export("dim.detpointprocfamily") export("dim.msr") export("dimnames.msr") export("domain.dppm") 
export("domain.influence.ppm") export("domain.kppm") export("domain.leverage.ppm") export("domain.msr") export("domain.ppm") export("domain.slrm") export("doMultiStraussHard") export("dppapproxkernel") export("dppapproxpcf") export("dppBessel") export("dppCauchy") export("dppDpcf") export("dppeigen") export("dppGauss") export("dppkernel") export("dppm") export("dppMatern") export("dppmFixAlgorithm") export("dppmFixIntensity") export("dppparbounds") export("dppPowerExp") export("DPPSaddle") export("DPPSaddlePairwise") export("dppspecden") export("dppspecdenrange") export("dummify") export("dummy.ppm") export("eem") export("eem.ppm") export("eem.slrm") export("effectfun") export("emend") export("emend.ppm") export("emend.slrm") export("enet.engine") export("envelope.kppm") export("envelope.ppm") export("envelope.slrm") export("equalpairs") export("evalInteraction") export("evalInterEngine") export("evalPairPotential") export("evalPairwiseTerm") export("exactMPLEstrauss") export("expandDot") export("extractAIC.dppm") export("extractAIC.kppm") export("extractAIC.mppm") export("extractAIC.ppm") export("extractAIC.slrm") export("fakefii") export("family.hackglmmPQL") export("family.vblogit") export("fii") export("Fiksel") export("fill.coefs") export("findCovariate") export("fitin") export("fitin.ppm") export("fitin.profilepl") export("fitted.dppm") export("fitted.kppm") export("fitted.mppm") export("fitted.ppm") export("fitted.rppm") export("fitted.slrm") export("fixef.mppm") export("flipxy.msr") export("forbid.logi") export("formula.dppm") export("formula.hackglmmPQL") export("formula.kppm") export("formula.ppm") export("formula.slrm") export("Gcom") export("getCall.mppm") export("getdataname") export("getglmdata") export("getglmfit") export("getglmsubset") export("getppmdatasubset") export("getppmOriginalCovariates") export("Geyer") export("geyercounts") export("geyerdelta2") export("GLMpredict") export("Gres") export("hackglmmPQL") export("Hardcore") export("hardcoredist") export("hardcoredist.fii") export("hardcoredist.ppm") export("harmonic") export("harmonise.msr") export("hasglmfit") export("has.offset") export("has.offset.term") export("hierarchicalordering") export("HierHard") export("hiermat") export("hierpair.family") export("HierStrauss") export("HierStraussHard") export("ho.engine") export("Hybrid") export("hybrid.family") export("ic") export("ic.kppm") export("ic.ppm") export("illegal.iformula") export("image.objsurf") export("impliedcoefficients") export("impliedpresence") export("improve.kppm") export("[.influence.ppm") export("influence.ppm") export("influence.ppmInfluence") export("influence.slrm") export("inforder.family") export("instantiate.interact") export("integral.influence.ppm") export("integral.leverage.ppm") export("integral.msr") export("intensity.detpointprocfamily") export("intensity.dppm") export("intensity.ppm") export("intensity.slrm") export("intensity.zclustermodel") export("intensity.zgibbsmodel") export("interactionfamilyname") export("interactionorder") export("interactionorder.fii") export("interactionorder.interact") export("interactionorder.isf") export("interactionorder.ppm") export("interactionorder.zgibbsmodel") export("intermaker") export("ippm") export("is.dppm") export("is.expandable.ppm") export("is.hybrid") export("is.hybrid.interact") export("is.hybrid.ppm") export("is.interact") export("is.kppm") export("is.lppm") export("is.marked.mppm") export("is.marked.msr") export("is.marked.ppm") export("is.marked.rppm") export("is.marked.slrm") 
export("is.mppm") export("is.multitype.mppm") export("is.multitype.msr") export("is.multitype.ppm") export("is.multitype.rppm") export("is.multitype.slrm") export("is.poissonclusterprocess") export("is.poissonclusterprocess.default") export("is.poissonclusterprocess.kppm") export("is.poissonclusterprocess.zclustermodel") export("is.poisson.interact") export("is.poisson.kppm") export("is.poisson.mppm") export("is.poisson.ppm") export("is.poisson.rppm") export("is.poisson.slrm") export("is.poisson.zgibbsmodel") export("is.ppm") export("is.slrm") export("is.stationary.detpointprocfamily") export("is.stationary.dppm") export("is.stationary.kppm") export("is.stationary.ppm") export("is.stationary.slrm") export("is.stationary.zgibbsmodel") export("Kcom") export("killinteraction") export("Kmodel") export("Kmodel.detpointprocfamily") export("Kmodel.dppm") export("Kmodel.kppm") export("Kmodel.ppm") export("Kmodel.slrm") export("Kmodel.zclustermodel") export("Kpcf.kppm") export("kppm") export("kppmCLadap") export("kppmComLik") export("kppm.formula") export("kppmMinCon") export("kppmPalmLik") export("kppm.ppp") export("kppm.quad") export("Kres") export("labels.dppm") export("labels.kppm") export("labels.ppm") export("labels.slrm") export("LambertW") export("LennardJones") export("leverage") export("[.leverage.ppm") export("leverage.ppm") export("leverage.ppmInfluence") export("leverage.slrm") export("lgcp.estK") export("lgcp.estpcf") export("lines.objsurf") export("lines.traj") export("logi.engine") export("logLik.dppm") export("logLik.kppm") export("logLik.mppm") export("logLik.ppm") export("logLik.slrm") export("logLik.vblogit") export("LurkEngine") export("lurking") export("lurking.mppm") export("lurking.ppm") export("lurking.ppp") export("make.pspace") export("mapInterVars") export("matclust.estK") export("matclust.estpcf") export("Mayer") export("mean.leverage.ppm") export("measureContinuous") export("measureDiscrete") export("measureNegative") export("measurePositive") export("measureVariation") export("measureWeighted") export("mincontrast") export("model.covariates") export("model.depends") export("model.frame.dppm") export("modelFrameGam") export("model.frame.kppm") export("model.frame.ppm") export("model.frame.slrm") export("model.images") export("model.images.dppm") export("model.images.kppm") export("model.images.ppm") export("model.images.slrm") export("model.is.additive") export("model.matrix.dppm") export("model.matrix.ippm") export("model.matrix.kppm") export("model.matrix.mppm") export("model.matrix.ppm") export("model.matrix.slrm") export("model.se.image") export("mpl.engine") export("mpl.get.covariates") export("mpl.prepare") export("mpl.usable") export("mppm") export("[.msr") export("msr") export("MultiHard") export("MultiStrauss") export("MultiStraussHard") export("newformula") export("newstyle.coeff.handling") export("nndcumfun") export("nobs.dppm") export("nobs.kppm") export("nobs.mppm") export("nobs.ppm") export("no.trend.ppm") export("npfun") export("objsurf") export("objsurf.dppm") export("objsurfEngine") export("objsurf.kppm") export("objsurf.minconfit") export("Ops.msr") export("optimConverged") export("optimNsteps") export("optimStatus") export("Ord") export("ord.family") export("OrdThresh") export("outdated.interact") export("oversize.quad") export("PairPiece") export("PairPotentialType") export("pairsat.family") export("Pairwise") export("pairwise.family") export("palmdiagnose") export("panel.contour") export("panel.histogram") export("panel.image") export("panysib") 
export("parameters") export("parameters.detpointprocfamily") export("parameters.dppm") export("parameters.fii") export("parameters.interact") export("parameters.kppm") export("parameters.ppm") export("parameters.profilepl") export("parameters.slrm") export("parres") export("partialModelMatrix") export("pcfmodel") export("pcfmodel.detpointprocfamily") export("pcfmodel.dppm") export("pcfmodel.kppm") export("pcfmodel.ppm") export("pcfmodel.slrm") export("pcfmodel.zclustermodel") export("Penttinen") export("persp.leverage.ppm") export("persp.objsurf") export("plot.addvar") export("plot.diagppm") export("plot.dppm") export("ploterodeimage") export("ploterodewin") export("plot.fii") export("plot.influence.ppm") export("plot.kppm") export("plot.leverage.ppm") export("plot.lurk") export("plot.minconfit") export("plot.mppm") export("plot.msr") export("plot.objsurf") export("plot.palmdiag") export("plot.parres") export("plot.plotppm") export("plot.ppm") export("plot.profilepl") export("plot.qqppm") export("plot.rppm") export("plot.slrm") export("plot.traj") export("PoisSaddle") export("PoisSaddleArea") export("PoisSaddleGeyer") export("PoisSaddlePairwise") export("Poisson") export("PoissonCompareCalc") export("poisson.fits.better") export("polynom") export("ppm") export("ppmCovariates") export("ppm.default") export("ppmDerivatives") export("ppm.formula") export("ppmInfluence") export("ppmInfluenceEngine") export("PPMmodelmatrix") export("ppm.ppp") export("ppm.quad") export("predict.dppm") export("predict.kppm") export("predict.mppm") export("predict.ppm") export("predict.profilepl") export("predict.rppm") export("predict.slrm") export("predict.vblogit") export("predict.zclustermodel") export("print.addvar") export("print.bt.frame") export("print.detpointprocfamily") export("print.detpointprocfamilyfun") export("print.diagppm") export("print.dppm") export("print.fii") export("print.hierarchicalordering") export("print.influence.ppm") export("print.interact") export("print.intermaker") export("print.isf") export("print.kppm") export("print.leverage.ppm") export("print.lurk") export("print.minconfit") export("print.mppm") export("print.msr") export("print.objsurf") export("print.parres") export("print.plotppm") export("print.ppm") export("print.profilepl") export("print.qqppm") export("print.rppm") export("print.slrm") export("printStatus") export("printStatusList") export("print.summary.dppm") export("print.summary.fii") export("print.summary.kppm") export("print.summary.mppm") export("print.summary.objsurf") export("print.summary.ppm") export("print.summary.slrm") export("print.traj") export("print.vblogit") export("print.zclustermodel") export("print.zgibbsmodel") export("profilepl") export("project.ppm") export("prune.rppm") export("pseudoR2") export("pseudoR2.ppm") export("pseudoR2.slrm") export("psib") export("psib.kppm") export("psst") export("psstA") export("psstG") export("qqplot.ppm") export("quadBlockSizes") export("quad.mppm") export("quad.ppm") export("quadrat.test.mppm") export("quadrat.test.ppm") export("quadrat.test.slrm") export("ranef.mppm") export("rdpp") export("reach.detpointprocfamily") export("reach.dppm") export("reach.fii") export("reach.interact") export("reach.kppm") export("reach.ppm") export("reach.slrm") export("reach.zclustermodel") export("reduceformula") export("reincarnate.interact") export("relrisk.ppm") export("repul") export("repul.dppm") export("rescale.msr") export("resid1panel") export("resid1plot") export("resid4plot") export("residualMeasure") 
export("residuals.dppm") export("residuals.kppm") export("residuals.mppm") export("residuals.ppm") export("residuals.rppm") export("residuals.slrm") export("response") export("response.dppm") export("response.glm") export("response.kppm") export("response.lm") export("response.mppm") export("response.ppm") export("response.rppm") export("response.slrm") export("rex") export("rhohat.ppm") export("rhohat.slrm") export("rmhmodel.ppm") export("rmh.ppm") export("roc.kppm") export("roc.ppm") export("roc.slrm") export("rotate.msr") export("rppm") export("SaddleApprox") export("safeFiniteValue") export("safePositiveValue") export("SatPiece") export("Saturated") export("scalardilate.msr") export("shift.influence.ppm") export("shift.leverage.ppm") export("shift.msr") export("signalStatus") export("simulate.detpointprocfamily") export("simulate.dppm") export("simulate.kppm") export("simulate.mppm") export("simulate.ppm") export("simulate.profilepl") export("simulate.slrm") export("slrAssemblePixelData") export("slrm") export("slrmInfluence") export("slr.prepare") export("Smooth.influence.ppm") export("Smooth.leverage.ppm") export("Smooth.msr") export("Softcore") export("spatialCovariateEvidence.ppm") export("spatialCovariateEvidence.slrm") export("spatialCovariateUnderModel") export("spatialCovariateUnderModel.dppm") export("spatialCovariateUnderModel.kppm") export("spatialCovariateUnderModel.ppm") export("spatialCovariateUnderModel.slrm") export("spatstatDPPModelInfo") export("sp.foundclass") export("sp.foundclasses") export("splitHybridInteraction") export("split.msr") export("Strauss") export("strausscounts") export("StraussHard") export("stripGLMM") export("subfits") export("subfits.new") export("subfits.old") export("suffloc") export("suffstat") export("suffstat.generic") export("suffstat.poisson") export("summary.dppm") export("summary.fii") export("summary.kppm") export("summary.mppm") export("summary.msr") export("summary.objsurf") export("summary.ppm") export("summary.profilepl") export("summary.slrm") export("summary.vblogit") export("terms.dppm") export("terms.kppm") export("terms.mppm") export("terms.ppm") export("terms.rppm") export("terms.slrm") export("thomas.estK") export("thomas.estpcf") export("totalVariation") export("traj") export("triplet.family") export("Triplets") export("tweak.coefs") export("unitname.dppm") export("unitname<-.dppm") export("unitname.kppm") export("unitname<-.kppm") export("unitname.minconfit") export("unitname<-.minconfit") export("unitname.msr") export("unitname<-.msr") export("unitname.ppm") export("unitname<-.ppm") export("unitname.slrm") export("unitname<-.slrm") export("unstack.msr") export("updateData.dppm") export("updateData.kppm") export("updateData.ppm") export("updateData.slrm") export("update.detpointprocfamily") export("update.dppm") export("update.interact") export("update.ippm") export("update.kppm") export("update.msr") export("update.ppm") export("update.rppm") export("update.slrm") export("valid") export("valid.detpointprocfamily") export("valid.ppm") export("valid.slrm") export("varcount") export("varcountEngine") export("vargamma.estK") export("vargamma.estpcf") export("vcov.kppm") export("vcov.mppm") export("vcov.ppm") export("vcov.slrm") export("versionstring.interact") export("versionstring.ppm") export("Window.dppm") export("Window.influence.ppm") export("Window.kppm") export("Window.leverage.ppm") export("Window.msr") export("Window.ppm") export("Window.slrm") export("windows.mppm") export("with.msr") export("zclustermodel") 
export("zgibbsmodel") # ....... Special cases ........... S3method("Ops", "msr") # ....... End of special cases ... # ......................................... # Automatically generated list of S3 methods # ......................................... S3method("affine", "msr") S3method("AIC", "dppm") S3method("AIC", "kppm") S3method("AIC", "mppm") S3method("AIC", "ppm") S3method("anova", "mppm") S3method("anova", "ppm") S3method("anova", "slrm") S3method("as.function", "leverage.ppm") S3method("as.fv", "dppm") S3method("as.fv", "kppm") S3method("as.fv", "minconfit") S3method("as.im", "leverage.ppm") S3method("as.interact", "fii") S3method("as.interact", "interact") S3method("as.interact", "ppm") S3method("as.interact", "zgibbsmodel") S3method("as.isf", "zgibbsmodel") S3method("as.layered", "msr") S3method("as.owin", "dppm") S3method("as.owin", "influence.ppm") S3method("as.owin", "kppm") S3method("as.owin", "leverage.ppm") S3method("as.owin", "msr") S3method("as.owin", "ppm") S3method("as.owin", "slrm") S3method("as.ppm", "dppm") S3method("as.ppm", "kppm") S3method("as.ppm", "ppm") S3method("as.ppm", "profilepl") S3method("as.ppm", "rppm") S3method("as.ppp", "influence.ppm") S3method("auc", "kppm") S3method("auc", "ppm") S3method("auc", "slrm") S3method("bc", "ppm") S3method("berman.test", "ppm") S3method("cdf.test", "mppm") S3method("cdf.test", "ppm") S3method("cdf.test", "slrm") S3method("clusterfield", "kppm") S3method("clusterkernel", "kppm") S3method("clusterradius", "kppm") S3method("clusterradius", "zclustermodel") S3method("coef", "dppm") S3method("coef", "fii") S3method("coef", "kppm") S3method("coef", "mppm") S3method("coef", "ppm") S3method("coef", "slrm") S3method("coef", "summary.fii") S3method("coef", "summary.kppm") S3method("coef", "summary.ppm") S3method("coef", "summary.slrm") S3method("coef", "vblogit") S3method("contour", "leverage.ppm") S3method("contour", "objsurf") S3method("deviance", "ppm") S3method("deviance", "slrm") S3method("dfbetas", "ppm") S3method("dfbetas", "ppmInfluence") S3method("dfbetas", "slrm") S3method("dffit", "ppm") S3method("dffit", "slrm") S3method("dim", "detpointprocfamily") S3method("dim", "msr") S3method("dimnames", "msr") S3method("domain", "dppm") S3method("domain", "influence.ppm") S3method("domain", "kppm") S3method("domain", "leverage.ppm") S3method("domain", "msr") S3method("domain", "ppm") S3method("domain", "slrm") S3method("eem", "ppm") S3method("eem", "slrm") S3method("emend", "ppm") S3method("emend", "slrm") S3method("envelope", "kppm") S3method("envelope", "ppm") S3method("envelope", "slrm") S3method("extractAIC", "dppm") S3method("extractAIC", "kppm") S3method("extractAIC", "mppm") S3method("extractAIC", "ppm") S3method("extractAIC", "slrm") S3method("family", "hackglmmPQL") S3method("family", "vblogit") S3method("fitin", "ppm") S3method("fitin", "profilepl") S3method("fitted", "dppm") S3method("fitted", "kppm") S3method("fitted", "mppm") S3method("fitted", "ppm") S3method("fitted", "rppm") S3method("fitted", "slrm") S3method("fixef", "mppm") S3method("flipxy", "msr") S3method("formula", "dppm") S3method("formula", "hackglmmPQL") S3method("formula", "kppm") S3method("formula", "ppm") S3method("formula", "slrm") S3method("getCall", "mppm") S3method("hardcoredist", "fii") S3method("hardcoredist", "ppm") S3method("harmonise", "msr") S3method("ic", "kppm") S3method("ic", "ppm") S3method("image", "objsurf") S3method("[", "influence.ppm") S3method("influence", "ppm") S3method("influence", "ppmInfluence") S3method("influence", "slrm") 
S3method("integral", "influence.ppm") S3method("integral", "leverage.ppm") S3method("integral", "msr") S3method("intensity", "detpointprocfamily") S3method("intensity", "dppm") S3method("intensity", "ppm") S3method("intensity", "slrm") S3method("intensity", "zclustermodel") S3method("intensity", "zgibbsmodel") S3method("interactionorder", "fii") S3method("interactionorder", "interact") S3method("interactionorder", "isf") S3method("interactionorder", "ppm") S3method("interactionorder", "zgibbsmodel") S3method("is.expandable", "ppm") S3method("is.hybrid", "interact") S3method("is.hybrid", "ppm") S3method("is.marked", "mppm") S3method("is.marked", "msr") S3method("is.marked", "ppm") S3method("is.marked", "rppm") S3method("is.marked", "slrm") S3method("is.multitype", "mppm") S3method("is.multitype", "msr") S3method("is.multitype", "ppm") S3method("is.multitype", "rppm") S3method("is.multitype", "slrm") S3method("is.poissonclusterprocess", "default") S3method("is.poissonclusterprocess", "kppm") S3method("is.poissonclusterprocess", "zclustermodel") S3method("is.poisson", "interact") S3method("is.poisson", "kppm") S3method("is.poisson", "mppm") S3method("is.poisson", "ppm") S3method("is.poisson", "rppm") S3method("is.poisson", "slrm") S3method("is.poisson", "zgibbsmodel") S3method("is.stationary", "detpointprocfamily") S3method("is.stationary", "dppm") S3method("is.stationary", "kppm") S3method("is.stationary", "ppm") S3method("is.stationary", "slrm") S3method("is.stationary", "zgibbsmodel") S3method("Kmodel", "detpointprocfamily") S3method("Kmodel", "dppm") S3method("Kmodel", "kppm") S3method("Kmodel", "ppm") S3method("Kmodel", "slrm") S3method("Kmodel", "zclustermodel") S3method("kppm", "formula") S3method("kppm", "ppp") S3method("kppm", "quad") S3method("labels", "dppm") S3method("labels", "kppm") S3method("labels", "ppm") S3method("labels", "slrm") S3method("[", "leverage.ppm") S3method("leverage", "ppm") S3method("leverage", "ppmInfluence") S3method("leverage", "slrm") S3method("lines", "objsurf") S3method("lines", "traj") S3method("logLik", "dppm") S3method("logLik", "kppm") S3method("logLik", "mppm") S3method("logLik", "ppm") S3method("logLik", "slrm") S3method("logLik", "vblogit") S3method("lurking", "mppm") S3method("lurking", "ppm") S3method("lurking", "ppp") S3method("mean", "leverage.ppm") S3method("model.frame", "dppm") S3method("model.frame", "kppm") S3method("model.frame", "ppm") S3method("model.frame", "slrm") S3method("model.images", "dppm") S3method("model.images", "kppm") S3method("model.images", "ppm") S3method("model.images", "slrm") S3method("model.matrix", "dppm") S3method("model.matrix", "ippm") S3method("model.matrix", "kppm") S3method("model.matrix", "mppm") S3method("model.matrix", "ppm") S3method("model.matrix", "slrm") S3method("[", "msr") S3method("nobs", "dppm") S3method("nobs", "kppm") S3method("nobs", "mppm") S3method("nobs", "ppm") S3method("objsurf", "dppm") S3method("objsurf", "kppm") S3method("objsurf", "minconfit") S3method("parameters", "detpointprocfamily") S3method("parameters", "dppm") S3method("parameters", "fii") S3method("parameters", "interact") S3method("parameters", "kppm") S3method("parameters", "ppm") S3method("parameters", "profilepl") S3method("parameters", "slrm") S3method("pcfmodel", "detpointprocfamily") S3method("pcfmodel", "dppm") S3method("pcfmodel", "kppm") S3method("pcfmodel", "ppm") S3method("pcfmodel", "slrm") S3method("pcfmodel", "zclustermodel") S3method("persp", "leverage.ppm") S3method("persp", "objsurf") S3method("plot", "addvar") 
S3method("plot", "diagppm") S3method("plot", "dppm") S3method("plot", "fii") S3method("plot", "influence.ppm") S3method("plot", "kppm") S3method("plot", "leverage.ppm") S3method("plot", "lurk") S3method("plot", "minconfit") S3method("plot", "mppm") S3method("plot", "msr") S3method("plot", "objsurf") S3method("plot", "palmdiag") S3method("plot", "parres") S3method("plot", "plotppm") S3method("plot", "ppm") S3method("plot", "profilepl") S3method("plot", "qqppm") S3method("plot", "rppm") S3method("plot", "slrm") S3method("plot", "traj") S3method("ppm", "default") S3method("ppm", "formula") S3method("ppm", "ppp") S3method("ppm", "quad") S3method("predict", "dppm") S3method("predict", "kppm") S3method("predict", "mppm") S3method("predict", "ppm") S3method("predict", "profilepl") S3method("predict", "rppm") S3method("predict", "slrm") S3method("predict", "vblogit") S3method("predict", "zclustermodel") S3method("print", "addvar") S3method("print", "bt.frame") S3method("print", "detpointprocfamily") S3method("print", "detpointprocfamilyfun") S3method("print", "diagppm") S3method("print", "dppm") S3method("print", "fii") S3method("print", "hierarchicalordering") S3method("print", "influence.ppm") S3method("print", "interact") S3method("print", "intermaker") S3method("print", "isf") S3method("print", "kppm") S3method("print", "leverage.ppm") S3method("print", "lurk") S3method("print", "minconfit") S3method("print", "mppm") S3method("print", "msr") S3method("print", "objsurf") S3method("print", "parres") S3method("print", "plotppm") S3method("print", "ppm") S3method("print", "profilepl") S3method("print", "qqppm") S3method("print", "rppm") S3method("print", "slrm") S3method("print", "summary.dppm") S3method("print", "summary.fii") S3method("print", "summary.kppm") S3method("print", "summary.mppm") S3method("print", "summary.objsurf") S3method("print", "summary.ppm") S3method("print", "summary.slrm") S3method("print", "traj") S3method("print", "vblogit") S3method("print", "zclustermodel") S3method("print", "zgibbsmodel") S3method("prune", "rppm") S3method("pseudoR2", "ppm") S3method("pseudoR2", "slrm") S3method("psib", "kppm") S3method("quadrat.test", "mppm") S3method("quadrat.test", "ppm") S3method("quadrat.test", "slrm") S3method("ranef", "mppm") S3method("reach", "detpointprocfamily") S3method("reach", "dppm") S3method("reach", "fii") S3method("reach", "interact") S3method("reach", "kppm") S3method("reach", "ppm") S3method("reach", "slrm") S3method("reach", "zclustermodel") S3method("relrisk", "ppm") S3method("repul", "dppm") S3method("rescale", "msr") S3method("residuals", "dppm") S3method("residuals", "kppm") S3method("residuals", "mppm") S3method("residuals", "ppm") S3method("residuals", "rppm") S3method("residuals", "slrm") S3method("response", "dppm") S3method("response", "glm") S3method("response", "kppm") S3method("response", "lm") S3method("response", "mppm") S3method("response", "ppm") S3method("response", "rppm") S3method("response", "slrm") S3method("rhohat", "ppm") S3method("rhohat", "slrm") S3method("rmhmodel", "ppm") S3method("rmh", "ppm") S3method("roc", "kppm") S3method("roc", "ppm") S3method("roc", "slrm") S3method("rotate", "msr") S3method("scalardilate", "msr") S3method("shift", "influence.ppm") S3method("shift", "leverage.ppm") S3method("shift", "msr") S3method("simulate", "detpointprocfamily") S3method("simulate", "dppm") S3method("simulate", "kppm") S3method("simulate", "mppm") S3method("simulate", "ppm") S3method("simulate", "profilepl") S3method("simulate", "slrm") 
S3method("Smooth", "influence.ppm") S3method("Smooth", "leverage.ppm") S3method("Smooth", "msr") S3method("spatialCovariateEvidence", "ppm") S3method("spatialCovariateEvidence", "slrm") S3method("spatialCovariateUnderModel", "dppm") S3method("spatialCovariateUnderModel", "kppm") S3method("spatialCovariateUnderModel", "ppm") S3method("spatialCovariateUnderModel", "slrm") S3method("split", "msr") S3method("summary", "dppm") S3method("summary", "fii") S3method("summary", "kppm") S3method("summary", "mppm") S3method("summary", "msr") S3method("summary", "objsurf") S3method("summary", "ppm") S3method("summary", "profilepl") S3method("summary", "slrm") S3method("summary", "vblogit") S3method("terms", "dppm") S3method("terms", "kppm") S3method("terms", "mppm") S3method("terms", "ppm") S3method("terms", "rppm") S3method("terms", "slrm") S3method("unitname", "dppm") S3method("unitname", "kppm") S3method("unitname", "minconfit") S3method("unitname", "msr") S3method("unitname", "ppm") S3method("unitname", "slrm") S3method("unstack", "msr") S3method("updateData", "dppm") S3method("updateData", "kppm") S3method("updateData", "ppm") S3method("updateData", "slrm") S3method("update", "detpointprocfamily") S3method("update", "dppm") S3method("update", "interact") S3method("update", "ippm") S3method("update", "kppm") S3method("update", "msr") S3method("update", "ppm") S3method("update", "rppm") S3method("update", "slrm") S3method("valid", "detpointprocfamily") S3method("valid", "ppm") S3method("valid", "slrm") S3method("vcov", "kppm") S3method("vcov", "mppm") S3method("vcov", "ppm") S3method("vcov", "slrm") S3method("Window", "dppm") S3method("Window", "influence.ppm") S3method("Window", "kppm") S3method("Window", "leverage.ppm") S3method("Window", "msr") S3method("Window", "ppm") S3method("Window", "slrm") S3method("with", "msr") # ......................................... # Assignment methods # ......................................... S3method("coef<-", "fii") S3method("unitname<-", "dppm") S3method("unitname<-", "kppm") S3method("unitname<-", "minconfit") S3method("unitname<-", "msr") S3method("unitname<-", "ppm") S3method("unitname<-", "slrm") # ......................................... # End of methods # ......................................... spatstat.model/man/0000755000176200001440000000000014374277632014006 5ustar liggesusersspatstat.model/man/as.ppm.Rd0000644000176200001440000000444114331173076015464 0ustar liggesusers\name{as.ppm} \alias{as.ppm} \alias{as.ppm.ppm} \alias{as.ppm.profilepl} \alias{as.ppm.kppm} \alias{as.ppm.dppm} \alias{as.ppm.rppm} \title{Extract Fitted Point Process Model} \description{ Extracts the fitted point process model from some kind of fitted model. } \usage{ as.ppm(object) \method{as.ppm}{ppm}(object) \method{as.ppm}{profilepl}(object) \method{as.ppm}{kppm}(object) \method{as.ppm}{dppm}(object) \method{as.ppm}{rppm}(object) } \arguments{ \item{object}{An object that includes a fitted Poisson or Gibbs point process model. An object of class \code{"ppm"}, \code{"profilepl"}, \code{"kppm"}, \code{"dppm"} or \code{"rppm"}, or possibly other classes. } } \details{ The function \code{as.ppm} extracts the fitted point process model (of class \code{"ppm"}) from a suitable object. The function \code{as.ppm} is generic, with methods for the classes \code{"ppm"}, \code{"profilepl"}, \code{"kppm"}, \code{"dppm"} and \code{"rppm"}, and possibly for other classes. 
For the class \code{"profilepl"} of models fitted by maximum profile pseudolikelihood, the method \code{as.ppm.profilepl} extracts the fitted point process model (with the optimal values of the irregular parameters). For the class \code{"kppm"} of models fitted by minimum contrast (or Palm or composite likelihood) using Waagepetersen's two-step estimation procedure (see \code{\link{kppm}}), the method \code{as.ppm.kppm} extracts the Poisson point process model that is fitted in the first stage of the procedure. The behaviour for the class \code{"dppm"} is analogous to the \code{"kppm"} case above. For the class \code{"rppm"} of models fitted by recursive partitioning (regression trees), the method \code{as.ppm.rppm} extracts the corresponding loglinear model that is fitted in the first stage of the procedure (whose purpose is merely to identify and evaluate the explanatory variables). } \value{ An object of class \code{"ppm"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{ppm}}, \code{\link{profilepl}}. } \examples{ # fit a model by profile maximum pseudolikelihood rvals <- data.frame(r=(1:10)/100) pfit <- profilepl(rvals, Strauss, cells, ~1) # extract the fitted model fit <- as.ppm(pfit) } \keyword{spatial} \keyword{models} spatstat.model/man/fitted.mppm.Rd0000644000176200001440000000507314331173100016503 0ustar liggesusers\name{fitted.mppm} \alias{fitted.mppm} \title{Fitted Conditional Intensity for Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns, compute the fitted conditional intensity of the model at the points of each data pattern, or at the points of the quadrature schemes used to fit the model. } \usage{ \method{fitted}{mppm}(object, ..., type = "lambda", dataonly = FALSE) } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{type}{ Type of fitted values: either \code{"trend"} for the spatial trend, or \code{"lambda"} or \code{"cif"} for the conditional intensity. } \item{dataonly}{ If \code{TRUE}, fitted values are computed only for the points of the data point patterns. If \code{FALSE}, fitted values are computed for the points of the quadrature schemes used to fit the model. } } \details{ This function evaluates the conditional intensity \eqn{\hat\lambda(u,x)}{lambdahat(u,x)} or spatial trend \eqn{\hat{b(u)}}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, for each of the original point patterns \eqn{x} to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature schemes used to fit the model in \code{\link{mppm}}. They include the data points (the points of the original point pattern datasets) and other ``dummy'' points in the window of observation. Use \code{\link{predict.mppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \value{ A list of vectors (one for each row of the original hyperframe, i.e. one vector for each of the original point patterns) containing the values of the fitted conditional intensity or (if \code{type="trend"}) the fitted spatial trend. Entries in these vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{\link{quad.mppm}(object)}. 
}
\references{
  \baddrubaturnbook
}
\author{
  \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian, \rolf and \ege.
}
\examples{
   model <- mppm(Bugs ~ x, data=hyperframe(Bugs=waterstriders),
                 interaction=Strauss(7))
   cifs <- fitted(model)
}
\seealso{
  \code{\link{mppm}},
  \code{\link{predict.mppm}}
}
\keyword{spatial}
\keyword{models}

spatstat.model/man/anova.ppm.Rd

\name{anova.ppm}
\alias{anova.ppm}
\title{ANOVA for Fitted Point Process Models}
\description{
  Performs analysis of deviance for one or more fitted point process models.
}
\usage{
  \method{anova}{ppm}(object, \dots, test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE)
}
\arguments{
  \item{object}{A fitted point process model (object of class \code{"ppm"}).}
  \item{\dots}{Optional. Additional objects of class \code{"ppm"}.}
  \item{test}{Character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed.}
  \item{adjust}{Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes.}
  \item{warn}{Logical value indicating whether to issue warnings if problems arise.}
  \item{fine}{Logical value, passed to \code{\link{vcov.ppm}}, indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of variance terms. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}.}
}
\value{
  An object of class \code{"anova"}, or \code{NULL}.
}
\details{
  This is a method for \code{\link[stats]{anova}} for fitted point process models (objects of class \code{"ppm"}, usually generated by the model-fitting function \code{\link{ppm}}).

  If the fitted models are all Poisson point processes, then by default, this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"} or \code{test="LRT"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. If \code{test="Rao"} or \code{test="score"}, the \emph{score test} (Rao, 1948) is performed instead.

  If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley et al (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment.
}
\section{Errors and warnings}{
  \describe{
    \item{models not nested:}{
      There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example, the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However, the two point process models with formulae \code{~x} and \code{~y} are not nested.
      If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious.
    }
    \item{different sizes of dataset:}{
      There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This implies that the models were fitted using different quadrature schemes (see \code{\link{quadscheme}}) and/or with different edge corrections or different values of the border edge correction distance \code{rbord}.

      To ensure that models are comparable, check the following:
      \itemize{
        \item the models must all have been fitted to the same point pattern dataset, in the same window.
        \item all models must have been fitted by the same fitting method as specified by the argument \code{method} in \code{\link{ppm}}.
        \item If some of the models depend on covariates, then they should all have been fitted using the same list of covariates, and using \code{allcovar=TRUE} to ensure that the same quadrature scheme is used.
        \item all models must have been fitted using the same edge correction as specified by the arguments \code{correction} and \code{rbord}. If you did not specify the value of \code{rbord}, then it may have taken a different value for different models. The default value of \code{rbord} is equal to zero for a Poisson model, and otherwise equals the reach (interaction distance) of the interaction term (see \code{\link{reach}}). To ensure that the models are comparable, set \code{rbord} to equal the maximum reach of the interactions that you are fitting.
      }
    }
  }
}
\seealso{
  \code{\link{ppm}},
  \code{\link{vcov.ppm}}
}
\section{Error messages}{
  An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this.
}
\examples{
 mod0 <- ppm(swedishpines ~1)
 modx <- ppm(swedishpines ~x)
 # Likelihood ratio test
 anova(mod0, modx, test="Chi")
 # Score test
 anova(mod0, modx, test="Rao")
 # Single argument
 modxy <- ppm(swedishpines ~x + y)
 anova(modxy, test="Chi")
 # Adjusted composite likelihood ratio test
 modP <- ppm(swedishpines ~1, rbord=9)
 modS <- ppm(swedishpines ~1, Strauss(9))
 anova(modP, modS, test="Chi")
}
\references{
  Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530.

  Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148.

  Rao, C.R. (1948) Large sample tests of statistical hypotheses concerning several parameters with applications to problems of estimation. \emph{Proceedings of the Cambridge Philosophical Society} \bold{44}, 50--57.
}
\author{
  \spatstatAuthors.
}
\keyword{spatial}
\keyword{models}
\keyword{methods}
\concept{Model selection}

spatstat.model/man/rppm.Rd

\name{rppm}
\alias{rppm}
\title{
  Recursively Partitioned Point Process Model
}
\description{
  Fits a recursive partition model to point pattern data.
}
\usage{
rppm(\dots, rpargs=list())
}
\arguments{
  \item{\dots}{Arguments passed to \code{\link{ppm}} specifying the point pattern data and the explanatory covariates.}
  \item{rpargs}{Optional list of arguments passed to \code{\link[rpart]{rpart}} controlling the recursive partitioning procedure.}
}
\details{
  This function attempts to find a simple rule for predicting low and high intensity regions of points in a point pattern, using explanatory covariates.

  The arguments \code{\dots} specify the point pattern data and explanatory covariates in the same way as they would be in the function \code{\link{ppm}}.

  The recursive partitioning algorithm \code{\link[rpart]{rpart}} is then used to find a partitioning rule.
}
\value{
  An object of class \code{"rppm"}. There are methods for \code{print}, \code{plot}, \code{fitted}, \code{predict} and \code{prune} for this class.
}
\references{
  Breiman, L., Friedman, J. H., Olshen, R. A., and Stone, C. J. (1984) \emph{Classification and Regression Trees}. Wadsworth.
}
\author{
  \spatstatAuthors.
}
\seealso{
  \code{\link{plot.rppm}},
  \code{\link{predict.rppm}},
  \code{\link{update.rppm}},
  \code{\link{prune.rppm}}.
}
\examples{
    # New Zealand trees data: trees planted along border
    # Use covariates 'x', 'y'
    nzfit <- rppm(nztrees ~ x + y)
    nzfit
    prune(nzfit, cp=0.035)
    # Murchison gold data: numeric and logical covariates
    mur <- solapply(murchison, rescale, s=1000, unitname="km")
    mur$dfault <- distfun(mur$faults)
    #
    mfit <- rppm(gold ~ dfault + greenstone, data=mur)
    mfit
    # Gorillas data: factor covariates
    # (symbol '.' indicates 'all variables')
    gfit <- rppm(unmark(gorillas) ~ . , data=gorillas.extra)
    gfit
}
\keyword{spatial}
\keyword{models}

spatstat.model/man/anova.slrm.Rd

\name{anova.slrm}
\Rdversion{1.1}
\alias{anova.slrm}
\title{
  Analysis of Deviance for Spatial Logistic Regression Models
}
\description{
  Performs Analysis of Deviance for two or more fitted Spatial Logistic Regression models.
}
\usage{
\method{anova}{slrm}(object, ..., test = NULL)
}
\arguments{
  \item{object}{a fitted spatial logistic regression model. An object of class \code{"slrm"}.}
  \item{\dots}{additional objects of the same type (optional).}
  \item{test}{a character string, (partially) matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, indicating the reference distribution that should be used to compute \eqn{p}-values.}
}
\details{
  This is a method for \code{\link[stats]{anova}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}).

  The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided \eqn{p}-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}.
}
\value{
  An object of class \code{"anova"}, inheriting from class \code{"data.frame"}, representing the analysis of deviance table.
}
\seealso{
  \code{\link{slrm}}
}
\examples{
  X <- rpoispp(42)
  fit0 <- slrm(X ~ 1)
  fit1 <- slrm(X ~ x+y)
  anova(fit0, fit1, test="Chi")
}
\author{\adrian and \rolf}
\keyword{spatial}
\keyword{models}
\keyword{methods}
\concept{Model selection}

spatstat.model/man/Gcom.Rd

\name{Gcom}
\Rdversion{1.1}
\alias{Gcom}
\title{
  Model Compensator of Nearest Neighbour Function
}
\description{
  Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the nearest neighbour distance distribution function \eqn{G} based on the fitted model (as well as the usual nonparametric estimates of \eqn{G} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{G} functions serves as a diagnostic for the model.
}
\usage{
Gcom(object, r = NULL, breaks = NULL, ...,
     correction = c("border", "Hanisch"),
     conditional = !is.poisson(object),
     restrict = FALSE,
     model = NULL,
     trend = ~1, interaction = Poisson(), rbord = reach(interaction),
     ppmcorrection = "border",
     truecoef = NULL, hi.res = NULL)
}
\arguments{
  \item{object}{Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}).}
  \item{r}{Optional. Vector of values of the argument \eqn{r} at which the function \eqn{G(r)} should be computed. This argument is usually not specified. There is a sensible default.}
  \item{breaks}{This argument is for internal use only.}
  \item{correction}{Edge correction(s) to be employed in calculating the compensator. Options are \code{"border"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options.}
  \item{conditional}{Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details.}
  \item{restrict}{Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details.}
  \item{model}{Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}.}
  \item{trend,interaction,rbord}{Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details.}
  \item{\dots}{Extra arguments passed to \code{\link{ppm}}.}
  \item{ppmcorrection}{The \code{correction} argument to \code{\link{ppm}}.}
  \item{truecoef}{Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}.}
  \item{hi.res}{Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients.}
}
\details{
  This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset.
  It computes different estimates of the nearest neighbour distance distribution function \eqn{G} of the dataset, which should be approximately equal if the model is a good fit to the data.

  The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}.

  For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments.

  The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{G} function. It then also computes the \emph{model-compensated} \eqn{G} function. The different functions are returned as columns in a data frame (of class \code{"fv"}). The interpretation of the columns is as follows (ignoring edge corrections):
  \describe{
    \item{\code{bord}:}{
      the nonparametric border-correction estimate of \eqn{G(r)},
      \deqn{
        \hat G(r) = \frac{\sum_i I\{ d_i \le r\} I\{ b_i > r \}}{\sum_i I\{ b_i > r\}}
      }{
        G(r) = (sum[i] I(d[i] <= r) I(b[i] > r))/(sum[i] I(b[i] > r))
      }
      where \eqn{d_i}{d[i]} is the distance from the \eqn{i}-th data point to its nearest neighbour, and \eqn{b_i}{b[i]} is the distance from the \eqn{i}-th data point to the boundary of the window \eqn{W}.
    }
    \item{\code{bcom}:}{
      the model compensator of the border-correction estimate
      \deqn{
        {\bf C}\, \hat G(r) = \frac{\int \lambda(u,x) I\{ b(u) > r\} I\{ d(u,x) \le r\}}{ 1 + \sum_i I\{ b_i > r\} }
      }{
        C G(r) = (integral[u] lambda(u,x) I(b(u) > r) I( d(u,x) <= r ))/(1 + sum[i] I(b[i] > r))
      }
      where \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{d(u,x)} denotes the distance from \eqn{u} to the nearest point in \eqn{x}, while \eqn{b(u)} denotes the distance from \eqn{u} to the boundary of the window \eqn{W}.
    }
    \item{\code{han}:}{
      the nonparametric Hanisch estimate of \eqn{G(r)}
      \deqn{
        \hat G(r) = \frac{D(r)}{D(\infty)}
      }{
        G(r) = D(r)/D(infty)
      }
      where
      \deqn{
        D(r) = \sum_i \frac{ I\{x_i \in W_{\ominus d_i}\} I\{d_i \le r\} }{ \mbox{area}(W_{\ominus d_i}) }
      }{
        D(r) = sum[i] I(x[i] in W[-d[i]]) I(d[i] <= r)/area(W[-d[i]])
      }
      in which \eqn{W_{\ominus r}}{W[-r]} denotes the erosion of the window \eqn{W} by a distance \eqn{r}.
    }
    \item{\code{hcom}:}{
      the corresponding model-compensated function
      \deqn{
        {\bf C} \, G(r) = \int_W \frac{ \lambda(u,x) I(u \in W_{\ominus d(u)}) I(d(u) \le r) }{ \hat D(\infty) \mbox{area}(W_{\ominus d(u)}) + 1 }
      }{
        C G(r) = integral[u] lambda(u,x) I(u in W[-d(u)]) I(d(u) <= r)/ (1 + D(infty) area(W[-d(u)]))
      }
      where \eqn{d(u) = d(u, x)} is the (`empty space') distance from location \eqn{u} to the nearest point of \eqn{x}.
    }
  }

  If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects.

  The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified.
  If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix E of Baddeley, Rubak and \Moller (2011). See also \code{\link{spatstat.options}('eroded.intensity')}. Thus, by default, the reweighting estimator is computed for non-Poisson models.

  The border-corrected and Hanisch-corrected estimates of \eqn{G(r)} are approximately unbiased estimates of the \eqn{G}-function, assuming the point process is stationary. The model-compensated functions are unbiased estimates \emph{of the mean value of the corresponding nonparametric estimate}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric and model-compensated estimates is approximately zero.

  To compute the difference between the nonparametric and model-compensated functions, use \code{\link{Gres}}.
}
\value{
  A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}.
}
\references{
  Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646.
}
\author{
  \adrian, \ege and Jesper \Moller.
}
\seealso{
  Related functions: \code{\link{Gest}}, \code{\link{Gres}}.

  Alternative functions: \code{\link{Kcom}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}.

  Model fitting: \code{\link{ppm}}.
}
\examples{
    fit0 <- ppm(cells, ~1) # uniform Poisson
    G0 <- Gcom(fit0)
    G0
    plot(G0)
    # uniform Poisson is clearly not correct
    # Hanisch estimates only
    plot(Gcom(fit0), cbind(han, hcom) ~ r)
    fit1 <- ppm(cells, ~1, Strauss(0.08))
    plot(Gcom(fit1), cbind(han, hcom) ~ r)
    # Try adjusting interaction distance
    fit2 <- update(fit1, Strauss(0.10))
    plot(Gcom(fit2), cbind(han, hcom) ~ r)
    G3 <- Gcom(cells, interaction=Strauss(0.12))
    plot(G3, cbind(han, hcom) ~ r)
}
\keyword{spatial}
\keyword{models}

spatstat.model/man/leverage.slrm.Rd

\name{leverage.slrm}
\alias{leverage.slrm}
\alias{influence.slrm}
\alias{dfbetas.slrm}
\alias{dffit.slrm}
\title{
  Leverage and Influence Diagnostics for Spatial Logistic Regression
}
\description{
  For a fitted spatial logistic regression model, these functions compute diagnostics of leverage and influence.
}
\usage{
\method{leverage}{slrm}(model, \dots)
\method{influence}{slrm}(model, \dots)
\method{dfbetas}{slrm}(model, \dots)
\method{dffit}{slrm}(object, \dots)
}
\arguments{
  \item{model,object}{A fitted spatial logistic regression model (object of class \code{"slrm"}).}
  \item{\dots}{Arguments passed to methods.}
}
\details{
  These functions are methods for the generics \code{\link{leverage}}, \code{\link[stats]{influence}}, \code{\link[stats]{dfbetas}} and \code{\link{dffit}} for the class \code{"slrm"}.

  These functions adapt the standard diagnostics for logistic regression (see \code{\link[stats]{influence.measures}}) to a fitted spatial logistic regression model (object of class \code{"slrm"}). This adaptation was described by Baddeley, Chang and Song (2013).

  \code{leverage.slrm} computes the leverage value (diagonal of the hat matrix) for the covariate data in each pixel. The result is a pixel image.

  \code{influence.slrm} computes the likelihood influence for the data (covariates and presence/absence of points) in each pixel.
  The result is a pixel image.

  \code{dfbetas.slrm} computes the parameter influence for the data (covariates and presence/absence of points) in each pixel. The result is a list of pixel images, one image for each of the model coefficients in \code{coef(model)}. The list can be plotted immediately.

  \code{dffit.slrm} computes the total influence for the data (covariates and presence/absence of points) in each pixel. The result is a pixel image.
}
\value{
  A pixel image, or a list of pixel images.
}
\references{
  Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
}
\author{
  \adrian.
}
\seealso{
  \code{\link[stats]{influence.measures}}.

  \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{dffit.ppm}}
}
\examples{
   H <- unmark(humberside)
   fit <- slrm(H ~ x+y, dimyx=32)
   plot(leverage(fit))
   plot(influence(fit))
   plot(dfbetas(fit))
   plot(dffit(fit))
}
\keyword{spatial}
\keyword{models}
\concept{diagnostics}
\concept{Model diagnostics}

spatstat.model/man/cauchy.estpcf.Rd

\name{cauchy.estpcf}
\alias{cauchy.estpcf}
\title{Fit the Neyman-Scott cluster process with Cauchy kernel}
\description{
  Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function.
}
\usage{
cauchy.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL,
            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...,
            pcfargs = list())
}
\arguments{
  \item{X}{Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details.}
  \item{startpar}{Vector of starting values for the parameters of the model.}
  \item{lambda}{Optional. An estimate of the intensity of the point process.}
  \item{q,p}{Optional. Exponents for the contrast criterion.}
  \item{rmin, rmax}{Optional. The interval of \eqn{r} values for the contrast criterion.}
  \item{\dots}{Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details.}
  \item{pcfargs}{Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function.}
}
\details{
  This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function.

  The argument \code{X} can be either
  \describe{
    \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this.}
    \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives.}
  }

  The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical pair correlation function of the Cauchy process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}.
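  For instance, a minimal sketch of the basic call (the starting values
  shown here are illustrative only):
  \preformatted{
    u <- cauchy.estpcf(redwood, startpar=c(kappa=10, scale=0.1))
  }
  The equivalent fit via \code{\link{kppm}} would be
  \code{kppm(redwood ~ 1, clusters="Cauchy", statistic="pcf")},
  as noted below.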
The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure internally uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2013) Cauchy cluster process. \emph{Metrika} \bold{76}, 697--706. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{vargamma.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rCauchy}} to simulate the model.
} \examples{ u <- cauchy.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat.model/man/plot.mppm.Rd0000644000176200001440000000456014377275736016226 0ustar liggesusers\name{plot.mppm} \alias{plot.mppm} \title{Plot a Fitted Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns by \code{\link{mppm}}, compute the spatial trend or conditional intensity surface of the model, in a form suitable for plotting, and (optionally) plot this surface. } \usage{ \method{plot}{mppm}(x, \dots, trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp"), main) } \arguments{ \item{x}{ A point process model fitted to multiple point patterns, typically obtained from the model-fitting algorithm \code{\link{mppm}}. An object of class \code{"mppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} or \code{\link{plot.anylist}} controlling the plot. } \item{trend}{ Logical value indicating whether to plot the fitted trend. } \item{cif}{ Logical value indicating whether to plot the fitted conditional intensity. } \item{se}{ Logical value indicating whether to plot the standard error of the fitted trend. } \item{how}{ Single character string indicating the style of plot to be performed. } \item{main}{ Character string for the main title of the plot. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"mppm"} of point process models fitted to multiple point patterns (see \code{\link{mppm}}). It invokes \code{\link{subfits}} to compute the fitted model for each individual point pattern dataset, then calls \code{\link{plot.ppm}} to plot these individual models. These individual plots are displayed using \code{\link{plot.anylist}}, which generates either a series of separate plot frames or an array of plot panels on a single page. } \seealso{ \code{\link{plot.ppm}}, \code{\link{mppm}}, \code{\link{plot.listof}} } \references{ \baddrubaturnbook } \examples{ # Synthetic data from known model n <- 9 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~ Z + U + V, data=H) plot(fit) } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.model/man/print.ppm.Rd0000644000176200001440000000303514331173077016214 0ustar liggesusers\name{print.ppm} \alias{print.ppm} \title{Print a Fitted Point Process Model} \description{ Default \code{print} method for a fitted point process model. } \usage{ \method{print}{ppm}(x, \dots, what=c("all", "model", "trend", "interaction", "se", "errors")) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{what}{ Character vector (partially-matched) indicating what information should be printed. } \item{\dots}{Ignored.} } \value{ None. } \details{ This is the \code{print} method for the class \code{"ppm"}. It prints information about the fitted model in a sensible format. The argument \code{what} makes it possible to print only some of the information. If \code{what} is missing, then by default, standard errors for the estimated coefficients of the model will be printed only if the model is a Poisson point process.
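For example (an illustrative sketch, using the Strauss model from the examples below):
\preformatted{
  fitP <- ppm(cells ~ x)                 # Poisson: standard errors are printed
  fitS <- ppm(cells ~ 1, Strauss(0.05))  # Gibbs: standard errors omitted by default
  print(fitS, what="se")                 # requests the standard errors explicitly
}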
To print the standard errors for a non-Poisson model, call \code{print.ppm} with the argument \code{what} given explicitly, or reset the default rule by typing \code{spatstat.options(print.ppm.SE="always")}. } \seealso{ \code{\link{ppm.object}} for details of the class \code{"ppm"}. \code{\link{ppm}} for generating these objects. \code{\link{plot.ppm}}, \code{\link{predict.ppm}} } \examples{ \donttest{ m <- ppm(cells ~1, Strauss(0.05)) m } } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} \keyword{models} spatstat.model/man/methods.leverage.ppm.Rd0000644000176200001440000000452414510474260020315 0ustar liggesusers\name{methods.leverage.ppm} \alias{methods.leverage.ppm} %DoNotExport \alias{as.im.leverage.ppm} \alias{as.owin.leverage.ppm} \alias{domain.leverage.ppm} \alias{integral.leverage.ppm} \alias{mean.leverage.ppm} \alias{Smooth.leverage.ppm} \alias{Window.leverage.ppm} \title{Methods for Leverage Objects} \description{ Methods for the class \code{"leverage.ppm"}. } \usage{ \method{as.im}{leverage.ppm}(X, \dots, what=c("smooth", "nearest")) \method{as.owin}{leverage.ppm}(W, \dots, fatal=TRUE) \method{domain}{leverage.ppm}(X, \dots) \method{integral}{leverage.ppm}(f, domain, \dots) \method{mean}{leverage.ppm}(x, \dots) \method{Smooth}{leverage.ppm}(X, \dots) \method{Window}{leverage.ppm}(X, \dots) } \arguments{ \item{X,x,W,f}{An object of class \code{"leverage.ppm"}.} \item{domain}{ Optional. Domain of integration: a window (class \code{"owin"}) or a tessellation (class \code{"tess"}). } \item{\dots}{ Additional arguments. See Details. } \item{fatal}{ Logical value indicating what to do if the data cannot be converted to a window. If \code{fatal=TRUE} (the default) an error occurs. If \code{fatal=FALSE} a value of \code{NULL} is returned. } \item{what}{ Character string (partially matched) specifying which image data should be extracted. See \code{\link{plot.leverage.ppm}} for explanation. } } \value{ A window (object of class \code{"owin"}) for \code{as.owin}, \code{domain} and \code{Window}. A numeric value or numeric vector for \code{integral}. A pixel image, or list of pixel images, for \code{as.im} and \code{Smooth}. } \details{ These functions are methods for the class \code{"leverage.ppm"}. An object of this class represents the leverage measure of a fitted point process model (see \code{\link{leverage.ppm}}). For \code{as.im}, \code{domain} and \code{Window}, additional arguments (\code{\dots}) are ignored. For \code{as.owin}, \code{integral}, \code{mean} and \code{Smooth}, additional arguments are passed to the method for class \code{"im"}. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{plot.leverage.ppm}}, \code{\link{[.leverage.ppm}}, \code{\link{as.function.leverage.ppm}}. } \examples{ fit <- ppm(cells ~ x) a <- leverage(fit) integral(a) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} \concept{Model diagnostics} spatstat.model/man/model.matrix.ppm.Rd0000644000176200001440000000771214331173077017471 0ustar liggesusers\name{model.matrix.ppm} \alias{model.matrix.ppm} \alias{model.matrix.kppm} \alias{model.matrix.dppm} \alias{model.matrix.ippm} \title{Extract Design Matrix from Point Process Model} \description{ Given a point process model that has been fitted to spatial point pattern data, this function extracts the design matrix of the model. 
} \usage{ \method{model.matrix}{ppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{kppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{dppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{ippm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE, irregular=FALSE) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"ippm"}. } \item{data}{ A model frame, containing the data required for the Berman-Turner device. } \item{Q}{ A point pattern (class \code{"ppp"}) or quadrature scheme (class \code{"quad"}) specifying new locations where the covariates should be computed. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{irregular}{ Logical value indicating whether to include the irregular score components. } } \details{ These commands are methods for the generic function \code{\link{model.matrix}}. They extract the design matrix of a spatial point process model (class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}). More precisely, this command extracts the design matrix of the generalised linear model associated with a spatial point process model. The \code{object} must be a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}) fitted to spatial point pattern data. Such objects are produced by the model-fitting functions \code{\link{ppm}}, \code{\link{kppm}}, and \code{\link{dppm}}. The methods \code{model.matrix.ppm}, \code{model.matrix.kppm}, and \code{model.matrix.dppm} extract the model matrix for the GLM. The result is a matrix, with one row for every quadrature point in the fitting procedure, and one column for every constructed covariate in the design matrix. If there are \code{NA} values in the covariates, the argument \code{keepNA} determines whether to retain or delete the corresponding rows of the model matrix. The default \code{keepNA=TRUE} is to retain them. Note that this differs from the default behaviour of many other methods for \code{model.matrix}, which typically delete rows containing \code{NA}. The quadrature points themselves can be extracted using \code{\link{quad.ppm}}. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. Rows of the matrix correspond to quadrature points in the fitting procedure (provided \code{keepNA=TRUE}). } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{ippm}}, \code{\link{ppm.object}}, \code{\link{quad.ppm}}, \code{\link{residuals.ppm}} } \examples{ fit <- ppm(cells ~ x) head(model.matrix(fit)) model.matrix(fit, Q=runifpoint(5)) kfit <- kppm(redwood ~ x, "Thomas") m <- model.matrix(kfit) } \keyword{spatial} \keyword{models} spatstat.model/man/dppPowerExp.Rd0000644000176200001440000000247714331173076016552 0ustar liggesusers\name{dppPowerExp} \alias{dppPowerExp} \title{Power Exponential Spectral Determinantal Point Process Model} \description{Function generating an instance of the Power Exponential Spectral determinantal point process model.} \usage{dppPowerExp(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The Power Exponential Spectral DPP is defined in Lavancier, \Moller and Rubak (2015). The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppPowerExp(lambda=100, alpha=.01, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}} } spatstat.model/man/dim.detpointprocfamily.Rd0000644000176200001440000000073114331173076020750 0ustar liggesusers\name{dim.detpointprocfamily} \alias{dim.detpointprocfamily} \title{Dimension of Determinantal Point Process Model} \description{Extracts the dimension of a determinantal point process model.} \usage{ \method{dim}{detpointprocfamily}(x) } \arguments{ \item{x}{object of class \code{"detpointprocfamily"}.} } \value{A numeric (or NULL if the dimension of the model is unspecified).} \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.model/man/ppmInfluence.Rd0000644000176200001440000000607714331173077016713 0ustar liggesusers\name{ppmInfluence} \alias{ppmInfluence} \title{ Leverage and Influence Measures for Spatial Point Process Model } \description{ Calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. } \usage{ ppmInfluence(fit, what = c("leverage", "influence", "dfbetas"), \dots, iScore = NULL, iHessian = NULL, iArgs = NULL, drop = FALSE, fitname = NULL) } \arguments{ \item{fit}{ A fitted point process model of class \code{"ppm"}. } \item{what}{ Character vector specifying which quantities are to be calculated. Default is to calculate all quantities. } \item{\dots}{ Ignored. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model.
} \item{fitname}{ Optional character string name for the fitted model \code{fit}. } } \details{ This function calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. When analysing large datasets, the user can call \code{ppmInfluence} to perform the calculations efficiently, then extract the leverage and influence values as desired. For example the leverage can be extracted either as \code{result$leverage} or \code{leverage(result)}. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ A list containing the leverage and influence measures specified by \code{what}. The result also belongs to the class \code{"ppmInfluence"}. } \author{ \adrian } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~ x+y) fI <- ppmInfluence(fit) fitlev <- fI$leverage fitlev <- leverage(fI) fitinf <- fI$influence fitinf <- influence(fI) fitdfb <- fI$dfbetas fitdfb <- dfbetas(fI) } \keyword{spatial} \keyword{models} spatstat.model/man/reach.Rd0000644000176200001440000001301114331173077015342 0ustar liggesusers\name{reach} \alias{reach.ppm} \alias{reach.interact} \alias{reach.fii} \title{Interaction Distance of a Point Process Model} \description{ Computes the interaction distance of a point process model. } \usage{ \method{reach}{ppm}(x, \dots, epsilon=0) \method{reach}{interact}(x, \dots) \method{reach}{fii}(x, \dots, epsilon) } \arguments{ \item{x}{Either a fitted point process model (object of class \code{"ppm"}), an interpoint interaction (object of class \code{"interact"}), a fitted interpoint interaction (object of class \code{"fii"}) or a point process model for simulation (object of class \code{"rmhmodel"}). } \item{epsilon}{ Numerical threshold below which interaction is treated as zero. See details. } \item{\dots}{ Other arguments are ignored. } } \value{ The interaction distance, or \code{NA} if this cannot be computed from the information given. } \details{ The function \code{reach} computes the `interaction distance' or `interaction range' of a point process model. The definition of the interaction distance depends on the type of point process model. This help page explains the interaction distance for a Gibbs point process. For other kinds of models, see \code{\link[spatstat.model]{reach.kppm}} and \code{\link[spatstat.model]{reach.dppm}}. For a Gibbs point process model, the interaction distance is the shortest distance \eqn{D} such that any two points in the process which are separated by a distance greater than \eqn{D} do not interact with each other. 
For example, the interaction range of a Strauss process (see \code{\link[spatstat.model]{Strauss}} or \code{\link{rStrauss}}) with parameters \eqn{\beta,\gamma,r}{beta,gamma,r} is equal to \eqn{r}, unless \eqn{\gamma=1}{gamma=1} in which case the model is Poisson and the interaction range is \eqn{0}. The interaction range of a Poisson process is zero. The interaction range of the Ord threshold process (see \code{\link[spatstat.model]{OrdThresh}}) is infinite, since two points \emph{may} interact at any distance apart. The function \code{reach} is generic, with methods for the case where \code{x} is \itemize{ \item a fitted point process model (object of class \code{"ppm"}, usually obtained from the model-fitting function \code{\link[spatstat.model]{ppm}}); \item an interpoint interaction structure (object of class \code{"interact"}) created by one of the functions \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}} or \code{\link{Ord}}; \item a fitted interpoint interaction (object of class \code{"fii"}) extracted from a fitted point process model by the command \code{\link{fitin}}; \item a point process model for simulation (object of class \code{"rmhmodel"}), usually obtained from \code{\link[spatstat.random]{rmhmodel}}. } When \code{x} is an \code{"interact"} object, \code{reach(x)} returns the maximum possible interaction range for any point process model with interaction structure given by \code{x}. For example, \code{reach(Strauss(0.2))} returns \code{0.2}. When \code{x} is a \code{"ppm"} object, \code{reach(x)} returns the interaction range for the point process model represented by \code{x}. For example, a fitted Strauss process model with parameters \code{beta,gamma,r} will return either \code{0} or \code{r}, depending on whether the fitted interaction parameter \code{gamma} is equal or not equal to 1. For some point process models, such as the soft core process (see \code{\link{Softcore}}), the interaction distance is infinite, because the interaction terms are positive for all pairs of points. A practical solution is to compute the distance at which the interaction contribution from a pair of points falls below a threshold \code{epsilon}, on the scale of the log conditional intensity. This is done by setting the argument \code{epsilon} to a positive value. } \section{Other types of models}{ Methods for \code{reach} are also defined for point process models of class \code{"kppm"} and \code{"dppm"}. Their technical definition is different from this one. See \code{\link[spatstat.model]{reach.kppm}} and \code{\link[spatstat.model]{reach.dppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}}, \code{\link{Ord}}. \code{\link[spatstat.random]{reach.rmhmodel}} See \code{\link[spatstat.model]{reach.kppm}} and \code{\link[spatstat.model]{reach.dppm}} for other types of point process models. 
} \examples{ reach(Poisson()) # returns 0 reach(Strauss(r=7)) # returns 7 fit <- ppm(swedishpines ~ 1, Strauss(r=7)) reach(fit) # returns 7 reach(OrdThresh(42)) # returns Inf reach(MultiStrauss(matrix(c(1,3,3,1),2,2))) # returns 3 } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/predict.ppm.Rd0000644000176200001440000003676514424137200016521 0ustar liggesusers\name{predict.ppm} \alias{predict.ppm} \title{Prediction from a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, evaluate the spatial trend or the conditional intensity of the model at new locations. } \usage{ \method{predict}{ppm}(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ignore.hardcore=FALSE, \dots, dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps", "grow.frame", "shrink.frame"), new.coef=NULL, check=TRUE, repair=TRUE) } \arguments{ \item{object}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"} (see \code{\link{ppm.object}}). } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Incompatible with \code{locations}. Equivalent to \code{dimyx}.) } \item{locations}{ Optional. Data giving the exact \eqn{x,y} coordinates (and marks, if required) of locations at which predictions should be computed. Either a point pattern, or a data frame with columns named \code{x} and \code{y}, or a binary image mask, or a pixel image. (Incompatible with \code{ngrid}, \code{dimyx} and \code{eps}). } \item{covariates}{ Values of external covariates required by the model. Either a data frame or a list of images. See Details. } \item{type}{ Character string. Indicates which property of the fitted model should be predicted. Options are \code{"trend"} for the spatial trend, \code{"cif"} or \code{"lambda"} for the conditional intensity, \code{"intensity"} for the intensity, and \code{"count"} for the total number of points in \code{window}. } \item{se}{ Logical value indicating whether to calculate standard errors as well. } \item{interval}{ String (partially matched) indicating whether to produce estimates (\code{interval="none"}, the default) or a confidence interval (\code{interval="confidence"}) or a prediction interval (\code{interval="prediction"}). } \item{level}{ Coverage probability for the confidence or prediction interval. } \item{X}{ Optional. A point pattern (object of class \code{"ppp"}) to be taken as the data point pattern when calculating the conditional intensity. The default is to use the original data to which the model was fitted. } \item{correction}{ Name of the edge correction to be used in calculating the conditional intensity. Options include \code{"border"} and \code{"none"}. Other options may include \code{"periodic"}, \code{"isotropic"} and \code{"translate"} depending on the model. The default correction is the one that was used to fit \code{object}. 
} \item{ignore.hardcore}{ Advanced use only. Logical value specifying whether to compute only the finite part of the interaction potential (effectively removing any hard core interaction terms). } \item{\dots}{ Ignored. } \item{dimyx}{ Equivalent to \code{ngrid}. } \item{eps}{ Width and height of pixels in the prediction grid. A numerical value, or numeric vector of length 2. } \item{rule.eps}{ Argument passed to \code{\link[spatstat.geom]{as.mask}} controlling the discretisation. See \code{\link[spatstat.geom]{as.mask}}. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } } \value{ \emph{If \code{type="count"}:} a numeric vector or matrix. \emph{If \code{locations} is given and is a data frame:} a vector of predicted values for the spatial locations (and marks, if required) given in \code{locations}. \emph{If \code{ngrid} is given, or if \code{locations} is given and is a binary image mask or a pixel image:} If \code{object} is an unmarked point process, the result is a pixel image object (of class \code{"im"}, see \code{\link{im.object}}) containing the predictions. If \code{object} is a multitype point process, the result is a list of pixel images, containing the predictions for each type at the same grid of locations. The ``predicted values'' are either values of the spatial trend (if \code{type="trend"}), values of the conditional intensity (if \code{type="cif"} or \code{type="lambda"}), values of the intensity (if \code{type="intensity"}) or numbers of points (if \code{type="count"}). If \code{se=TRUE}, then the result is a list with two entries, the first being the predicted values in the format described above, and the second being the standard errors in the same format. } \details{ This function computes properties of a fitted spatial point process model (object of class \code{"ppm"}). For a Poisson point process it can compute the fitted intensity function, or the expected number of points in a region. For a Gibbs point process it can compute the spatial trend (first order potential), conditional intensity, and approximate intensity of the process. Point estimates, standard errors, confidence intervals and prediction intervals are available. Given a point pattern dataset, we may fit a point process model to the data using the model-fitting algorithm \code{\link{ppm}}. This returns an object of class \code{"ppm"} representing the fitted point process model (see \code{\link{ppm.object}}). The parameter estimates in this fitted model can be read off simply by printing the \code{ppm} object. The spatial trend, conditional intensity and intensity of the fitted model are evaluated using this function \code{predict.ppm}. The default action is to create a rectangular grid of points in the observation window of the data point pattern, and evaluate the spatial trend at these locations. The argument \code{type} specifies the values that are desired: \describe{ \item{If \code{type="trend"}:}{ the ``spatial trend'' of the fitted model is evaluated at each required spatial location \eqn{u}. See below.
} \item{If \code{type="cif"}:}{ the conditional intensity \eqn{\lambda(u, X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location \eqn{u}, with respect to the data point pattern \eqn{X}. } \item{If \code{type="intensity"}:}{ the intensity \eqn{\lambda(u)}{lambda(u)} of the fitted model is evaluated at each required spatial location \eqn{u}. } \item{If \code{type="count"}:}{ the expected total number of points (or the expected number of points falling in \code{window}) is evaluated. If \code{window} is a tessellation, the expected number of points in each tile of the tessellation is evaluated. } } The spatial trend, conditional intensity, and intensity are all equivalent if the fitted model is a Poisson point process. However, if the model is not a Poisson process, then they are all different. The ``spatial trend'' is the (exponentiated) first order potential, and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}, while the intensity is a smaller value.] The default is to compute an estimate of the desired quantity. If \code{interval="confidence"} or \code{interval="prediction"}, the estimate is replaced by a confidence interval or prediction interval. If \code{se=TRUE}, then a standard error is also calculated, and is returned together with the (point or interval) estimate. The spatial locations where predictions are required, are determined by the (incompatible) arguments \code{ngrid} and \code{locations}. \itemize{ \item If the argument \code{ngrid} is present, then predictions are performed at a rectangular grid of locations in the window \code{window}. The result of prediction will be a pixel image or images. \item If \code{locations} is present, then predictions will be performed at the spatial locations given by this dataset. These may be an arbitrary list of spatial locations, or they may be a rectangular grid. The result of prediction will be either a numeric vector or a pixel image or images. \item If neither \code{ngrid} nor \code{locations} is given, then \code{ngrid} is assumed. The value of \code{ngrid} defaults to \code{\link{spatstat.options}("npixel")}, which is initialised to 128 when \pkg{spatstat} is loaded. } The argument \code{locations} may be a point pattern, a data frame or a list specifying arbitrary locations; or it may be a binary image mask (an object of class \code{"owin"} with type \code{"mask"}) or a pixel image (object of class \code{"im"}) specifying (a subset of) a rectangular grid of locations. \itemize{ \item If \code{locations} is a point pattern (object of class \code{"ppp"}), then prediction will be performed at the points of the point pattern. The result of prediction will be a vector of predicted values, one value for each point. If the model is a marked point process, then \code{locations} should be a marked point pattern, with marks of the same kind as the model; prediction will be performed at these marked points. The result of prediction will be a vector of predicted values, one value for each (marked) point. \item If \code{locations} is a data frame or list, then it must contain vectors \code{locations$x} and \code{locations$y} specifying the \eqn{x,y} coordinates of the prediction locations. 
Additionally, if the model is a marked point process, then \code{locations} must also contain a factor \code{locations$marks} specifying the marks of the prediction locations. These vectors must have equal length. The result of prediction will be a vector of predicted values, of the same length. \item If \code{locations} is a binary image mask, then prediction will be performed at each pixel in this binary image where the pixel value is \code{TRUE} (in other words, at each pixel that is inside the window). If the fitted model is an unmarked point process, then the result of prediction will be an image. If the fitted model is a marked point process, then prediction will be performed for each possible value of the mark at each such location, and the result of prediction will be a list of images, one for each mark value. \item If \code{locations} is a pixel image (object of class \code{"im"}), then prediction will be performed at each pixel in this image where the pixel value is defined (i.e.\ where the pixel value is not \code{NA}). } The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The format and use of \code{covariates} are analogous to those of the argument of the same name in \code{\link{ppm}}. It is either a data frame or a list of images. \itemize{ \item If \code{covariates} is a list of images, then the names of the entries should correspond to the names of covariates in the model formula \code{trend}. Each entry in the list must be an image object (of class \code{"im"}, see \code{\link{im.object}}). The software will look up the pixel values of each image at the quadrature points. \item If \code{covariates} is a data frame, then the \code{i}th row of \code{covariates} is assumed to contain covariate data for the \code{i}th location. When \code{locations} is a data frame, this just means that each row of \code{covariates} contains the covariate data for the location specified in the corresponding row of \code{locations}. When \code{locations} is a binary image mask, the row \code{covariates[i,]} must correspond to the location \code{x[i],y[i]} where \code{x = as.vector(raster.x(locations))} and \code{y = as.vector(raster.y(locations))}. } Note that if you only want to use prediction in order to generate a plot of the predicted values, it may be easier to use \code{\link{plot.ppm}} which calls this function and plots the results. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{plot.ppm}}, \code{\link{print.ppm}}, \code{\link{fitted.ppm}}, \code{\link{spatstat.options}} } \section{Warnings}{ The current implementation invokes \code{\link{predict.glm}} so that \bold{prediction is wrong} if the trend formula in \code{object} involves terms in \code{ns()}, \code{bs()} or \code{poly()}. This is a weakness of \code{\link{predict.glm}} itself! Error messages may be very opaque, as they tend to come from deep in the workings of \code{\link{predict.glm}}. 
If you are passing the \code{covariates} argument and the function crashes, it is advisable to start by checking that all the conditions listed above are satisfied. } \examples{ \testonly{op <- spatstat.options(npixel=32)} m <- ppm(cells ~ polynom(x,y,2), Strauss(0.05)) trend <- predict(m, type="trend") if(human <- interactive()) { image(trend) points(cells) } cif <- predict(m, type="cif") if(human) { persp(cif) } mj <- ppm(japanesepines ~ harmonic(x,y,2)) se <- predict(mj, se=TRUE) # image of standard error ci <- predict(mj, interval="c") # two images, confidence interval # prediction interval for total number of points predict(mj, type="count", interval="p") # prediction intervals for counts in tiles predict(mj, window=quadrats(japanesepines, 3), type="count", interval="p") # prediction at arbitrary locations predict(mj, locations=data.frame(x=0.3, y=0.4)) X <- runifpoint(5, Window(japanesepines)) predict(mj, locations=X, se=TRUE) # multitype rr <- matrix(0.06, 2, 2) ma <- ppm(amacrine ~ marks, MultiStrauss(rr)) Z <- predict(ma) Z <- predict(ma, type="cif") predict(ma, locations=data.frame(x=0.8, y=0.5,marks="on"), type="cif") \testonly{spatstat.options(op)} } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/suffstat.Rd0000644000176200001440000001072314331173077016126 0ustar liggesusers\name{suffstat} \alias{suffstat} \title{Sufficient Statistic of Point Process Model} \description{ The canonical sufficient statistic of a point process model is evaluated for a given point pattern. } \usage{ suffstat(model, X=data.ppm(model)) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}). } \item{X}{ A point pattern (object of class \code{"ppp"}). } } \value{ A numeric vector of sufficient statistics. The entries correspond to the model coefficients \code{coef(model)}. } \details{ The canonical sufficient statistic of \code{model} is evaluated for the point pattern \code{X}. This computation is useful for various Monte Carlo methods. Here \code{model} should be a point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}), typically obtained from the model-fitting function \code{\link{ppm}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). Every point process model fitted by \code{\link{ppm}} has a probability density of the form \deqn{f(x) = Z(\theta) \exp(\theta^T S(x))}{f(x) = Z(theta) exp(theta * S(x))} where \eqn{x} denotes a typical realisation (i.e. a point pattern), \eqn{\theta}{theta} is the vector of model coefficients, \eqn{Z(\theta)}{Z(theta)} is a normalising constant, and \eqn{S(x)} is a function of the realisation \eqn{x}, called the ``canonical sufficient statistic'' of the model. For example, the stationary Poisson process has canonical sufficient statistic \eqn{S(x)=n(x)}, the number of points in \eqn{x}. The stationary Strauss process with interaction range \eqn{r} (and fitted with no edge correction) has canonical sufficient statistic \eqn{S(x)=(n(x),s(x))} where \eqn{s(x)} is the number of pairs of points in \eqn{x} which are closer than a distance \eqn{r} to each other. \code{suffstat(model, X)} returns the value of \eqn{S(x)}, where \eqn{S} is the canonical sufficient statistic associated with \code{model}, evaluated when \eqn{x} is the given point pattern \code{X}. The result is a numeric vector, with entries which correspond to the entries of the coefficient vector \code{coef(model)}. 
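For instance, for a Strauss model fitted without edge correction, the result can be checked directly (a minimal sketch; \code{correction="none"} is chosen here to avoid the border adjustments discussed below):
\preformatted{
  fit <- ppm(swedishpines ~ 1, Strauss(7), correction="none")
  suffstat(fit)
  ## should agree with the number of points and the number of
  ## pairs of points lying closer than r = 7 units apart:
  X <- swedishpines
  c(npoints(X), sum(closepaircounts(X, 7))/2)
}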
The sufficient statistic \eqn{S} does not depend on the fitted coefficients of the model. However it does depend on the irregular parameters which are fixed in the original call to \code{\link{ppm}}, for example, the interaction range \code{r} of the Strauss process. The sufficient statistic also depends on the edge correction that was used to fit the model. For example in a Strauss process, \itemize{ \item If the model is fitted with \code{correction="none"}, the sufficient statistic is \eqn{S(x) = (n(x), s(x))} where \eqn{n(x)} is the number of points and \eqn{s(x)} is the number of pairs of points which are closer than \eqn{r} units apart. \item If the model is fitted with \code{correction="periodic"}, the sufficient statistic is the same as above, except that distances are measured in the periodic sense. \item If the model is fitted with \code{correction="translate"}, then \eqn{n(x)} is unchanged but \eqn{s(x)} is replaced by a weighted sum (the sum of the translation correction weights for all pairs of points which are closer than \eqn{r} units apart). \item If the model is fitted with \code{correction="border"} (the default), then points lying less than \eqn{r} units from the boundary of the observation window are treated as fixed. Thus \eqn{n(x)} is replaced by the number \eqn{n_r(x)}{n[r](x)} of points lying at least \eqn{r} units from the boundary of the observation window, and \eqn{s(x)} is replaced by the number \eqn{s_r(x)}{s[r](x)} of pairs of points, which are closer than \eqn{r} units apart, and at least one of which lies more than \eqn{r} units from the boundary of the observation window. } Non-finite values of the sufficient statistic (\code{NA} or \code{-Inf}) may be returned if the point pattern \code{X} is not a possible realisation of the model (i.e. if \code{X} has zero probability of occurring under \code{model} for all values of the canonical coefficients \eqn{\theta}{theta}). } \seealso{ \code{\link{ppm}} } \examples{ fitS <- ppm(swedishpines~1, Strauss(7)) suffstat(fitS) X <- rpoispp(intensity(swedishpines), win=Window(swedishpines)) suffstat(fitS, X) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/logLik.slrm.Rd0000644000176200001440000000317414331173076016465 0ustar liggesusers\name{logLik.slrm} \Rdversion{1.1} \alias{logLik.slrm} \title{ Loglikelihood of Spatial Logistic Regression } \description{ Computes the (maximised) loglikelihood of a fitted Spatial Logistic Regression model. } \usage{ \method{logLik}{slrm}(object, ..., adjust = TRUE) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } \item{adjust}{ Logical value indicating whether to adjust the loglikelihood of the model to make it comparable with a point process likelihood. See Details. } } \details{ This is a method for \code{\link[stats]{logLik}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It computes the log-likelihood of a fitted spatial logistic regression model. If \code{adjust=FALSE}, the loglikelihood is computed using the standard formula for the loglikelihood of a logistic regression model for a finite set of (pixel) observations. 
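(Explicitly, in the usual notation, writing \eqn{p_i}{p[i]} for the fitted probability of presence in pixel \eqn{i} and \eqn{y_i}{y[i]} for the presence indicator, this is \deqn{\sum_i y_i \log p_i + (1 - y_i) \log (1 - p_i).}{sum over i of y[i] * log(p[i]) + (1 - y[i]) * log(1 - p[i]).})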
If \code{adjust=TRUE} then the loglikelihood is adjusted so that it is approximately comparable with the likelihood of a point process in continuous space, by subtracting the value \eqn{n \log(a)}{n * log(a)} where \eqn{n} is the number of points in the original point pattern dataset, and \eqn{a} is the area of one pixel. } \value{ A numerical value. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) logLik(fit) logLik(fit, adjust=FALSE) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/dffit.ppm.Rd0000644000176200001440000000355614331173076016163 0ustar liggesusers\name{dffit.ppm} \alias{dffit} \alias{dffit.ppm} \title{ Case Deletion Effect Measure of Fitted Model } \description{ Computes the case deletion effect measure \code{DFFIT} for a fitted model. } \usage{ dffit(object, \dots) \method{dffit}{ppm}(object, \dots, collapse = FALSE, dfb = NULL) } \arguments{ \item{object}{ A fitted model, such as a point process model (object of class \code{"ppm"}). } \item{\dots}{ Additional arguments passed to \code{\link{dfbetas.ppm}}. } \item{collapse}{ Logical value specifying whether to collapse the vector-valued measure to a scalar-valued measure by adding all the components. } \item{dfb}{ Optional. The result of \code{dfbetas(object)}, if it has already been computed. } } \details{ The case deletion effect measure \code{DFFIT} is a model diagnostic traditionally used for regression models. In that context, \code{DFFIT[i,j]} is the negative change in the value of the \code{j}th term in the linear predictor that would occur if the \code{i}th data value were deleted. It is closely related to the diagnostic \code{DFBETA}. For a spatial point process model, \code{dffit} computes the analogous spatial case deletion diagnostic, described in Baddeley, Rubak and Turner (2019). } \value{ A measure (object of class \code{"msr"}). } \references{ Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, 15--48. } \author{ \spatstatAuthors. } \seealso{ \code{\link{dfbetas.ppm}} } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) \testonly{fit <- ppm(X ~x+y, nd=16)} plot(dffit(fit)) plot(dffit(fit, collapse=TRUE)) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat.model/man/as.owin.ppm.Rd0000644000176200001440000001552114510474260016436 0ustar liggesusers\name{as.owin.ppm} \alias{as.owin.ppm} \alias{as.owin.kppm} \alias{as.owin.dppm} \alias{as.owin.slrm} \alias{as.owin.msr} \title{Convert Data To Class owin} \description{ Converts data specifying an observation window in any of several formats, into an object of class \code{"owin"}. } \usage{ \method{as.owin}{ppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{kppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{dppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{slrm}(W, \dots, from=c("points", "covariates")) \method{as.owin}{msr}(W, \dots, fatal=TRUE) } \arguments{ \item{W}{ Data specifying an observation window, in any of several formats described under \emph{Details} below. } \item{fatal}{ Logical value determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} \item{from}{Character string.
See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link{owin.object}} for an overview. The generic function \code{\link[spatstat.geom]{as.owin}} converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{\link[spatstat.geom]{as.owin}} is generic, with methods for different classes of objects, and a default method. The argument \code{W} may be \itemize{ \item an object of class \code{"owin"} \item a structure with entries \code{xrange}, \code{yrange} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xmin}, \code{xmax}, \code{ymin}, \code{ymax} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle. This will accept objects of class \code{bbox} in the \code{sf} package. \item a numeric vector of length 4 (interpreted as \code{(xmin, xmax, ymin, ymax)} in that order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xl}, \code{xu}, \code{yl}, \code{yu} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle as \code{(xmin, xmax) = (xl, xu)} and \code{(ymin, ymax) = (yl, yu)}. This will accept objects of class \code{spp} used in the Venables and Ripley \pkg{spatial} package. \item an object of class \code{"ppp"} representing a point pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"psp"} representing a line segment pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"tess"} representing a tessellation. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"quad"} representing a quadrature scheme. In this case, the window of the \code{data} component will be extracted. \item an object of class \code{"im"} representing a pixel image. In this case, a window of type \code{"mask"} will be returned, with the same pixel raster coordinates as the image. An image pixel value of \code{NA}, signifying that the pixel lies outside the window, is transformed into the logical value \code{FALSE}, which is the corresponding convention for window masks. \item an object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"dppm"} representing a fitted point process model. In this case, if \code{from="points"} (the default), \code{as.owin} extracts the original point pattern data to which the model was fitted, and returns the observation window of this point pattern. If \code{from="covariates"} then \code{as.owin} extracts the covariate images to which the model was fitted, and returns a binary mask window that specifies the pixel locations. \item an object of class \code{"lpp"} representing a point pattern on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item an object of class \code{"lppm"} representing a fitted point process model on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item A \code{data.frame} with exactly three columns. Each row of the data frame corresponds to one pixel.
Each row contains the \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value indicating whether the pixel lies inside the window. \item A \code{data.frame} with exactly two columns. Each row of the data frame contains the \eqn{x} and \eqn{y} coordinates of a pixel that lies inside the window. \item an object of class \code{"distfun"}, \code{"nnfun"} or \code{"funxy"} representing a function of spatial location, defined on a spatial domain. The spatial domain of the function will be extracted. \item an object of class \code{"rmhmodel"} representing a point process model that can be simulated using \code{\link{rmh}}. The window (spatial domain) of the model will be extracted. The window may be \code{NULL} in some circumstances (indicating that the simulation window has not yet been determined). This is not treated as an error, because the argument \code{fatal} defaults to \code{FALSE} for this method. \item an object of class \code{"layered"} representing a list of spatial objects. See \code{\link{layered}}. In this case, \code{as.owin} will be applied to each of the objects in the list, and the union of these windows will be returned. \item an object of some other suitable class from another package. For full details, see \code{vignette('shapefiles')}. } If the argument \code{W} is not in one of these formats and cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). When \code{W} is a data frame, the argument \code{step} can be used to specify the pixel grid spacing; otherwise, the spacing will be guessed from the data. } \seealso{ \code{\link[spatstat.geom]{as.owin}}, \code{\link[spatstat.random]{as.owin.rmhmodel}}, \code{\link[spatstat.linnet]{as.owin.lpp}}. \code{\link[spatstat.geom]{owin.object}}, \code{\link[spatstat.geom]{owin}}. Additional methods for \code{as.owin} may be provided by other packages outside the \pkg{spatstat} family. } \examples{ fit <- ppm(cells ~ 1) as.owin(fit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.model/man/Pairwise.Rd0000644000176200001440000000667614331173075016060 0ustar liggesusers\name{Pairwise} \alias{Pairwise} \title{Generic Pairwise Interaction Model} \description{ Creates an instance of a pairwise interaction point process model which can then be fitted to point pattern data. } \usage{ Pairwise(pot, name, par, parnames, printfun) } \arguments{ \item{pot}{An R language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} \item{par}{List of numerical values for irregular parameters.} \item{parnames}{Vector of names of irregular parameters.} \item{printfun}{Do not specify this argument: for internal use only.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This code constructs a member of the pairwise interaction family \code{\link{pairwise.family}} with arbitrary pairwise interaction potential given by the user. Each pair of points in the point pattern contributes a factor \eqn{h(d)} to the probability density, where \eqn{d} is the distance between the two points. The factor term \eqn{h(d)} is \deqn{h(d) = \exp(\theta \cdot \mbox{pot}(d))}{h(d) = exp(theta * pot(d))} provided \eqn{\mbox{pot}(d)}{pot(d)} is finite, where \eqn{\theta}{theta} is the coefficient vector in the model.
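For instance, the potential \eqn{\mbox{pot}(d) = 1\{d \le r\}}{pot(d) = 1 when d <= r, and 0 otherwise} yields the Strauss interaction, with interaction parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)} (a worked illustration of the formula above, consistent with the StraussHard example below).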
The function \code{pot} must take as its first argument a matrix of interpoint distances, and evaluate the potential for each of these distances. The result must be either a matrix with the same dimensions as its input, or an array with its first two dimensions the same as its input (the latter case corresponds to a vector-valued potential). If irregular parameters are present, then the second argument to \code{pot} should be a vector of the same type as \code{par} giving those parameter values. The values returned by \code{pot} may be finite numeric values, or \code{-Inf} indicating a hard core (that is, the corresponding interpoint distance is forbidden). We define \eqn{h(d) = 0} if \eqn{\mbox{pot}(d) = -\infty}{pot(d) = -Inf}. Thus, a potential value of minus infinity is \emph{always} interpreted as corresponding to \eqn{h(d) = 0}, regardless of the sign and magnitude of \eqn{\theta}{theta}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ #This is the same as StraussHard(r=0.7,h=0.05) strpot <- function(d,par) { r <- par$r h <- par$h value <- (d <= r) value[d < h] <- -Inf value } mySH <- Pairwise(strpot, "StraussHard process", list(r=0.7,h=0.05), c("interaction distance r", "hard core distance h")) ppm(cells ~ 1, mySH, correction="isotropic") # Fiksel (1984) double exponential interaction # see Stoyan, Kendall, Mecke 1987 p 161 fikspot <- function(d, par) { r <- par$r h <- par$h zeta <- par$zeta value <- exp(-zeta * d) value[d < h] <- -Inf value[d > r] <- 0 value } Fiksel <- Pairwise(fikspot, "Fiksel double exponential process", list(r=3.5, h=1, zeta=1), c("interaction distance r", "hard core distance h", "exponential coefficient zeta")) fit <- ppm(unmark(spruces) ~1, Fiksel, rbord=3.5) fit plot(fitin(fit), xlim=c(0,4)) coef(fit) # corresponding values obtained by Fiksel (1984) were -1.9 and -6.0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/valid.ppm.Rd0000644000176200001440000000532514331173100016146 0ustar liggesusers\name{valid.ppm} \alias{valid.ppm} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ \method{valid}{ppm}(object, warn=TRUE, \dots) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{warn}{ Logical value indicating whether to issue a warning if the validity of the model cannot be checked (due to unavailability of the required code). } \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{valid}} for Poisson and Gibbs point process models (class \code{"ppm"}). The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, \code{\link{ppm}} does not check whether the fitted model actually exists as a point process. This checking is done by \code{valid.ppm}. Unlike a regression model, which is well-defined for any values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). 
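A minimal sketch of such a check (the \code{redwood} data are clustered, so a Strauss model fitted to them typically has \eqn{\gamma > 1}{gamma > 1} and is invalid):
\preformatted{
  fit <- ppm(redwood ~ 1, Strauss(0.1))
  valid(fit)   # FALSE: the fitted interaction parameter exceeds 1
}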
By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{valid.ppm} checks whether the fitted model \code{object} specifies a well-defined point process. It returns \code{TRUE} if the model is well-defined. Another possible reason for invalid models is that the data may not be adequate for estimation of the model parameters. In this case, some of the fitted coefficients could be \code{NA} or infinite values. If this happens then \code{valid.ppm} returns \code{FALSE}. Use the function \code{\link{project.ppm}} to force the fitted model to be valid. } \value{ A logical value, or \code{NA}. } \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{project.ppm}} } \examples{ fit1 <- ppm(cells, ~1, Strauss(0.1)) valid(fit1) fit2 <- ppm(redwood, ~1, Strauss(0.1)) valid(fit2) } \keyword{spatial} \keyword{models} spatstat.model/man/unitname.Rd0000644000176200001440000000540614331173077016111 0ustar liggesusers\name{unitname} \alias{unitname.dppm} \alias{unitname.kppm} \alias{unitname.minconfit} \alias{unitname.ppm} \alias{unitname.slrm} \alias{unitname<-.dppm} \alias{unitname<-.kppm} \alias{unitname<-.minconfit} \alias{unitname<-.ppm} \alias{unitname<-.slrm} \title{Name for Unit of Length} \description{ Inspect or change the name of the unit of length in a spatial dataset. } \usage{ \method{unitname}{dppm}(x) \method{unitname}{kppm}(x) \method{unitname}{minconfit}(x) \method{unitname}{ppm}(x) \method{unitname}{slrm}(x) \method{unitname}{dppm}(x) <- value \method{unitname}{kppm}(x) <- value \method{unitname}{minconfit}(x) <- value \method{unitname}{ppm}(x) <- value \method{unitname}{slrm}(x) <- value } \arguments{ \item{x}{A spatial dataset. Either a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}), a tessellation (object of class \code{"tess"}), a quadrature scheme (object of class \code{"quad"}), or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"dppm"} or \code{"minconfit"}). } \item{value}{ Name of the unit of length. See Details. } } \details{ Spatial datasets in the \pkg{spatstat} package may include the name of the unit of length. This name is used when printing or plotting the dataset, and in some other applications. \code{unitname(x)} extracts this name, and \code{unitname(x) <- value} sets the name to \code{value}. A valid name is either \itemize{ \item a single character string \item a vector of two character strings giving the singular and plural forms of the unit name \item a list of length 3, containing two character strings giving the singular and plural forms of the basic unit, and a number specifying the multiple of this unit. } Note that re-setting the name of the unit of length \emph{does not} affect the numerical values in \code{x}. It changes only the string containing the name of the unit of length. To rescale the numerical values, use \code{\link{rescale}}. } \value{ The return value of \code{unitname} is an object of class \code{"unitname"} containing the name of the unit of length in \code{x}. 
There are methods for \code{print}, \code{summary}, \code{as.character}, \code{\link{rescale}} and \code{\link{compatible}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rescale}}, \code{\link{owin}}, \code{\link{ppp}} } \examples{ X <- runifrect(20) # if the unit of length is 1 metre: unitname(X) <- c("metre", "metres") # if the unit of length is 6 inches: unitname(X) <- list("inch", "inches", 6) } \keyword{spatial} \keyword{manip} spatstat.model/man/unstack.msr.Rd0000644000176200001440000000223614331173100016522 0ustar liggesusers\name{unstack.msr} \alias{unstack.msr} \title{ Separate a Vector Measure into its Scalar Components } \description{ Converts a vector-valued measure into a list of scalar-valued measures. } \usage{ \method{unstack}{msr}(x, \dots) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic \code{\link[utils]{unstack}} for the class \code{"msr"} of measures. If \code{x} is a vector-valued measure, then \code{y <- unstack(x)} is a list of scalar-valued measures defined by the components of \code{x}. The \code{j}th entry of the list, \code{y[[j]]}, is equivalent to the \code{j}th component of the vector measure \code{x}. If \code{x} is a scalar-valued measure, then the result is a list consisting of one entry, which is \code{x}. } \value{ A list of measures, of class \code{"solist"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.ppp}} \code{\link{split.msr}}. } \examples{ fit <- ppm(cells ~ x) m <- residuals(fit, type="score") m unstack(m) } \keyword{spatial} \keyword{manip} spatstat.model/man/Extract.msr.Rd0000644000176200001440000000230014331173076016470 0ustar liggesusers\name{Extract.msr} \alias{[.msr} \title{Extract Subset of Signed or Vector Measure} \description{ Extract a subset of a signed measure or vector-valued measure. } \usage{ \method{[}{msr}(x, i, j, \dots) } \arguments{ \item{x}{ A signed or vector measure. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or any type of index that applies to a matrix. } \item{j}{ Subset index selecting the vector coordinates to be extracted, if \code{x} is a vector-valued measure. } \item{\dots}{Ignored.} } \value{ An object of class \code{"msr"}. } \details{ This operator extracts a subset of the data which determines the signed measure or vector-valued measure \code{x}. The result is another measure. } \seealso{ \code{\link{msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") rp[square(0.5)] rs[ , 2:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.model/man/mincontrast.Rd0000644000176200001440000001723514331173077016635 0ustar liggesusers\name{mincontrast} \alias{mincontrast} \title{Method of Minimum Contrast} \description{ A general low-level algorithm for fitting theoretical point process models to point pattern data by the Method of Minimum Contrast. 
} \usage{ mincontrast(observed, theoretical, startpar, \dots, ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), action.bad.values=c("warn", "stop", "silent"), control=list(), stabilize=TRUE, pspace=NULL) } \arguments{ \item{observed}{ Summary statistic, computed for the data. An object of class \code{"fv"}. } \item{theoretical}{ An R language function that calculates the theoretical expected value of the summary statistic, given the model parameters. See Details. } \item{startpar}{ Vector of initial values of the parameters of the point process model (passed to \code{theoretical}). } \item{\dots}{ Additional arguments passed to the function \code{theoretical} and to the optimisation algorithm \code{\link[stats]{optim}}. } \item{ctrl}{ Optional. List of arguments controlling the optimisation. See Details. } \item{fvlab}{ Optional. List containing some labels for the return value. See Details. } \item{explain}{ Optional. List containing strings that give a human-readable description of the model, the data and the summary statistic. } \item{action.bad.values}{ String (partially matched) specifying what to do if values of the summary statistic are \code{NA}, \code{NaN} or infinite. See Details. } \item{control}{ Optional. Argument passed to \code{\link[stats]{optim}}. A list of parameters which control the behaviour of the optimization algorithm. } \item{stabilize}{ Logical value specifying whether to numerically stabilize the optimization algorithm, by specifying suitable default values of \code{control$fnscale} and \code{control$parscale}. } \item{pspace}{ For internal use by the package only. } } \details{ This function is a general algorithm for fitting point process models by the Method of Minimum Contrast. If you want to fit the Thomas process, see \code{\link{thomas.estK}}. If you want to fit a log-Gaussian Cox process, see \code{\link{lgcp.estK}}. If you want to fit the \Matern cluster process, see \code{\link{matclust.estK}}. The Method of Minimum Contrast (Pfanzagl, 1969; Diggle and Gratton, 1984) is a general technique for fitting a point process model to point pattern data. First a summary function (typically the \eqn{K} function) is computed from the data point pattern. Second, the theoretical expected value of this summary statistic under the point process model is derived (if possible, as an algebraic expression involving the parameters of the model) or estimated from simulations of the model. Then the model is fitted by finding the optimal parameter values for the model to give the closest match between the theoretical and empirical curves. The argument \code{observed} should be an object of class \code{"fv"} (see \code{\link{fv.object}}) containing the values of a summary statistic computed from the data point pattern. Usually this is the function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives. The argument \code{theoretical} should be a user-supplied function that computes the theoretical expected value of the summary statistic. It must have an argument named \code{par} that will be the vector of parameter values for the model (the length and format of this vector are determined by the starting values in \code{startpar}). 
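For orientation, here is a minimal sketch of a user-supplied \code{theoretical} function and a call to \code{mincontrast} (the \eqn{K} function formula is that of the Thomas cluster process; the parameterisation and starting values are illustrative assumptions, not requirements of \code{mincontrast}):

    # hypothetical sketch: theoretical K function of a Thomas process,
    # parameterised here as par = c(kappa, sigma2)
    Kthomas <- function(par, rvals, ...) {
      pi * rvals^2 + (1 - exp(-rvals^2/(4 * par[[2]])))/par[[1]]
    }
    fit <- mincontrast(Kest(redwood), Kthomas,
                       startpar=c(kappa=10, sigma2=0.002))

The remaining requirements for \code{theoretical} are described below.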
The function \code{theoretical} should also expect a second argument (the first argument other than \code{par}) containing values of the distance \eqn{r} for which the theoretical value of the summary statistic \eqn{K(r)} should be computed. The value returned by \code{theoretical} should be a vector of the same length as the given vector of \eqn{r} values. The argument \code{ctrl} determines the contrast criterion (the objective function that will be minimised). The algorithm minimises the criterion \deqn{ D(\theta)= \int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}} |\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r }{ D(theta) = integral from rmin to rmax of abs(Fhat(r)^q - F(theta,r)^q)^p } where \eqn{\theta}{theta} is the vector of parameters of the model, \eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the theoretical expected value of the summary statistic, and \eqn{p,q} are two exponents. The default is \code{q = 1/4}, \code{p=2}, so that the contrast criterion is the integrated squared difference between the fourth roots of the two functions (Waagepetersen, 2007). The argument \code{action.bad.values} specifies what to do if some of the values of the summary statistic are \code{NA}, \code{NaN} or infinite. If \code{action.bad.values="stop"}, or if all of the values are bad, then a fatal error occurs. Otherwise, the domain of the summary function is shortened to avoid the bad values. The shortened domain is the longest interval on which the function values are finite (provided this interval is at least half the length of the original domain). A warning is issued if \code{action.bad.values="warn"} (the default) and no warning is issued if \code{action.bad.values="silent"}. The other arguments just make things print nicely. The argument \code{fvlab} contains labels for the component \code{fit} of the return value. The argument \code{explain} contains human-readable strings describing the data, the model and the summary statistic. The \code{"..."} argument of \code{mincontrast} can be used to pass extra arguments to the function \code{theoretical} and/or to the optimisation function \code{\link[stats]{optim}}. In this case, the function \code{theoretical} should also have a \code{"..."} argument and should ignore it (so that it ignores arguments intended for \code{\link[stats]{optim}}). } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } \item{opt }{The object returned from the optimizer \code{\link[stats]{optim}}.} \item{ctrl }{List of parameters determining the contrast objective.} \item{info }{List of explanatory strings.} } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193--212. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. Pfanzagl, J. (1969). On the measurability and consistency of minimum contrast estimates. \emph{Metrika} \bold{14}, 249--276. Waagepetersen, R. (2007).
An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \rasmus. Adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{thomas.estK}}. } \keyword{spatial} \keyword{models} spatstat.model/man/dppkernel.Rd0000644000176200001440000000121514331173076016246 0ustar liggesusers\name{dppkernel} \alias{dppkernel} \title{Extract Kernel from Determinantal Point Process Model Object} \description{ Returns the kernel of a determinantal point process model as a function of one argument \code{x}. } \usage{dppkernel(model, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Arguments passed to \code{\link{dppapproxkernel}} if the exact kernel is unknown.} } \value{A function.} \author{ \adrian, \rolf and \ege } \examples{ kernelMatern <- dppkernel(dppMatern(lambda = 100, alpha=.01, nu=1, d=2)) plot(kernelMatern, xlim = c(0,0.1)) } \keyword{spatial} \keyword{models} spatstat.model/man/anova.mppm.Rd0000644000176200001440000001200514510474261016334 0ustar liggesusers\name{anova.mppm} \alias{anova.mppm} \title{ANOVA for Fitted Point Process Models for Replicated Patterns} \description{ Performs analysis of deviance for one or more point process models fitted to replicated point pattern data. } \usage{ \method{anova}{mppm}(object, \dots, test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) } \arguments{ \item{object}{ Object of class \code{"mppm"} representing a point process model that was fitted to replicated point patterns. } \item{\dots}{ Optional. Additional objects of class \code{"mppm"}. } \item{test}{ Type of hypothesis test to perform. A character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. } \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. } \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of the variance of the fitted coefficients of each model. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for comparing several fitted point process models of class \code{"mppm"}, usually generated by the model-fitting function \code{\link{mppm}}. If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are `pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution.
In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley, Turner and Rubak (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment. The argument \code{test} determines which hypothesis test, if any, will be performed to compare the models. The argument \code{test} should be a character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, or \code{NULL}. The first option \code{"Chisq"} gives the likelihood ratio test based on the asymptotic chi-squared distribution of the deviance difference. The meaning of the other options is explained in \code{\link{anova.glm}}. } \section{Random effects models are currently not supported}{ For models with random effects (i.e. where the call to \code{\link{mppm}} included the argument \code{random}), analysis of deviance is currently not supported, due to changes in the \pkg{nlme} package. We will try to find a solution. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) #' test for loglinear trend in x coordinate mod0 <- mppm(X~1, data=H, Poisson()) modx <- mppm(X~x, data=H, Poisson()) anova(mod0, modx, test="Chi") # not significant anova(modx, test="Chi") # not significant #' test for inhibition mod0S <- mppm(X~1, data=H, Strauss(2)) anova(mod0, mod0S, test="Chi") # significant! #' test for trend after accounting for inhibition modxS <- mppm(X~x, data=H, Strauss(2)) anova(mod0S, modxS, test="Chi") # not significant } \references{ \baddrubaturnbook Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \keyword{spatial} \keyword{models} \keyword{methods} \concept{Model selection} spatstat.model/man/PairPiece.Rd0000644000176200001440000000743414331173075016133 0ustar liggesusers\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{Vector of jump points for the potential function.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. } \details{ A pairwise interaction point process in a bounded region is a stochastic point process with probability density of the form \deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x[1],\ldots,x[n]) = alpha .
product { b(x[i]) } product { h(x[i], x[j]) } } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern. The first product on the right hand side is over all points of the pattern; the second product is over all unordered pairs of points of the pattern. Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density. The pairwise interaction term \eqn{h(u, v)} is called \emph{piecewise constant} if it depends only on the distance between \eqn{u} and \eqn{v}, say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant function (a function which is constant except for jumps at a finite number of places). The use of piecewise constant interaction terms was first suggested by Takacs (1986). The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant pairwise interaction is yielded by the function \code{PairPiece()}. See the examples below. The entries of \code{r} must be strictly increasing, positive numbers. They are interpreted as the points of discontinuity of \eqn{H}. It is assumed that \eqn{H(s) = 1} for all \eqn{s > r_{max}}{s > rmax} where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the model has as many regular parameters (see \code{\link{ppm}}) as there are entries in \code{r}. The \eqn{i}-th regular parameter \eqn{\theta_i}{theta[i]} is the logarithm of the value of the interaction function \eqn{H} on the interval \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. If \code{r} is a single number, this model is similar to the Strauss process, see \code{\link{Strauss}}. The difference is that in \code{PairPiece} the interaction function is continuous on the right, while in \code{\link{Strauss}} it is continuous on the left. The analogue of this model for multitype point processes has not yet been implemented. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{rmh.ppm}} } \examples{ PairPiece(c(0.1,0.2)) # prints a sensible description of itself ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) # fit a stationary piecewise constant pairwise interaction process \donttest{ ppm(cells ~polynom(x,y,3), PairPiece(c(0.05, 0.1))) # nonstationary process with log-cubic polynomial trend } } \references{ Takacs, R. (1986) Estimator for the pair potential of a Gibbsian point process. \emph{Statistics} \bold{17}, 429--433. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/pairsat.family.Rd0000644000176200001440000000446114331173077017214 0ustar liggesusers\name{pairsat.family} \alias{pairsat.family} \title{Saturated Pairwise Interaction Point Process Family} \description{ An object describing the Saturated Pairwise Interaction family of point process models. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the ``saturated pairwise interaction'' family of point process models. If you need to create a specific interaction model for use in spatial pattern analysis, use the function \code{\link{Saturated}()} or the two existing implementations of models in this family, \code{\link{Geyer}()} and \code{\link{SatPiece}()}.
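For instance, one of these ready-made saturation models can be fitted directly (a sketch; the radius and saturation values are arbitrary):

    ppm(cells ~ 1, Geyer(r=0.07, sat=2))
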
Geyer (1999) introduced the ``saturation process'', a modification of the Strauss process in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{c}. This model is implemented in the function \code{\link{Geyer}()}. The present class \code{pairsat.family} is the extension of this saturation idea to all pairwise interactions. Note that the resulting models are no longer pairwise interaction processes; they have interactions of infinite order. \code{pairsat.family} is an object of class \code{"isf"} containing a function \code{pairsat.family$eval} for evaluating the sufficient statistics of any saturated pairwise interaction point process model in which the original pair potentials take an exponential family form. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{Geyer}} to create the Geyer saturation process. \code{\link{SatPiece}} to create a saturated process with piecewise constant pair potential. \code{\link{Saturated}} to create a more general saturation model. Other families: \code{\link{inforder.family}}, \code{\link{ord.family}}, \code{\link{pairwise.family}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/predict.rppm.Rd0000644000176200001440000000511414331173077016674 0ustar liggesusers\name{predict.rppm} \alias{fitted.rppm} \alias{predict.rppm} \title{ Make Predictions From a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, compute the predicted intensity of the model. } \usage{ \method{predict}{rppm}(object, \dots) \method{fitted}{rppm}(object, \dots) } \arguments{ \item{object}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Optional arguments passed to \code{\link{predict.ppm}} to specify the locations where prediction is required. (Ignored by \code{fitted.rppm}.) } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. They compute the fitted intensity of a point process model. The argument \code{object} should be a fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. The \code{fitted} method computes the fitted intensity at the original data points, yielding a numeric vector with one entry for each data point. The \code{predict} method computes the fitted intensity at any locations. By default, predictions are calculated at a regular grid of spatial locations, and the result is a pixel image giving the predicted intensity values at these locations. Alternatively, predictions can be performed at other locations, or a finer grid of locations, or only at certain specified locations, using additional arguments \code{\dots} which will be interpreted by \code{\link{predict.ppm}}. Common arguments are \code{ngrid} to increase the grid resolution, \code{window} to specify the prediction region, and \code{locations} to specify the exact locations of predictions. See \code{\link{predict.ppm}} for details of these arguments.
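For example (a sketch; the grid size and query locations are arbitrary choices):

    fit <- rppm(unmark(gorillas) ~ vegetation, data=gorillas.extra)
    Z <- predict(fit, ngrid=256)           # pixel image on a finer grid
    Y <- runifpoint(10, Window(gorillas))  # arbitrary query locations
    v <- predict(fit, locations=Y)         # numeric vector of predictions
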
Predictions are computed by evaluating the explanatory covariates at each desired location, and applying the recursive partitioning rule to each set of covariate values. } \value{ The result of \code{fitted.rppm} is a numeric vector. The result of \code{predict.rppm} is a pixel image, a list of pixel images, or a numeric vector. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}} } \examples{ fit <- rppm(unmark(gorillas) ~ vegetation, data=gorillas.extra) plot(predict(fit)) lambdaX <- fitted(fit) lambdaX[1:5] # Mondriaan pictures plot(predict(rppm(redwoodfull ~ x + y))) points(redwoodfull) } \keyword{spatial} \keyword{models} spatstat.model/man/domain.ppm.Rd0000644000176200001440000000452114331173076016327 0ustar liggesusers\name{domain.ppm} \alias{domain.ppm} \alias{domain.kppm} \alias{domain.dppm} \alias{domain.slrm} \alias{domain.msr} \title{ Extract the Domain of any Spatial Object } \description{ Given a spatial object such as a point pattern, in any number of dimensions, this function extracts the spatial domain in which the object is defined. } \usage{ \method{domain}{ppm}(X, \dots, from=c("points", "covariates")) \method{domain}{kppm}(X, \dots, from=c("points", "covariates")) \method{domain}{dppm}(X, \dots, from=c("points", "covariates")) \method{domain}{slrm}(X, \dots, from=c("points", "covariates")) \method{domain}{msr}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a point pattern (in any number of dimensions), line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } \item{from}{Character string. See Details.} } \details{ The function \code{\link[spatstat.geom]{domain}} is generic. For a spatial object \code{X} in any number of dimensions, \code{domain(X)} extracts the spatial domain in which \code{X} is defined. For a two-dimensional object \code{X}, typically \code{domain(X)} is the same as \code{Window(X)}. Exceptions occur for methods related to linear networks. The argument \code{from} applies when \code{X} is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"dppm"} or \code{"slrm"}). If \code{from="points"} (the default), \code{domain} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{domain} returns the window in which the spatial covariates of the model were provided. } \value{ A spatial object representing the domain of \code{X}. Typically a window (object of class \code{"owin"}), a three-dimensional box (\code{"box3"}), a multidimensional box (\code{"boxx"}) or a linear network (\code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.geom]{domain}}, \code{\link[spatstat.geom]{domain.quadratcount}}, \code{\link[spatstat.explore]{domain.quadrattest}}, \code{\link[spatstat.random]{domain.rmhmodel}}, \code{\link[spatstat.linnet]{domain.lpp}}. \code{\link{Window}}, \code{\link{Frame}}. } \examples{ domain(ppm(redwood ~ 1)) } \keyword{spatial} \keyword{manip} spatstat.model/man/update.detpointprocfamily.Rd0000644000176200001440000000113514331173100021446 0ustar liggesusers\name{update.detpointprocfamily} \alias{update.detpointprocfamily} \title{Set Parameter Values in a Determinantal Point Process Model} \description{ Set parameter values in a determinantal point process model object.
} \usage{ \method{update}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{Object of class \code{"detpointprocfamily"}.} \item{\dots}{ Arguments of the form \code{tag=value} specifying the parameter values to set. } } \value{ Another object of class \code{"detpointprocfamily"}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/plot.influence.ppm.Rd0000644000176200001440000000477514510474260020014 0ustar liggesusers\name{plot.influence.ppm} \alias{plot.influence.ppm} \title{ Plot Influence Measure } \description{ Plots an influence measure that has been computed by \code{\link{influence.ppm}}. } \usage{ \method{plot}{influence.ppm}(x, ..., multiplot=TRUE) } \arguments{ \item{x}{ Influence measure (object of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to plot more than one panel. This happens if the original point process model is multitype. } } \details{ This is the plot method for objects of class \code{"influence.ppm"}. These objects are computed by the command \code{\link{influence.ppm}}. For a point process model fitted by maximum likelihood or maximum pseudolikelihood (the default), influence values are associated with the data points. The display shows circles centred at the data points with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one such display for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data points regardless of type. For a model fitted by logistic composite likelihood (\code{method="logi"} in \code{\link{ppm}}) influence values are associated with the data points and also with the dummy points used to fit the model. The display consists of two panels, for the data points and dummy points respectively, showing circles with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one pair of panels for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data and dummy points regardless of type. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. } \value{ None. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~ x + y) plot(influence(fit)) } \keyword{spatial} \keyword{models} \concept{Model diagnostics}
} \usage{ \method{as.ppp}{influence.ppm}(X, \dots) \method{as.owin}{influence.ppm}(W, \dots, fatal=TRUE) \method{domain}{influence.ppm}(X, \dots) \method{Smooth}{influence.ppm}(X, \dots) \method{Window}{influence.ppm}(X, \dots) \method{integral}{influence.ppm}(f, domain, \dots) } \arguments{ \item{X,W,f}{An object of class \code{"influence.ppm"}.} \item{domain}{ Optional. Domain of integration: a window (class \code{"owin"}) or a tessellation (class \code{"tess"}). } \item{\dots}{ Additional arguments. See Details. } \item{fatal}{ Logical value indicating what to do if the data cannot be converted to a window. If \code{fatal=TRUE} (the default) an error occurs. If \code{fatal=FALSE} a value of \code{NULL} is returned. } } \value{ A window (object of class \code{"owin"}) for \code{as.owin}, \code{domain} and \code{Window}. A point pattern (object of class \code{"ppp"}) for \code{as.ppp}. A numeric value or numeric vector for \code{integral}. A pixel image, or list of pixel images, for \code{Smooth}. } \details{ These functions are methods for the class \code{"influence.ppm"}. An object of this class represents the influence measure of a fitted point process model (see \code{\link{influence.ppm}}). For \code{as.ppp}, \code{domain}, \code{integral} and \code{Window}, additional arguments (\code{\dots}) are ignored. For \code{as.owin} and \code{Smooth}, additional arguments are passed to the method for class \code{"ppp"}. } \seealso{ \code{\link{influence.ppm}}, \code{\link{plot.influence.ppm}}, \code{\link{[.influence.ppm}} } \examples{ fit <- ppm(cells ~ x) a <- influence(fit) Window(a) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} \concept{Model diagnostics} spatstat.model/man/Hybrid.Rd0000644000176200001440000000630014331173075015502 0ustar liggesusers\name{Hybrid} \alias{Hybrid} \title{ Hybrid Interaction Point Process Model } \description{ Creates an instance of a hybrid point process model which can then be fitted to point pattern data. } \usage{ Hybrid(...) } \arguments{ \item{\dots}{ Two or more interactions (objects of class \code{"interact"}) or objects which can be converted to interactions. See Details. } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The \emph{hybrid} of two point processes, with probability densities \eqn{f(x)} and \eqn{g(x)} respectively, is the point process with probability density \deqn{h(x) = c \, f(x) \, g(x)}{h(x) = c * f(x) * g(x)} where \eqn{c} is a normalising constant. Equivalently, the hybrid of two point processes with conditional intensities \eqn{\lambda(u,x)}{lambda(u,x)} and \eqn{\kappa(u,x)}{kappa(u,x)} is the point process with conditional intensity \deqn{ \phi(u,x) = \lambda(u,x) \, \kappa(u,x). }{ phi(u,x) = lambda(u,x) * kappa(u,x). } The hybrid of \eqn{m > 2} point processes is defined in a similar way. The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of a hybrid interaction is yielded by the function \code{Hybrid()}. The arguments \code{\dots} will be interpreted as interpoint interactions (objects of class \code{"interact"}) and the result will be the hybrid of these interactions.
Each argument must either be an interpoint interaction (object of class \code{"interact"}), or a point process model (object of class \code{"ppm"}) from which the interpoint interaction will be extracted. The arguments \code{\dots} may also be given in the form \code{name=value}. This is purely cosmetic: it can be used to attach simple mnemonic names to the component interactions, and makes the printed output from \code{\link{print.ppm}} neater. } \value{ An object of class \code{"interact"} describing an interpoint interaction structure. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \code{DOI: 10.18637/jss.v055.i11} } \seealso{ \code{\link{ppm}} } \examples{ Hybrid(Strauss(0.1), Geyer(0.2, 3)) Hybrid(Ha=Hardcore(0.05), St=Strauss(0.1), Ge=Geyer(0.2, 3)) fit <- ppm(redwood ~ 1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) fit ctr <- rmhcontrol(nrep=5e4, expand=1) plot(simulate(fit, control=ctr)) # hybrid components can be models (including hybrid models) Hybrid(fit, S=Softcore(0.5)) # plot.fii only works if every component is a pairwise interaction fit2 <- ppm(swedishpines ~ 1, Hybrid(DG=DiggleGratton(2,10), S=Strauss(5))) plot(fitin(fit2)) plot(fitin(fit2), separate=TRUE, mar.panel=rep(4,4)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/eem.Rd0000644000176200001440000000542414331173076015036 0ustar liggesusers\name{eem} \alias{eem} \alias{eem.ppm} \alias{eem.slrm} \title{ Exponential Energy Marks } \description{ Given a point process model fitted to a point pattern, compute the Stoyan-Grabarnik diagnostic ``exponential energy marks'' for the data points. } \usage{ eem(fit, \dots) \method{eem}{ppm}(fit, check=TRUE, \dots) \method{eem}{slrm}(fit, check=TRUE, \dots) } \arguments{ \item{fit}{ The fitted point process model. An object of class \code{"ppm"} or \code{"slrm"}. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{\dots}{Ignored.} } \value{ A vector containing the values of the exponential energy mark for each point in the pattern. } \details{ Stoyan and Grabarnik (1991) proposed a diagnostic tool for point process models fitted to spatial point pattern data. Each point \eqn{x_i}{x[i]} of the data pattern \eqn{X} is given a `mark' or `weight' \deqn{m_i = \frac 1 {\hat\lambda(x_i,X)}}{m[i] = 1/\lambda(x[i],X)} where \eqn{\hat\lambda(x_i,X)}{\lambda(x[i],X)} is the conditional intensity of the fitted model. If the fitted model is correct, then the sum of these marks for all points in a region \eqn{B} has expected value equal to the area of \eqn{B}. The argument \code{fit} must be a fitted point process model (object of class \code{"ppm"} or \code{"slrm"}). Such objects are produced by the fitting algorithms \code{\link{ppm}} and \code{\link{slrm}}. This fitted model object contains complete information about the original data pattern and the model that was fitted to it. The value returned by \code{eem} is the vector of weights \eqn{m_i}{m[i]} associated with the points \eqn{x_i}{x[i]} of the original data pattern. The original data pattern (in corresponding order) can be extracted from \code{fit} using \code{\link{response}}.
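This property can be checked on any subregion \eqn{B} (a sketch; the choice of \eqn{B} is arbitrary):

    fit <- ppm(cells ~ x, Strauss(r=0.15))
    ee <- eem(fit)
    B <- square(0.5)
    sum(ee[inside.owin(cells$x, cells$y, B)])  # should be close to area(B) = 0.25
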
The function \code{\link{diagnose.ppm}} produces a set of sensible diagnostic plots based on these weights. } \references{ Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100. } \seealso{ \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{data.ppm}}, \code{\link{residuals.ppm}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~x, Strauss(r=0.15)) ee <- eem(fit) sum(ee)/area(Window(cells)) # should be about 1 if model is correct Y <- setmarks(cells, ee) plot(Y, main="Cells data\n Exponential energy marks") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/vargamma.estpcf.Rd0000644000176200001440000001521614331173100017332 0ustar liggesusers\name{vargamma.estpcf} \alias{vargamma.estpcf} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma Kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ vargamma.estpcf(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical pair correlation function of the model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013).
It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of \pkg{spatstat}, instead of specifying \code{nu} (then called \code{nu.ker}), the user could specify \code{nu.pcf}, which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported, but is not recommended, for consistency across the package. If it is used, exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{scale} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen.
Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ u <- vargamma.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat.model/man/DiggleGratton.Rd0000644000176200001440000000544514331173075017024 0ustar liggesusers\name{DiggleGratton} \alias{DiggleGratton} \title{Diggle-Gratton model} \description{ Creates an instance of the Diggle-Gratton pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ DiggleGratton(delta=NA, rho) } \arguments{ \item{delta}{lower threshold \eqn{\delta}{\delta}} \item{rho}{upper threshold \eqn{\rho}{\rho}} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - \delta)/(\rho - \delta))^\kappa, { } \delta \le t \le \rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < \delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > \rho}. Here \eqn{\delta}{\delta}, \eqn{\rho}{\rho} and \eqn{\kappa}{\kappa} are parameters. Note that we use the symbol \eqn{\kappa}{\kappa} where Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987) use \eqn{\beta}{\beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{\beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{\delta \le \rho}. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. The strength of inhibition increases with \eqn{\kappa}{\kappa}. For \eqn{\kappa=0}{\kappa=0} the model is a hard core process with hard core radius \eqn{\delta}{\delta}. For \eqn{\kappa=\infty}{\kappa=Inf} the model is a hard core process with hard core radius \eqn{\rho}{\rho}. The irregular parameters \eqn{\delta, \rho}{\delta, \rho} must be given in the call to \code{DiggleGratton}, while the regular parameter \eqn{\kappa}{\kappa} will be estimated. If the lower threshold \code{delta} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{delta} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}} } \examples{ ppm(cells ~1, DiggleGratton(0.05, 0.1)) } \references{ Diggle, P.J., Gates, D.J. and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. 
} \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/Kmodel.kppm.Rd0000644000176200001440000000362414331173075016450 0ustar liggesusers\name{Kmodel.kppm} \alias{Kmodel.kppm} \alias{pcfmodel.kppm} \title{K Function or Pair Correlation Function of Cluster Model or Cox Model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a cluster point process model or Cox point process model. } \usage{ \method{Kmodel}{kppm}(model, \dots) \method{pcfmodel}{kppm}(model, \dots) } \arguments{ \item{model}{ A fitted cluster point process model (object of class \code{"kppm"}) typically obtained from the model-fitting algorithm \code{\link{kppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. In particular this is possible for a fitted cluster point process model (object of class \code{"kppm"} obtained from \code{\link{kppm}}). The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"kppm"}. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{kppm}} to fit cluster models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. } \examples{ fit <- kppm(redwood ~ x, "MatClust") K <- Kmodel(fit) K(c(0.1, 0.2)) curve(K(x), from=0, to=0.25) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/zclustermodel.Rd0000644000176200001440000000176414331173100017153 0ustar liggesusers\name{zclustermodel} \alias{zclustermodel} \title{ Cluster Point Process Model } \description{ Experimental code. Creates an object representing a cluster point process model. Typically used for theoretical calculations about such a model. } \usage{ zclustermodel(name = "Thomas", \dots, mu, kappa, scale) } \arguments{ \item{name}{ Name of the cluster process. One of \code{"Thomas"}, \code{"MatClust"}, \code{"VarGamma"} or \code{"Cauchy"}. } \item{\dots}{ Other arguments needed for the model. } \item{mu}{ Mean cluster size. A single number, or a pixel image. } \item{kappa}{ Parent intensity. A single number. } \item{scale}{ Cluster scale parameter of the model. } } \details{ Experimental. } \value{ Object of the experimental class \code{"zclustermodel"}. } \author{ \adrian } \seealso{ \code{\link{methods.zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) } \keyword{spatial} \keyword{models} spatstat.model/man/clusterradius.kppm.Rd0000644000176200001440000000645614374302015020127 0ustar liggesusers\name{clusterradius.kppm} \alias{clusterradius.kppm} \title{ Compute or Extract Effective Range of Cluster Kernel } \description{ Given a cluster point process model, this command returns a value beyond which the probability density of the cluster offspring is negligible.
} \usage{ \method{clusterradius}{kppm}(model, \dots, thresh = NULL, precision = FALSE) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } \item{thresh}{ Numerical threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be considered negligible. A sensible default is provided. } \item{precision}{ Logical. If \code{precision=TRUE} the precision of the calculated range is returned as an attribute to the range. See Details. } } \details{ Given a cluster model, this function by default returns the effective range of the model with the given parameters, as used in \pkg{spatstat}. For the \Matern cluster model (see e.g. \code{\link[spatstat.random]{rMatClust}}) this is simply the finite radius of the offspring density given by the parameter \code{scale}, irrespective of other options given to this function. The remaining models in \pkg{spatstat} have infinite theoretical range, and an effective finite value is given as follows: For the Thomas model (see e.g. \code{\link[spatstat.random]{rThomas}}) the default is \code{4*scale}, where \code{scale} is the scale or standard deviation parameter of the model. If \code{thresh} is given the value is instead found as described for the other models below. For the Cauchy model (see e.g. \code{\link[spatstat.random]{rCauchy}}) and the Variance Gamma (Bessel) model (see e.g. \code{\link[spatstat.random]{rVarGamma}}) the value of \code{thresh} defaults to 0.001, and then this is used to compute the range numerically as follows. If \eqn{k(x,y)=k_0(r)}{k(x,y)=k0(r)} with \eqn{r=\sqrt{x^2+y^2}}{r=sqrt(x^2+y^2)} denotes the isotropic cluster kernel then \eqn{f(r) = 2 \pi r k_0(r)}{f(r) = 2 pi r k0(r)} is the density function of the offspring distance from the parent. The range is determined as the value of \eqn{r} where \eqn{f(r)} falls below \code{thresh} times \eqn{k_0(r)}{k0(r)}. If \code{precision=TRUE} the precision related to the chosen range is returned as an attribute. Here the precision is defined as the polar integral of the kernel from distance 0 to the calculated range. Ideally this should be close to the value 1 which would be obtained for the true theoretical infinite range. } \value{ A positive numeric value. Additionally, the precision related to this range value is returned as an attribute \code{"prec"}, if \code{precision=TRUE}. } \author{\spatstatAuthors.} \seealso{ \code{\link{clusterkernel}}, \code{\link[spatstat.model]{kppm}}, \code{\link[spatstat.random]{rMatClust}}, \code{\link[spatstat.random]{rThomas}}, \code{\link[spatstat.random]{rCauchy}}, \code{\link[spatstat.random]{rVarGamma}}, \code{\link[spatstat.random]{rNeymanScott}}. } \examples{ fit <- kppm(redwood ~ x, "MatClust") clusterradius(fit) } \keyword{spatial} spatstat.model/man/BadGey.Rd0000644000176200001440000001141114331173075015413 0ustar liggesusers\name{BadGey} \alias{BadGey} \title{Hybrid Geyer Point Process Model} \description{ Creates an instance of the Baddeley-Geyer point process model, defined as a hybrid of several Geyer interactions. The model can then be fitted to point pattern data.
} \usage{ BadGey(r, sat) } \arguments{ \item{r}{vector of interaction radii} \item{sat}{ vector of saturation parameters, or a single common value of saturation parameter } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is Baddeley's generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to a process with multiple interaction distances. The BadGey point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], \ldots, r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],\ldots,s[k]}, intensity parameter \eqn{\beta}{\beta} and interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{\gamma[1], \ldots, \gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ \beta \gamma[1]^v(1, x_i, X) \ldots \gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x[i], X) = min(s[j], t(j, x[i], X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie within a distance \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. \code{BadGey} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of this piecewise-constant saturated pairwise interaction is yielded by the function \code{BadGey()}. See the examples below. The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters that are applied to the point counts \eqn{t_j(x_i, X)}{t(j,x[i],X)}. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} is applied to the count of points within a distance \code{r[1]}, and \code{sat[2]} to the count of points within a distance \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every count. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x[i], X) = t(j, x[i], X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{\gamma} obtained from \code{\link{BadGey}} have a complicated relationship to the interaction parameters \eqn{\gamma}{\gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{SatPiece}}, \code{\link{Hybrid}} } \section{Hybrids}{ A \sQuote{hybrid} interaction is one which is built by combining several different interactions (Baddeley et al, 2013). The \code{BadGey} interaction can be described as a hybrid of several \code{\link{Geyer}} interactions. The \code{\link{Hybrid}} command can be used to build hybrids of any interactions.
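For example, a hybrid of two Geyer interactions can be constructed directly (a sketch only; the interaction radii and saturation values below are purely illustrative):
\preformatted{
  # hybrid of two Geyer saturation interactions
  H <- Hybrid(Geyer(r=0.07, sat=2), Geyer(r=0.1, sat=2))
  # fit it with ppm() in the usual way
  ppm(cells ~ 1, H)
}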
If the \code{\link{Hybrid}} operator is applied to several \code{\link{Geyer}} models, the result is equivalent to a \code{BadGey} model. This can be useful for incremental model selection. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \code{DOI: 10.18637/jss.v055.i11} } \examples{ BadGey(c(0.1,0.2), c(1,1)) # prints a sensible description of itself BadGey(c(0.1,0.2), 1) # fit a stationary Baddeley-Geyer model ppm(cells ~1, BadGey(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend \donttest{ ppm(cells ~polynom(x,y,3), BadGey(c(0.07, 0.1, 0.13), 2)) } } \author{ \adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat.model/man/model.matrix.slrm.Rd0000644000176200001440000000325114331173077017644 0ustar liggesusers\name{model.matrix.slrm} \alias{model.matrix.slrm} \title{Extract Design Matrix from Spatial Logistic Regression Model} \description{ This function extracts the design matrix of a spatial logistic regression model. } \usage{ \method{model.matrix}{slrm}(object, ..., keepNA=TRUE) } \arguments{ \item{object}{ A fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{keepNA}{ Logical. Determines whether rows containing \code{NA} values will be deleted or retained. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a spatial logistic regression. The \code{object} must be a fitted spatial logistic regression (object of class \code{"slrm"}). Such objects are produced by the model-fitting function \code{\link{slrm}}. Usually the result is a matrix with one column for every constructed covariate in the model, and one row for every pixel in the grid used to fit the model. If \code{object} was fitted using split pixels (by calling \code{\link{slrm}} using the argument \code{splitby}) then the matrix has one row for every pixel or half-pixel. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. } \author{\adrian and \rolf } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{slrm}}. } \examples{ fit <- slrm(japanesepines ~x) head(model.matrix(fit)) # matrix with two columns: '(Intercept)' and 'x' } \keyword{spatial} \keyword{models} spatstat.model/man/OrdThresh.Rd0000644000176200001440000000336314331173075016171 0ustar liggesusers\name{OrdThresh} \alias{OrdThresh} \title{Ord's Interaction model} \description{ Creates an instance of Ord's point process model which can then be fitted to point pattern data. } \usage{ OrdThresh(r) } \arguments{ \item{r}{Positive number giving the threshold value for Ord's model.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. 
The function \eqn{g} is simply \eqn{g(a) = 1} if \eqn{a \ge r}{a >= r} and \eqn{g(a) = \gamma < 1}{g(a) = gamma < 1} if \eqn{a < r}{a < r}, where \eqn{r} is called the threshold value. This function creates an instance of Ord's model with a given value of \eqn{r}. It can then be fitted to point process data using \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172--212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/DiggleGatesStibbard.Rd0000644000176200001440000000505314331173075020117 0ustar liggesusers\name{DiggleGatesStibbard} \alias{DiggleGatesStibbard} \title{Diggle-Gates-Stibbard Point Process Model} \description{ Creates an instance of the Diggle-Gates-Stibbard point process model which can then be fitted to point pattern data. } \usage{ DiggleGatesStibbard(rho) } \arguments{ \item{rho}{Interaction range} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Diggle-Gates-Stibbard process with interaction range \code{rho}. } \details{ Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((\pi * d)/(2 * \rho)) } for \eqn{d < \rho}{d < \rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d \ge \rho}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Diggle-Gates-Stibbard pairwise interaction is yielded by the function \code{DiggleGatesStibbard()}. See the examples below. Note that this model does not have any regular parameters (as explained in the section on Interaction Parameters in the help file for \code{\link{ppm}}). The parameter \eqn{\rho} is not estimated by \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{DiggleGratton}}, \code{\link{rDGS}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763--770.
} \examples{ DiggleGatesStibbard(0.02) # prints a sensible description of itself ppm(cells ~1, DiggleGatesStibbard(0.05)) # fit the stationary D-G-S process to `cells' \donttest{ ppm(cells ~ polynom(x,y,3), DiggleGatesStibbard(0.05)) # fit a nonstationary D-G-S process # with log-cubic polynomial trend } } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/valid.slrm.Rd0000644000176200001440000000303614331173100016324 0ustar liggesusers\name{valid.slrm} \alias{valid.slrm} \title{ Check Whether Spatial Logistic Regression Model is Valid } \description{ Determines whether a fitted spatial logistic regression model is a well-defined model. } \usage{ \method{valid}{slrm}(object, warn=TRUE, \dots) } \arguments{ \item{object}{ Fitted spatial logistic regression model (object of class \code{"slrm"}). } \item{warn}{ Logical value indicating whether to issue a warning if the validity of the model cannot be checked (due to unavailability of the required code). } \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{valid}} for spatial logistic regression models (class \code{"slrm"}). In a model fitted by \code{\link{slrm}}, some of the fitted coefficients may be \code{NA} or infinite values. This can occur if the data are not adequate for estimation of the model parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. The function \code{valid.slrm} checks whether the fitted coefficients of \code{object} specify a well-defined model. It returns \code{TRUE} if the model is well-defined, and \code{FALSE} otherwise. Use the function \code{\link{emend.slrm}} to force the fitted model to be valid. } \value{ A logical value, or \code{NA}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{slrm}}, \code{\link{emend.slrm}} } \examples{ fit1 <- slrm(cells ~ x) valid(fit1) fit2 <- slrm(cells ~ x + I(x)) valid(fit2) } \keyword{spatial} \keyword{models} spatstat.model/man/Extract.influence.ppm.Rd0000644000176200001440000000344214510474260020440 0ustar liggesusers\name{Extract.influence.ppm} \alias{[.influence.ppm} \title{Extract Subset of Influence Object} \description{ Extract a subset of an influence object, or extract the influence values at specified locations. } \usage{ \method{[}{influence.ppm}(x, i, ...) } \arguments{ \item{x}{ An influence object (of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.ppp}}). Either a spatial window (object of class \code{"owin"}) or an integer index. } \item{\dots}{ Ignored. } } \value{ Another object of class \code{"influence.ppm"}. } \details{ An object of class \code{"influence.ppm"} contains the values of the likelihood influence for a point process model, computed by \code{\link{influence.ppm}}. This is effectively a marked point pattern obtained by marking each of the original data points with its likelihood influence. This function extracts a designated subset of the influence values, either as another influence object, or as a vector of numeric values. The function \code{[.influence.ppm} is a method for \code{\link{[}} for the class \code{"influence.ppm"}. The argument \code{i} should be an index applicable to a point pattern. It may be either a spatial window (object of class \code{"owin"}) or a sequence index. The result will be another influence object (of class \code{influence.ppm}). To extract the influence values as a numeric vector, use \code{marks(as.ppp(x))}.
} \seealso{ \code{\link{influence.ppm}}. } \examples{ fit <- ppm(cells, ~x) infl <- influence(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) infl[b] infl[1:5] marks(as.ppp(infl))[1:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} \concept{Model diagnostics} spatstat.model/man/detpointprocfamilyfun.Rd0000644000176200001440000001527714331173076020714 0ustar liggesusers\name{detpointprocfamilyfun} \alias{detpointprocfamilyfun} \title{Construct a New Determinantal Point Process Model Family Function} \description{ Function to ease the implementation of a new determinantal point process model family. } \usage{detpointprocfamilyfun(kernel = NULL, specden = NULL, basis = "fourierbasis", convkernel = NULL, Kfun = NULL, valid = NULL, intensity = NULL, dim = 2, name = "User-defined", isotropic = TRUE, range = NULL, parbounds = NULL, specdenrange = NULL, startpar = NULL, \dots) } \arguments{ \item{kernel}{ function specifying the kernel. May be set to \code{NULL}. See Details. } \item{specden}{ function specifying the spectral density. May be set to \code{NULL}. See Details. } \item{basis}{ character string giving the name of the basis. Defaults to the Fourier basis. See Details. } \item{convkernel}{ function specifying the k-fold auto-convolution of the kernel. May be set to \code{NULL}. See Details. } \item{Kfun}{ function specifying the K-function. May be set to \code{NULL}. See Details. } \item{valid}{ function determining whether a given set of parameter values yields a valid model. May be set to \code{NULL}. See Examples. } \item{intensity}{ character string specifying which parameter is the intensity in the model family. Should be \code{NULL} if the model family has no intensity parameter. } \item{dim}{ character string specifying which parameter is the dimension of the state space in this model family (if any). Alternatively a positive integer specifying the dimension. } \item{name}{ character string giving the name of the model family used for printing. } \item{isotropic}{ logical value indicating whether or not the model is isotropic. } \item{range}{ function determining the interaction range of the model. May be set to \code{NULL}. See Examples. } \item{parbounds}{ function determining the bounds for each model parameter when all other parameters are fixed. May be set to \code{NULL}. See Examples. } \item{specdenrange}{ function specifying the range of the spectral density if it is finite (only the case for very few models). May be set to \code{NULL}. } \item{startpar}{ function determining starting values for parameters in any estimation algorithm. May be set to \code{NULL}. See Examples. } \item{\dots}{ Additional arguments for inclusion in the returned model object. These are not checked in any way. } } \details{ A determinantal point process family is specified either in terms of a kernel (a positive semi-definite function, i.e. a covariance function) or a spectral density, or preferably both. One of these can be \code{NULL} if it is unknown, but not both. When both are supplied they must have the same arguments. The first argument gives the values at which the function should be evaluated. In general the function should accept an \eqn{n} by \eqn{d} matrix or \code{data.frame} specifying \eqn{n (>=0)} points in dimension \eqn{d}. If the model is isotropic it only needs to accept a non-negative valued numeric of length \eqn{n}. (In fact there is currently almost no support for non-isotropic models, so it is recommended not to specify such a model.)
The name of this argument could be chosen freely, but \eqn{x} is recommended. The remaining arguments are the parameters of the model. If one of these is an intensity parameter, the name should be mentioned in the argument \code{intensity}. If one of these specifies the dimension of the model, it should be mentioned in the argument \code{dim}. The kernel and spectral density are defined with respect to a specific set of basis functions, which is typically the Fourier basis. However this can be changed to any user-supplied basis in the argument \code{basis}. If such an alternative is supplied it must be the name of a function expecting the same arguments as \code{\link{fourierbasis}} and returning the results in the same form as \code{\link{fourierbasis}}. If supplied, the arguments of \code{convkernel} must obey the following: the first argument should be like the first argument of \code{kernel} and/or \code{specden} (see above). The second argument (preferably called \code{k}) should be the positive integer specifying how many times the auto-convolution is done (i.e. the \eqn{k} in \eqn{k}-fold auto-convolution). The remaining arguments must agree with the arguments of \code{kernel} and/or \code{specden} (see above). If supplied, the arguments of \code{Kfun} should be like the arguments of \code{kernel} and \code{specden} (see above). } \value{ A function in the \R language, belonging to the class \code{"detpointprocfamilyfun"}. The function has formal arguments \code{\dots} and returns a determinantal point process family (object of class \code{"detpointprocfamily"}). } \author{ \spatstatAuthors. } \examples{ ## Example of how to define the Gauss family exGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ lambda*exp(-(x/alpha)^2) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" \%in\% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" \%in\% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2] rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) exGauss m <- exGauss(lambda=100, alpha=.05, d=2) m } \keyword{spatial} \keyword{models} spatstat.model/man/plot.palmdiag.Rd0000644000176200001440000000536714334122307017023 0ustar liggesusers\name{plot.palmdiag} \alias{plot.palmdiag} \title{ Plot the Palm Intensity Diagnostic } \description{ Plot the Palm intensity diagnostic for a fitted cluster process or Cox process model.
} \usage{ \method{plot}{palmdiag}(x, \dots, style = c("intervals", "dots", "bands"), args.dots = list(pch = 16), args.intervals = list(), xlim=NULL, main) } \arguments{ \item{x}{ Object of class \code{"palmdiag"} produced by \code{\link{palmdiagnose}}. } \item{\dots}{ Additional arguments passed to \code{\link[spatstat.explore]{plot.fv}} when the fitted curve is plotted. } \item{style}{ Character string specifying the style of plot for the nonparametric estimates. See Details. } \item{args.dots}{ Arguments passed to \code{\link[graphics]{points}} when \code{style="dots"}. } \item{args.intervals}{ Arguments passed to \code{\link[graphics]{segments}} when \code{style="intervals"}. } \item{xlim}{Optional range of distances plotted along the horizontal axis. A numeric vector of length 2.} \item{main}{Optional main title for plot.} } \details{ This function plots the diagnostic proposed by Tanaka, Ogata and Stoyan (2008, Section 2.3) for assessing goodness-of-fit of a Neyman-Scott cluster process model to a point pattern dataset. The diagnostic is computed by the function \code{\link{palmdiagnose}}. First the Palm intensity of the fitted model is plotted as a function of interpoint distance \eqn{r} using \code{\link[spatstat.explore]{plot.fv}}. Then the nonparametric estimates of the Palm intensity are plotted on the same graph as follows: \itemize{ \item if \code{style="dots"}, the nonparametric estimate for each band of distances is plotted as a dot, with horizontal coordinate at the middle of the band. This is the style proposed by Tanaka et al (2008). \item if \code{style="intervals"} (the default), each nonparametric estimate is plotted as a dot, and a 95\% confidence interval is plotted as a vertical line segment, centred on the dot. The confidence interval is based on the Poisson approximation. \item if \code{style="bands"}, the nonparametric estimates are drawn as a continuous curve which is flat on each band of distances. The 95\% confidence intervals are drawn as grey shading. } } \value{ Null. } \references{ Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott Point Processes. \emph{Biometrical Journal} \bold{50}, 1, 43--57. } \author{ \adrian. } \seealso{ \code{\link{palmdiagnose}} } \examples{ fit <- kppm(redwood) R <- palmdiagnose(fit) plot(R, style="d") plot(R) plot(R, style="b") } \keyword{models} \keyword{nonparametric} spatstat.model/man/predict.dppm.Rd0000644000176200001440000000305014331173077016653 0ustar liggesusers\name{predict.dppm} \alias{predict.dppm} \alias{fitted.dppm} \title{Prediction from a Fitted Determinantal Point Process Model} \description{ Given a fitted determinantal point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{dppm}(object, ...) \method{predict}{dppm}(object, ...) } \arguments{ \item{object}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link{fitted}} and \code{\link{predict}}. The argument \code{object} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. 
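For example (a minimal sketch, mirroring the Examples below; any additional arguments are simply passed to the corresponding \code{ppm} methods):
\preformatted{
  fit <- dppm(redwood ~ x, dppGauss())
  lam <- predict(fit)   # pixel image of the fitted intensity
  fv  <- fitted(fit)    # fitted intensity at the quadrature points
}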
} \value{ The value of \code{fitted.dppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.dppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ if(interactive()) { fit <- dppm(swedishpines ~ x + y, dppGauss()) } else { fit <- dppm(redwood ~ x, dppGauss()) } predict(fit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/coef.mppm.Rd0000644000176200001440000000622614331173100016141 0ustar liggesusers\name{coef.mppm} \alias{coef.mppm} \title{ Coefficients of Point Process Model Fitted to Multiple Point Patterns } \description{ Given a point process model fitted to a list of point patterns, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{mppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"mppm"}) } \item{\dots}{ Ignored. } } \value{ Either a vector containing the fitted coefficients, or a data frame containing the fitted coefficients for each point pattern. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{\theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ \lambda(u,x) = \exp(\theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. If the fitted model includes random effects (i.e. if the argument \code{random} was specified in the call to \code{\link{mppm}}), then the fitted coefficients are different for each point pattern in the original data, so \code{coef(object)} is a data frame with one row for each point pattern, and one column for each parameter. Use \code{\link{fixef.mppm}} to extract the vector of fixed effect coefficients, and \code{\link{ranef.mppm}} to extract the random effect coefficients at each level. Use \code{\link{print.mppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{fixef.mppm}} and \code{\link{ranef.mppm}} for the fixed and random effect coefficients in a model that includes random effects.
\code{\link{print.mppm}}, \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) fit.Poisson <- mppm(X ~ 1, H) coef(fit.Poisson) # The single entry "(Intercept)" # is the log of the fitted intensity of the Poisson process fit.Strauss <- mppm(X~1, H, Strauss(7)) coef(fit.Strauss) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) # Tweak data to exaggerate differences H$X[[1]] <- rthin(H$X[[1]], 0.3) # Model with random effects fitran <- mppm(X ~ 1, H, random=~1|id) coef(fitran) } \references{ \baddrubaturnbook } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/clusterkernel.kppm.Rd0000644000176200001440000000252314374302015020107 0ustar liggesusers\name{clusterkernel.kppm} \alias{clusterkernel.kppm} \title{ Extract Cluster Offspring Kernel } \description{ Given a fitted cluster point process model, this command returns the probability density of the cluster offspring. } \usage{ \method{clusterkernel}{kppm}(model, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } } \details{ Given a cluster point process model, this command returns a \code{function(x,y)} giving the two-dimensional probability density of the cluster offspring points assuming a cluster parent located at the origin. The function \code{clusterkernel} is generic, with methods for class \code{"kppm"} (described here) and \code{"character"} (described in \code{\link[spatstat.random]{clusterkernel.character}}). } \value{ A function in the \R language with arguments \code{x,y,\dots}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.random]{clusterkernel.character}}, \code{\link{clusterfield}}, \code{\link[spatstat.model]{kppm}} } \examples{ fit <- kppm(redwood ~ x, "MatClust") f <- clusterkernel(fit) f(0.05, 0.02) } \keyword{spatial} spatstat.model/man/model.frame.ppm.Rd0000644000176200001440000000440014331173077017246 0ustar liggesusers\name{model.frame.ppm} \alias{model.frame.ppm} \alias{model.frame.kppm} \alias{model.frame.dppm} \alias{model.frame.slrm} \title{ Extract the Variables in a Point Process Model } \description{ Given a fitted point process model, this function returns a data frame containing all the variables needed to fit the model using the Berman-Turner device. } \usage{ \method{model.frame}{ppm}(formula, ...) \method{model.frame}{kppm}(formula, ...) \method{model.frame}{dppm}(formula, ...) \method{model.frame}{slrm}(formula, ...) } \arguments{ \item{formula}{ A fitted point process model. An object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"}, or \code{"dppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{model.frame.glm}}. } } \details{ The function \code{\link{model.frame}} is generic. These functions are methods for \code{\link{model.frame}} for fitted point process models (objects of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"}, or \code{"dppm"}). The first argument should be a fitted point process model; it has to be named \code{formula} for consistency with the generic function. The result is a data frame containing all the variables used in fitting the model.
The data frame has one row for each quadrature point used in fitting the model. The quadrature scheme can be extracted using \code{\link{quad.ppm}}. } \value{ A \code{data.frame} containing all the variables used in the fitted model, plus additional variables specified in \code{\dots}. It has an additional attribute \code{"terms"} containing information about the model formula. For details see \code{\link{model.frame.glm}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{slrm}}, \code{\link{model.frame}}, \code{\link{model.matrix.ppm}} } \examples{ fit <- ppm(cells ~ x) mf <- model.frame(fit) kfit <- kppm(redwood ~ x, "Thomas") kmf <- model.frame(kfit) sfit <- slrm(cells ~ x) smf <- model.frame(sfit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/palmdiagnose.Rd0000644000176200001440000001071414331675406016735 0ustar liggesusers\name{palmdiagnose} \alias{palmdiagnose} \title{ Diagnostic based on Palm Intensity } \description{ Given a fitted cluster process or Cox process model, calculate a diagnostic which compares nonparametric and parametric estimates of the Palm intensity. } \usage{ palmdiagnose(object, \dots, breaks = 30, trim = 30, rmax=Inf) } \arguments{ \item{object}{ Fitted model (object of class \code{"kppm"}) or a list of fitted models. } \item{\dots}{ Optional. Additional arguments which are fitted models of class \code{"kppm"}. } \item{breaks}{ Optional argument passed to \code{\link[base]{cut.default}} determining the breakpoints of distance values for the nonparametric estimate. Either an integer specifying the number of breakpoints, or a numeric vector of distance values to be used as the breakpoints. } \item{trim}{ Optional. Maximum value of the translation edge correction weight. } \item{rmax}{ Optional. Maximum interpoint distance \eqn{r} that should be considered. See Details. } } \details{ This function computes the diagnostic proposed by Tanaka, Ogata and Stoyan (2008, Section 2.3) for assessing goodness-of-fit of a Neyman-Scott cluster process model to a point pattern dataset. The fitted model \code{object} should be an object of class \code{"kppm"} representing a Neyman-Scott cluster process model or a Cox process model. In the current implementation, the model must be stationary. The code computes parametric and non-parametric estimates of the Palm intensity \eqn{\lambda_0(r)}{\lambda[0](r)}, loosely speaking, the intensity of the point process given that there is a point at the origin. The parametric estimate is obtained from the fitted model by substituting the fitted parameter estimates into expressions for the pair correlation and the intensity. The non-parametric estimate is obtained by considering all pairs of data points, dividing the range of interpoint distances into several equally-spaced bands (determined by the argument \code{breaks}), counting the number of pairs of points whose interpoint distances fall in each band, and numerically adjusting for edge effects. Tanaka, Ogata and Stoyan (2008) used the periodic (toroidal) edge correction; our code uses the translation edge correction so that the method can be applied to data in any window. The result is a function value table (object of class \code{"fv"}) containing the nonparametric and parametric estimates of the Palm intensity. 
The result also belongs to the class \code{"palmdiag"} which has a method for \code{plot}. The default behaviour of \code{\link{plot.palmdiag}} is to plot the model fit as a curve, and to display the nonparametric estimates as dots; this is the plot style proposed by Tanaka, Ogata and Stoyan (2008). Alternative display styles are also supported by \code{\link{plot.palmdiag}}. For computational efficiency, the argument \code{rmax} specifies the maximum value of interpoint distance \eqn{r} for which estimates of \eqn{\lambda_0(r)}{\lambda[0](r)} shall be computed. The default \code{rmax = Inf} implies there is no constraint on interpoint distance, and the resulting function object contains estimates of \eqn{\lambda_0(r)}{\lambda[0](r)} up to the maximum distance that would have been observable in the window containing the original point pattern data. If there are additional arguments \code{\dots} which are fitted models of class \code{"kppm"}, or if \code{object} is a list of fitted models of class \code{"kppm"}, then the parametric estimates for each of the fitted models will be included in the resulting function object. If names are attached to these fitted models, the names will be used in the resulting function object. } \value{ Function value table (object of class \code{"fv"}) containing the nonparametric and parametric estimates of the Palm intensity. Also belongs to the class \code{"palmdiag"} which has a \code{plot} method. } \author{ \adrian. } \references{ Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott Point Processes. \emph{Biometrical Journal} \bold{50}, 1, 43--57. } \examples{ fitK <- kppm(redwood) R <- palmdiagnose(fitK) plot(R) fitg <- kppm(redwood, statistic="pcf") R2 <- palmdiagnose(A=fitK, B=fitg) plot(R2) } \seealso{ \code{\link{plot.palmdiag}} } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.model/man/LennardJones.Rd0000644000176200001440000001200214331173075016637 0ustar liggesusers\name{LennardJones} \alias{LennardJones} \title{The Lennard-Jones Potential} \description{ Creates the Lennard-Jones pairwise interaction structure which can then be fitted to point pattern data. } \usage{ LennardJones(sigma0=NA) } \value{ An object of class \code{"interact"} describing the Lennard-Jones interpoint interaction structure. } \arguments{ \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \details{ In a pairwise interaction point process with the Lennard-Jones pair potential (Lennard-Jones, 1924) each pair of points in the point pattern, a distance \eqn{d} apart, contributes a factor \deqn{ v(d) = \exp \left\{ - 4\epsilon \left[ \left( \frac{\sigma}{d} \right)^{12} - \left( \frac{\sigma}{d} \right)^6 \right] \right\} }{ v(d) = exp( - 4 * epsilon * ((sigma/d)^12 - (sigma/d)^6)) } to the probability density, where \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} are positive parameters to be estimated. See \bold{Examples} for a plot of this expression. This potential causes very strong inhibition between points at short range, and attraction between points at medium range. The parameter \eqn{\sigma}{sigma} is called the \emph{characteristic diameter} and controls the scale of interaction. The parameter \eqn{\epsilon}{epsilon} is called the \emph{well depth} and determines the strength of attraction. The potential switches from inhibition to attraction at \eqn{d=\sigma}{d=sigma}. 
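The shape of the potential can be inspected directly, for example (a sketch of the formula above, using the purely illustrative values \eqn{\sigma=1}{sigma=1} and \eqn{\epsilon=1}{epsilon=1}):
\preformatted{
  v <- function(d, sigma=1, epsilon=1)
    exp(-4 * epsilon * ((sigma/d)^12 - (sigma/d)^6))
  curve(v(x), from=0.8, to=3, xlab="d", ylab="v(d)")
  abline(h=1, lty=3)  # v(d) < 1 inhibits, v(d) > 1 attracts
}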
The maximum value of the pair potential is \eqn{\exp(\epsilon)}{exp(epsilon)}, occurring at distance \eqn{d = 2^{1/6} \sigma}{d = 2^(1/6) * sigma}. Interaction is usually considered to be negligible for distances \eqn{d > 2.5 \sigma \max\{1,\epsilon^{1/6}\}}{d > 2.5 * sigma * max(1, epsilon^(1/6))}. This potential is used to model interactions between uncharged molecules in statistical physics. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Lennard-Jones pairwise interaction is yielded by the function \code{LennardJones()}. See the examples below. } \section{Rescaling}{ To avoid numerical instability, the interpoint distances \code{d} are rescaled when fitting the model. Distances are rescaled by dividing by \code{sigma0}. In the formula for \eqn{v(d)} above, the interpoint distance \eqn{d} will be replaced by \code{d/sigma0}. The rescaling happens automatically by default. If the argument \code{sigma0} is missing or \code{NA} (the default), then \code{sigma0} is taken to be the minimum nearest-neighbour distance in the data point pattern (in the call to \code{\link{ppm}}). If the argument \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. The ``canonical regular parameters'' estimated by \code{\link{ppm}} are \eqn{\theta_1 = 4 \epsilon (\sigma/\sigma_0)^{12}}{theta1 = 4 * epsilon * (sigma/sigma0)^12} and \eqn{\theta_2 = 4 \epsilon (\sigma/\sigma_0)^6}{theta2 = 4 * epsilon * (sigma/sigma0)^6}. } \section{Warnings and Errors}{ Fitting the Lennard-Jones model is extremely unstable, because of the strong dependence between the functions \eqn{d^{-12}}{d^(-12)} and \eqn{d^{-6}}{d^(-6)}. The fitting algorithm often fails to converge. Try increasing the number of iterations of the GLM fitting algorithm, by setting \code{gcontrol=list(maxit=1e3)} in the call to \code{\link{ppm}}. Errors are likely to occur if this model is fitted to a point pattern dataset which does not exhibit both short-range inhibition and medium-range attraction between points. The values of the parameters \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} may be \code{NA} (because the fitted canonical parameters have opposite sign, which usually occurs when the pattern is completely random). An absence of warnings does not mean that the fitted model is sensible. A negative value of \eqn{\epsilon}{epsilon} may be obtained (usually when the pattern is strongly clustered); this does not correspond to a valid point process model, but the software does not issue a warning. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ badfit <- ppm(cells ~1, LennardJones(), rbord=0.1) badfit fit <- ppm(unmark(longleaf) ~1, LennardJones(), rbord=1) fit plot(fitin(fit)) # Note the Longleaf Pines coordinates are rounded to the nearest decimetre # (multiple of 0.1 metres) so the apparent inhibition may be an artefact } \references{ Lennard-Jones, J.E. (1924) On the determination of molecular fields. \emph{Proc Royal Soc London A} \bold{106}, 463--477.
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/Window.ppm.Rd0000644000176200001440000000326314427127042016327 0ustar liggesusers\name{Window.ppm} \alias{Window.ppm} \alias{Window.kppm} \alias{Window.dppm} \alias{Window.slrm} \alias{Window.msr} \title{Extract Window of Spatial Object} \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract the window in which the object is defined. } \usage{ \method{Window}{ppm}(X, \dots, from=c("points", "covariates")) \method{Window}{kppm}(X, \dots, from=c("points", "covariates")) \method{Window}{dppm}(X, \dots, from=c("points", "covariates")) \method{Window}{slrm}(X, \dots, from=c("points", "covariates")) \method{Window}{msr}(X, \dots) } \arguments{ \item{X}{A spatial object.} \item{\dots}{Ignored.} \item{from}{Character string. See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ These are methods for the generic function \code{\link{Window}} which extract the spatial window in which the object \code{X} is defined. The argument \code{from} applies when \code{X} is a fitted two-dimensional point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"dppm"}). If \code{from="points"} (the default), \code{Window} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{Window} returns the window in which the spatial covariates of the model were provided. } \seealso{ \code{\link{Window}}, \code{\link{Window.ppp}}, \code{\link{Window.psp}}. \code{\link{owin.object}} } \examples{ A <- ppm(cells ~ 1) Window(A) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.model/man/measureContinuous.Rd0000644000176200001440000000246214331173077020020 0ustar liggesusers\name{measureContinuous} \alias{measureContinuous} \alias{measureDiscrete} \title{ Discrete and Continuous Components of a Measure } \description{ Given a measure \code{A} (object of class \code{"msr"}) these functions find the discrete and continuous parts of \code{A}. } \usage{ measureDiscrete(x) measureContinuous(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measureDiscrete} and \code{measureContinuous} return the discrete and continuous components, respectively, of a measure. If \code{x} is a measure, then \code{measureDiscrete(x)} is a measure consisting only of the discrete (atomic) component of \code{x}, and \code{measureContinuous(x)} is a measure consisting only of the continuous (diffuse) component of \code{x}. } \value{ Another measure (object of class \code{"msr"}) on the same spatial domain. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. } \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measurePositive}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp measureDiscrete(rp) measureContinuous(rp) } \keyword{spatial} \keyword{math} spatstat.model/man/prune.rppm.Rd0000644000176200001440000000255014331173077016374 0ustar liggesusers\name{prune.rppm} \alias{prune.rppm} \title{ Prune a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, apply pruning to reduce the complexity of the partition tree.
} \usage{ \method{prune}{rppm}(tree, \dots) } \arguments{ \item{tree}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Arguments passed to \code{\link[rpart]{prune.rpart}} to control the pruning procedure. } } \details{ This is a method for the generic function \code{\link[rpart]{prune}} for the class \code{"rppm"}. An object of this class is a point process model, fitted to point pattern data by recursive partitioning, by the function \code{\link{rppm}}. The recursive partition tree will be pruned using \code{\link[rpart]{prune.rpart}}. The result is another object of class \code{"rppm"}. } \value{ Object of class \code{"rppm"}. } \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}}, \code{\link{predict.rppm}}. } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) fit <- rppm(gold ~ dfault + greenstone, data=mur) fit prune(fit, cp=0.1) } \keyword{spatial} \keyword{models} spatstat.model/man/panysib.Rd0000644000176200001440000000345114334130011015716 0ustar liggesusers\name{panysib} \alias{panysib} \title{ Probability that a Point Has Any Siblings } \description{ Given a cluster process model, calculate the probability that a point of the process has any siblings. } \usage{ panysib(object) } \arguments{ \item{object}{ Fitted cluster process model (object of class \code{"kppm"}). } } \details{ In a Poisson cluster process, two points are called \emph{siblings} if they belong to the same cluster, that is, if they had the same parent point. This function computes the probability that a given random point has \emph{any} siblings. If \code{object} is a stationary point process, the result is a single number, which is the probability that a typical point of the process has any siblings. If this number is small, then the process is approximately a homogeneous Poisson process (complete spatial randomness). The converse is not true (Baddeley et al, 2022). Otherwise, the result is a pixel image, in which the value at any location \code{u} is the conditional probability, given there is a point of the process at \code{u}, that this point has any siblings. If the pixel values are all small, then the process is approximately an inhomogeneous Poisson process. This concept was proposed by Baddeley et al (2022). } \value{ A single number (if \code{object} is a stationary point process) or a pixel image (otherwise). } \author{ \adrian. } \seealso{ \code{\link{psib}} } \references{ Baddeley, A., Davies, T.M., Hazelton, M.L., Rakshit, S. and Turner, R. (2022) Fundamental problems in fitting spatial cluster process models. \emph{Spatial Statistics} \bold{52}, 100709. DOI: \code{10.1016/j.spasta.2022.100709} } \examples{ fit <- kppm(redwood ~ polynom(x,y,2)) plot(panysib(fit)) } \keyword{spatial} \keyword{models} spatstat.model/man/simulate.dppm.Rd0000644000176200001440000001140414331173077017046 0ustar liggesusers\name{simulate.dppm} \alias{simulate.dppm} \alias{simulate.detpointprocfamily} \title{Simulation of Determinantal Point Process Model} \description{ Generates simulated realisations from a determinantal point process model.
} \usage{ \method{simulate}{dppm}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) \method{simulate}{detpointprocfamily}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) } \arguments{ \item{object}{ Determinantal point process model. An object of class \code{"detpointprocfamily"} or \code{"dppm"}. } \item{nsim}{Number of simulated realisations.} \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Arguments passed on to \code{\link{rdpp}}.} \item{W}{ Object specifying the window of simulation (defaults to a unit box if nothing else is sensible -- see Details). Can be any single argument acceptable to \code{\link{as.boxx}} (e.g. an \code{"owin"}, \code{"box3"} or \code{"boxx"} object). } \item{trunc}{ Numeric value specifying how the model truncation is performed. See Details. } \item{correction}{ Character string specifying the type of correction to use. The options are \code{"periodic"} (default) and \code{"border"}. See Details. } \item{rbord}{ Numeric value specifying the extent of the border correction if this correction is used. See Details. } } \details{ These functions are methods for the generic function \code{\link[stats]{simulate}} for the classes \code{"detpointprocfamily"} and \code{"dppm"} of determinantal point process models. The return value is a list of \code{nsim} point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. The exact simulation of a determinantal point process model involves an infinite series, which typically has no analytical solution. In the implementation a truncation is performed. The truncation \code{trunc} can be specified either directly as a positive integer or as a fraction between 0 and 1. In the latter case the truncation is chosen such that the expected number of points in a simulation is \code{trunc} times the theoretical expected number of points in the model. The default is 0.99. The window of the returned point pattern(s) can be specified via the argument \code{W}. For a fitted model (of class \code{"dppm"}) it defaults to the observation window of the data used to fit the model. For inhomogeneous models it defaults to the window of the intensity image. Otherwise it defaults to a unit box. For non-rectangular windows simulation is done in the containing rectangle and then restricted to the window. For inhomogeneous models a stationary model is first simulated using the maximum intensity and then the result is obtained by thinning. The default is to use periodic edge correction for simulation such that opposite edges are glued together. If border correction is used then the simulation is done in an extended window. Edge effects are theoretically completely removed by doubling the size of the window in each spatial dimension, but for practical purposes much less extension may be sufficient. The numeric \code{rbord} determines the extent of the extra space added to the window.
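For example (a minimal sketch of the \code{seed} mechanism described above; the model parameters are purely illustrative):
\preformatted{
  model <- dppGauss(lambda=30, alpha=.05, d=2)
  X1 <- simulate(model, seed=42)
  X2 <- simulate(model, seed=42)
  # X1 and X2 contain identical point patterns
}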
} \value{ A list of length \code{nsim} containing simulated point patterns. If the patterns are two-dimensional, then they are objects of class \code{"ppp"}, and the list has class \code{"solist"}. Otherwise, the patterns are objects of class \code{"ppx"} and the list has class \code{"anylist"}. The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rdpp}}, \code{\link[stats]{simulate}} } \examples{ if(interactive()) { nsim <- 2 lam <- 100 } else { nsim <- 1 lam <- 30 } model <- dppGauss(lambda=lam, alpha=.05, d=2) simulate(model, nsim) } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat.model/man/vargamma.estK.Rd0000644000176200001440000001455314331173100016757 0ustar liggesusers\name{vargamma.estK} \alias{vargamma.estK} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast. } \usage{ vargamma.estK(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical \eqn{K} function of the model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013).
It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of spatstat, instead of specifying \code{nu} (called \code{nu.ker} at that time), the user could specify \code{nu.pcf}, which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported but not recommended, for consistency across the package. In that case exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{scale} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian.
} \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{cauchy.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ if(interactive()) { u <- vargamma.estK(redwood) print(u) plot(u) } } \keyword{spatial} \keyword{models} spatstat.model/man/lgcp.estK.Rd0000644000176200001440000002022214514520744016114 0ustar liggesusers\name{lgcp.estK} \alias{lgcp.estK} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast. } \usage{ lgcp.estK(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the K function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical \eqn{K} function of the LGCP model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\Moller and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The \eqn{K}-function of the LGCP is \deqn{ K(r) = \int_0^r 2\pi s \exp(C(s)) \, {\rm d}s. }{ K(r) = integral from 0 to r of (2 * pi * s * exp(C(s))) ds. } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). 
} The covariance function \eqn{C(r)} is parametrised in the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm as the values \code{var} and \code{scale} respectively. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } For a list of available models see \code{\link{kppm}}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \note{ This function is considerably slower than \code{\link{lgcp.estpcf}} because of the computation time required for the integral in the \eqn{K}-function. Computation can be accelerated, at the cost of less accurate results, by setting \code{spatstat.options(fastK.lgcp=TRUE)}. } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R.
(2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \rasmus. Adapted for \pkg{spatstat} by \adrian. Further modifications by Rasmus Waagepetersen and Shen Guochun, and by \ege. } \seealso{ \code{\link{kppm}} and \code{\link{lgcp.estpcf}} for alternative methods of fitting LGCP. \code{\link{matclust.estK}}, \code{\link{thomas.estK}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link{Kest}} for the \eqn{K} function. } \examples{ if(interactive()) { u <- lgcp.estK(redwood) print(u) plot(u) } else { # faster - better starting point u <- lgcp.estK(redwood, c(var=1.05, scale=0.1)) } \testonly{ K <- Kest(redwood, r=seq(0, 0.1, length=9)) op <- spatstat.options(fastK.lgcp=TRUE) lgcp.estK(K, covmodel=list(model="matern", nu=0.3), control=list(maxit=2)) spatstat.options(op) } if(FALSE) { ## takes several minutes! lgcp.estK(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat.model/man/qqplot.ppm.Rd0000644000176200001440000003567314331173077016415 0ustar liggesusers\name{qqplot.ppm} \alias{qqplot.ppm} \title{ Q-Q Plot of Residuals from Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce a Q-Q plot based on residuals from the model. } \usage{ qqplot.ppm(fit, nsim=100, expr=NULL, \dots, type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) } \arguments{ \item{fit}{ The fitted point process model, which is to be assessed using the Q-Q plot. An object of class \code{"ppm"}. Smoothed residuals obtained from this fitted model will provide the ``data'' quantiles for the Q-Q plot. } \item{nsim}{ The number of simulations from the ``reference'' point process model. } \item{expr}{ Determines the simulation mechanism which provides the ``theoretical'' quantiles for the Q-Q plot. See Details. } \item{\dots}{ Arguments passed to \code{\link{diagnose.ppm}} influencing the computation of residuals. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{style}{ Character string controlling the type of Q-Q plot. Options are \code{"classical"} and \code{"mean"}. See Details. } \item{fast}{ Logical flag controlling the speed and accuracy of computation. Use \code{fast=TRUE} for interactive use and \code{fast=FALSE} for publication standard plots. See Details. } \item{verbose}{ Logical flag controlling whether the algorithm prints progress reports during long computations. } \item{plot.it}{ Logical flag controlling whether the function produces a plot or simply returns a value (silently). } \item{dimyx}{ Dimensions of the pixel grid on which the smoothed residual field will be calculated. A vector of two integers. 
} \item{nrep}{ If \code{control} is absent, then \code{nrep} gives the number of iterations of the Metropolis-Hastings algorithm that should be used to generate one simulation of the fitted point process. } \item{control}{ List of parameters controlling the Metropolis-Hastings algorithm \code{\link{rmh}} which generates each simulated realisation from the model (unless the model is Poisson). This list becomes the argument \code{control} of \code{\link{rmh.default}}. It overrides \code{nrep}. } \item{saveall}{ Logical flag indicating whether to save all the intermediate calculations. } \item{monochrome}{ Logical flag indicating whether the plot should be in black and white (\code{monochrome=TRUE}), or in colour (\code{monochrome=FALSE}). } \item{limcol}{ String. The colour to be used when plotting the 95\% limit curves. } \item{maxerr}{ Maximum number of failures tolerated while generating simulated realisations. See Details. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{fit}, if it is found to be damaged. } \item{envir.expr}{ Optional. An environment in which the expression \code{expr} should be evaluated. } } \value{ An object of class \code{"qqppm"} containing the information needed to reproduce the Q-Q plot. Entries \code{x} and \code{y} are numeric vectors containing quantiles of the simulations and of the data, respectively. } \details{ This function generates a Q-Q plot of the residuals from a fitted point process model. It is an addendum to the suite of diagnostic plots produced by the function \code{\link{diagnose.ppm}}, kept separate because it is computationally intensive. The quantiles of the theoretical distribution are estimated by simulation. In classical statistics, a Q-Q plot of residuals is a useful diagnostic for checking the distributional assumptions. Analogously, in spatial statistics, a Q-Q plot of the (smoothed) residuals from a fitted point process model is a useful way to check the interpoint interaction part of the model (Baddeley et al, 2005). The systematic part of the model (spatial trend, covariate effects, etc) is assessed using other plots made by \code{\link{diagnose.ppm}}. The argument \code{fit} represents the fitted point process model. It must be an object of class \code{"ppm"} (typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). Residuals will be computed for this fitted model using \code{\link{residuals.ppm}}, and the residuals will be kernel-smoothed to produce a ``residual field''. The values of this residual field will provide the ``data'' quantiles for the Q-Q plot. The argument \code{expr} is not usually specified. It provides a way to modify the ``theoretical'' or ``reference'' quantiles for the Q-Q plot. In normal usage we set \code{expr=NULL}. The default is to generate \code{nsim} simulated realisations of the fitted model \code{fit}, re-fit this model to each of the simulated patterns, evaluate the residuals from these fitted models, and use the kernel-smoothed residual field from these fitted models as a sample from the reference distribution for the Q-Q plot. In advanced use, \code{expr} may be an \code{expression}. 
It will be re-evaluated \code{nsim} times, and should include random computations so that the results are not identical each time. The result of evaluating \code{expr} should be either a point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}). If the value is a point pattern, then the original fitted model \code{fit} will be fitted to this new point pattern using \code{\link{update.ppm}}, to yield another fitted model. Smoothed residuals obtained from these \code{nsim} fitted models will yield the ``theoretical'' quantiles for the Q-Q plot. Alternatively \code{expr} can be a list of point patterns, or an \code{envelope} object that contains a list of point patterns (typically generated by calling \code{\link{envelope}} with \code{savepatterns=TRUE}). These point patterns will be used as the simulated patterns. Simulation is performed (if \code{expr=NULL}) using the Metropolis-Hastings algorithm \code{\link{rmh}}. Each simulated realisation is the result of running the Metropolis-Hastings algorithm from an independent random starting state each time. The iterative and termination behaviour of the Metropolis-Hastings algorithm are governed by the argument \code{control}. See \code{\link{rmhcontrol}} for information about this argument. As a shortcut, the argument \code{nrep} determines the number of Metropolis-Hastings iterations used to generate each simulated realisation, if \code{control} is absent. By default, simulations are generated in an expanded window. Use the argument \code{control} to change this, as explained in the section on \emph{Warning messages}. The argument \code{type} selects the type of residual or weight that will be computed. For options, see \code{\link{diagnose.ppm}}. The argument \code{style} determines the type of Q-Q plot. It is highly recommended to use the default, \code{style="mean"}. \describe{ \item{\code{style="classical"}}{ The quantiles of the residual field for the data (on the \eqn{y} axis) are plotted against the quantiles of the \bold{pooled} simulations (on the \eqn{x} axis). This plot is biased, and therefore difficult to interpret, because of strong autocorrelations in the residual field and the large differences in sample size. } \item{\code{style="mean"}}{ The order statistics of the residual field for the data are plotted against the sample means, over the \code{nsim} simulations, of the corresponding order statistics of the residual field for the simulated datasets. Dotted lines show the 2.5 and 97.5 percentiles, over the \code{nsim} simulations, of each order statistic. } } The argument \code{fast} is a simple way to control the accuracy and speed of computation. If \code{fast=FALSE}, the residual field is computed on a fine grid of pixels (by default 100 by 100 pixels, see below) and the Q-Q plot is based on the complete set of order statistics (usually 10,000 quantiles). If \code{fast=TRUE}, the residual field is computed on a coarse grid (at most 40 by 40 pixels) and the Q-Q plot is based on the \emph{percentiles} only. This is about 7 times faster. It is recommended to use \code{fast=TRUE} for interactive data analysis and \code{fast=FALSE} for definitive plots for publication. The argument \code{dimyx} gives full control over the resolution of the pixel grid used to calculate the smoothed residuals. Its interpretation is the same as the argument \code{dimyx} to the function \code{\link{as.mask}}. 
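For instance, a quick interactive check and a finer, publication-quality version might be sketched as follows (a minimal sketch, not run, assuming a fitted model \code{fit}):
\preformatted{
  # coarse grid, percentiles only: fast, for interactive use
  qqplot.ppm(fit, nsim=20, fast=TRUE)

  # fine grid, full set of order statistics: slow, for publication
  qqplot.ppm(fit, nsim=100, fast=FALSE, dimyx=c(128, 128))
}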
Note that \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{dimyx} is not present, then the default pixel grid dimensions are controlled by \code{spatstat.options("npixel")}. Since the computation is so time-consuming, \code{qqplot.ppm} returns a list containing all the data necessary to re-display the Q-Q plot. It is advisable to assign the result of \code{qqplot.ppm} to something (or use \code{.Last.value} if you forgot to). The return value is an object of class \code{"qqppm"}. There are methods for \code{\link{plot.qqppm}} and \code{\link{print.qqppm}}. See the Examples. The argument \code{saveall} is usually set to \code{FALSE}. If \code{saveall=TRUE}, then the intermediate results of calculation for each simulated realisation are saved and returned. The return value includes a 3-dimensional array \code{sim} containing the smoothed residual field images for each of the \code{nsim} realisations. When \code{saveall=TRUE}, the return value is an object of very large size, and should not be saved on disk. Errors may occur during the simulation process, because random data are generated. For example: \itemize{ \item one of the simulated patterns may be empty. \item one of the simulated patterns may cause an error in the code that fits the point process model. \item the user-supplied argument \code{expr} may have a bug. } Empty point patterns do not cause a problem for the code, but they are reported. Other problems that would lead to a crash are trapped; the offending simulated data are discarded, and the simulation is retried. The argument \code{maxerr} determines the maximum number of times that such errors will be tolerated (mainly as a safeguard against an infinite loop). } \section{Side Effects}{ Produces a Q-Q plot if \code{plot.it} is \code{TRUE}. } \section{Warning messages}{ A warning message will be issued if any of the simulations trapped an error (a potential crash). A warning message will be issued if all, or many, of the simulated point patterns are empty. This usually indicates a problem with the simulation procedure. The default behaviour of \code{qqplot.ppm} is to simulate patterns on an expanded window (specified through the argument \code{control}) in order to avoid edge effects. The model's trend is extrapolated over this expanded window. If the trend is strongly inhomogeneous, the extrapolated trend may have very large (or even infinite) values. This can cause the simulation algorithm to produce empty patterns. The only way to suppress this problem entirely is to prohibit the expansion of the window, by setting the \code{control} argument to something like \code{control=list(nrep=1e6, expand=1)}. Here \code{expand=1} means there will be no expansion. See \code{\link{rmhcontrol}} for more information about the argument \code{control}. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100.
} \seealso{ \code{\link{diagnose.ppm}}, \code{\link{lurking}}, \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}} } \examples{ fit <- ppm(cells ~1, Poisson()) diagnose.ppm(fit) # no suggestion of departure from stationarity if(interactive()) { qqplot.ppm(fit, 80) # strong evidence of non-Poisson interaction diagnose.ppm(fit, type="pearson") qqplot.ppm(fit, type="pearson") } \testonly{ qqplot.ppm(fit, 4) qqplot.ppm(fit, 4, type="pearson") } \donttest{ # capture the plot coordinates mypreciousdata <- qqplot.ppm(fit, 4, type="pearson") ## or use the idiom .Last.value if you forgot to assign them qqplot.ppm(fit, 4, type="pearson") mypreciousdata <- .Last.value plot(mypreciousdata) } ###################################################### # Q-Q plots based on fixed n # The above QQ plots used simulations from the (fitted) Poisson process. # But I want to simulate conditional on n, instead of Poisson # Do this by setting rmhcontrol(p=1) fixit <- list(p=1) if(interactive()) {qqplot.ppm(fit, 100, control=fixit)} \testonly{qqplot.ppm(fit, 4, control=fixit)} ###################################################### # Inhomogeneous Poisson data X <- rpoispp(function(x,y){1000 * exp(-3*x)}, 1000) plot(X) # Inhomogeneous Poisson model fit <- ppm(X ~x, Poisson()) if(interactive()) {qqplot.ppm(fit, 100)} \testonly{qqplot.ppm(fit, 4)} # conclusion: fitted inhomogeneous Poisson model looks OK ###################################################### # Advanced use of 'expr' argument # # set the initial conditions in Metropolis-Hastings algorithm # expr <- expression(rmh(fit, start=list(n.start=42), verbose=FALSE)) if(interactive()) \donttest{qqplot.ppm(fit, 100, expr)} \testonly{qqplot.ppm(fit, 4, expr)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.model/man/simulate.kppm.Rd0000644000176200001440000001170214514645563017065 0ustar liggesusers\name{simulate.kppm} \alias{simulate.kppm} \title{Simulate a Fitted Cluster Point Process Model} \description{ Generates simulated realisations from a fitted cluster point process model. } \usage{ \method{simulate}{kppm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, n.cond = NULL, w.cond = NULL, verbose=TRUE, retry=10, drop=FALSE) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{ Additional arguments passed to the relevant random generator. See Details. } \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{n.cond}{ Optional. Integer specifying a fixed number of points. See the section on \emph{Conditional Simulation}. } \item{w.cond}{ Optional. Conditioning region. A window (object of class \code{"owin"}) specifying the region which must contain exactly \code{n.cond} points. See the section on \emph{Conditional Simulation}. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{retry}{ Number of times to repeat the simulation if it fails (e.g. because of insufficient memory). 
} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"kppm"} of fitted cluster point process models. Simulations are performed by \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}} or \code{\link{rLGCP}} depending on the model. Additional arguments \code{\dots} are passed to the relevant function performing the simulation. For example, the argument \code{saveLambda} is recognised by all of the simulation functions. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \section{Conditional Simulation}{ If \code{n.cond} is specified, it should be a single integer. Simulation will be conditional on the event that the pattern contains exactly \code{n.cond} points (or contains exactly \code{n.cond} points inside the region \code{w.cond} if it is given). Conditional simulation uses the rejection algorithm described in Section 6.2 of \Moller, Syversveen and Waagepetersen (1998). There is a maximum number of proposals which will be attempted. Consequently the return value may contain fewer than \code{nsim} point patterns. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). (For conditional simulation, the length of the result may be shorter than \code{nsim}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \section{Warning: new implementation for LGCP}{ The simulation algorithm for log-Gaussian Cox processes has been completely re-written in \pkg{spatstat.random} version \code{3.2-0} to avoid depending on the package \pkg{RandomFields} which is now defunct (and is sadly missed). It is no longer possible to replicate results of \code{simulate.kppm} for log-Gaussian Cox processes that were obtained using previous versions of \pkg{spatstat.random}. The current code for simulating log-Gaussian Cox processes is a new implementation and should be considered vulnerable to new bugs. } \examples{ if(offline <- !interactive()) { spatstat.options(npixel=32, ndummy.min=16) } fit <- kppm(redwood ~x, "Thomas") simulate(fit, 2) simulate(fit, n.cond=60) if(offline) reset.spatstat.options() } \references{ \baddrubaturnbook \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482.
} \seealso{ \code{\link{kppm}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rLGCP}}, \code{\link{simulate.ppm}}, \code{\link[stats]{simulate}} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/plot.leverage.ppm.Rd0000644000176200001440000001046614510474260017632 0ustar liggesusers\name{plot.leverage.ppm} \alias{plot.leverage.ppm} \alias{contour.leverage.ppm} \alias{persp.leverage.ppm} \title{ Plot Leverage Function } \description{ Generate a pixel image plot, or a contour plot, or a perspective plot, of a leverage function that has been computed by \code{\link{leverage.ppm}}. } \usage{ \method{plot}{leverage.ppm}(x, \dots, what=c("smooth", "nearest", "exact"), showcut=TRUE, args.cut=list(drawlabels=FALSE), multiplot=TRUE) \method{contour}{leverage.ppm}(x, \dots, what=c("smooth", "nearest"), showcut=TRUE, args.cut=list(col=3, lwd=3, drawlabels=FALSE), multiplot=TRUE) \method{persp}{leverage.ppm}(x, \dots, what=c("smooth", "nearest"), main, zlab="leverage") } \arguments{ \item{x}{ Leverage function (object of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.im}} or \code{\link{contour.im}} or \code{\link{persp.im}} controlling the plot. } \item{what}{ Character string (partially matched) specifying the values to be plotted. See Details. } \item{showcut}{ Logical. If \code{TRUE}, a contour line is plotted at the level equal to the theoretical mean of the leverage. } \item{args.cut}{ Optional list of arguments passed to \code{\link[graphics]{contour.default}} to control the plotting of the contour line for the mean leverage. } \item{multiplot}{ Logical value indicating whether it is permissible to display several plot panels. } \item{main}{ Optional main title. A character string or character vector. } \item{zlab}{ Label for the \eqn{z} axis. A character string. } } \details{ These functions are the \code{plot}, \code{contour} and \code{persp} methods for objects of class \code{"leverage.ppm"}. Such objects are computed by the command \code{\link{leverage.ppm}}. The \code{plot} method displays the leverage function as a colour pixel image using \code{\link{plot.im}}, and draws a single contour line at the mean leverage value using \code{\link{contour.default}}. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. The \code{contour} method displays the leverage function as a contour plot, and also draws a single contour line at the mean leverage value, using \code{\link{contour.im}}. The \code{persp} method displays the leverage function as a surface in perspective view, using \code{\link{persp.im}}. Since the exact values of leverage are computed only at a finite set of quadrature locations, there are several options for these plots: \describe{ \item{\code{what="smooth"}:}{ (the default) an image plot showing a smooth function, obtained by applying kernel smoothing to the exact leverage values; } \item{\code{what="nearest"}:}{ an image plot showing a piecewise-constant function, obtained by taking the exact leverage value at the nearest quadrature point; } \item{\code{what="exact"}:}{ a symbol plot showing the exact values of leverage as circles, centred at the quadrature points, with diameters proportional to leverage. 
} } The pixel images are already contained in the object \code{x} and were computed by \code{\link{leverage.ppm}}; the resolution of these images is controlled by arguments to \code{\link{leverage.ppm}}. } \value{ Same as for \code{\link{plot.im}}, \code{\link{contour.im}} and \code{\link{persp.im}} respectively. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32, ndummy.min=16) X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lef <- leverage(fit) plot(lef) contour(lef) persp(lef) if(offline) spatstat.options(op) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/Fiksel.Rd0000644000176200001440000000741114331173075015502 0ustar liggesusers\name{Fiksel} \alias{Fiksel} \title{The Fiksel Interaction} \description{ Creates an instance of Fiksel's double exponential pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Fiksel(r, hc=NA, kappa) } \arguments{ \item{r}{The interaction radius of the Fiksel model} \item{hc}{The hard core distance} \item{kappa}{The rate parameter} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Fiksel process with interaction radius \eqn{r}, hard core distance \code{hc} and rate parameter \code{kappa}. } \details{ Fiksel (1984) introduced a pairwise interaction point process with the following interaction function \eqn{c}. For two points \eqn{u} and \eqn{v} separated by a distance \eqn{d=||u-v||}, the interaction \eqn{c(u,v)} is equal to \eqn{0} if \eqn{d < h}, equal to \eqn{1} if \eqn{d > r}, and equal to \deqn{ \exp(a \exp(-\kappa d))}{exp(a * exp(-kappa * d))} if \eqn{h \le d \le r}{h <= d <= r}, where \eqn{h,r,\kappa,a}{h,r,kappa,a} are parameters. A graph of this interaction function is shown in the Examples. The interpretation of the parameters is as follows. \itemize{ \item \eqn{h} is the hard core distance: distinct points are not permitted to come closer than a distance \eqn{h} apart. \item \eqn{r} is the interaction range: points further than this distance do not interact. \item \eqn{\kappa}{kappa} is the rate or slope parameter, controlling the decay of the interaction as distance increases. \item \eqn{a} is the interaction strength parameter, controlling the strength and type of interaction. If \eqn{a} is zero, the process is Poisson. If \eqn{a} is positive, the process is clustered. If \eqn{a} is negative, the process is inhibited (regular). } The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Fiksel pairwise interaction is yielded by the function \code{Fiksel()}. See the examples below. The parameters \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} must be fixed and given in the call to \code{Fiksel}, while the canonical parameter \eqn{a} is estimated by \code{\link{ppm}()}. To estimate \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} it is possible to use \code{\link{profilepl}}. The maximum likelihood estimator of \eqn{h} is the minimum interpoint distance.
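A sketch of this approach is shown below (illustrative parameter values only; it uses the \code{spruces} data from the example further down, and leaves \code{hc} at its default \code{NA} so that the hard core distance is estimated from the data):
\preformatted{
  # profile pseudolikelihood over a grid of (r, kappa) values
  X <- unmark(spruces)
  s <- expand.grid(r=seq(3, 5, by=0.5), kappa=c(0.5, 1, 2))
  p <- profilepl(s, Fiksel, X ~ 1)
  as.ppm(p)   # the fitted model at the optimal (r, kappa)
}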
If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. See also Stoyan, Kendall and Mecke (1987) page 161. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{StraussHard}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Elektronische Informationsverarbeitung und Kybernetik} \bold{20}, 270--278. Stoyan, D., Kendall, W.S. and Mecke, J. (1987) \emph{Stochastic geometry and its applications}. Wiley. } \examples{ Fiksel(r=1, hc=0.02, kappa=2) # prints a sensible description of itself X <- unmark(spruces) fit <- ppm(X ~ 1, Fiksel(r=3.5, kappa=1)) plot(fitin(fit)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/Hardcore.Rd0000644000176200001440000000527614331173075016021 0ustar liggesusers\name{Hardcore} \alias{Hardcore} \title{The Hard Core Point Process Model} \description{ Creates an instance of the hard core point process model which can then be fitted to point pattern data. } \usage{ Hardcore(hc=NA) } \arguments{ \item{hc}{The hard core distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hard core process with hard core distance \code{hc}. } \details{ A hard core process with hard core distance \eqn{h} and abundance parameter \eqn{\beta}{beta} is a pairwise interaction point process in which distinct points are not allowed to come closer than a distance \eqn{h} apart. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{\alpha}{alpha} is the normalising constant. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hard core process pairwise interaction is yielded by the function \code{Hardcore()}. See the examples below. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiHard}}, \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons.
} \examples{ Hardcore(0.02) # prints a sensible description of itself ppm(cells ~1, Hardcore(0.05)) # fit the stationary hard core process to `cells' # estimate hard core radius from data ppm(cells ~1, Hardcore()) \donttest{ # equivalent: ppm(cells ~1, Hardcore) } \donttest{ # fit a nonstationary hard core process # with log-cubic polynomial trend ppm(cells ~ polynom(x,y,3), Hardcore(0.05)) } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/plot.rppm.Rd0000644000176200001440000000425414331173077016224 0ustar liggesusers\name{plot.rppm} \alias{plot.rppm} \title{ Plot a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, plot the partition tree or the fitted intensity. } \usage{ \method{plot}{rppm}(x, \dots, what = c("tree", "spatial"), treeplot=NULL) } \arguments{ \item{x}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{what}{ Character string (partially matched) specifying whether to plot the partition tree or the fitted intensity. } \item{\dots}{ Arguments passed to \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}} (if \code{what="tree"}) or passed to \code{\link{plot.im}} (if \code{what="spatial"}) controlling the appearance of the plot. } \item{treeplot}{ Optional. A function to be used to plot and label the partition tree, replacing the two functions \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}}. } } \details{ If \code{what="tree"} (the default), the partition tree will be plotted using \code{\link[rpart]{plot.rpart}}, and labelled using \code{\link[rpart]{text.rpart}}. If the argument \code{treeplot} is given, then plotting and labelling will be performed by \code{treeplot} instead. A good choice is the function \code{prp} in package \pkg{rpart.plot}. If \code{what="spatial"}, the predicted intensity will be computed using \code{\link{predict.rppm}}, and this intensity will be plotted as an image using \code{\link{plot.im}}. } \value{ If \code{what="tree"}, a list containing \code{x} and \code{y} coordinates of the plotted nodes of the tree. If \code{what="spatial"}, the return value of \code{\link{plot.im}}. } \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}} } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # fit <- rppm(gold ~ dfault + greenstone, data=mur) # opa <- par(mfrow=c(1,2)) plot(fit) plot(fit, what="spatial") par(opa) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.model/man/MultiHard.Rd0000644000176200001440000000555014331173075016160 0ustar liggesusers\name{MultiHard} \alias{MultiHard} \title{The Multitype Hard Core Point Process Model} \description{ Creates an instance of the multitype hard core point process model which can then be fitted to point pattern data. } \usage{ MultiHard(hradii, types=NULL) } \arguments{ \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype hard core process with hard core radii \eqn{hradii[i,j]}. } \details{ This is a multitype version of the hard core process. A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart. 
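For instance, a hard core matrix for a pattern with two types, imposing a stronger constraint between points of unlike types, could be sketched as follows (illustrative values only):
\preformatted{
  # h[i,j] = hard core distance between a point of type i
  # and a point of type j
  h <- matrix(c(0.02, 0.05,
                0.05, 0.02), nrow=2, ncol=2)
  MultiHard(h)
}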
The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The matrix \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no distance constraint should be applied for this combination of types. Note that only the hard core radii are specified in \code{MultiHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}. See \code{\link{ragsMultiHard}} and \code{\link{rmh}} for simulation. } \examples{ h <- matrix(c(1,2,2,1), nrow=2,ncol=2) # prints a sensible description of itself MultiHard(h) # Fit the stationary multitype hardcore process to `amacrine' # with hard core operating only between cells of the same type. h <- 0.02 * matrix(c(1, NA, NA, 1), nrow=2,ncol=2) ppm(amacrine ~1, MultiHard(h)) } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype hard core model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiHard(types=NULL, hradii)}. The new code attempts to handle the old syntax as well. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/methods.zclustermodel.Rd0000644000176200001440000000360014331173100020606 0ustar liggesusers\name{methods.zclustermodel} \alias{methods.zclustermodel} % DoNotExport \alias{pcfmodel.zclustermodel} \alias{Kmodel.zclustermodel} \alias{predict.zclustermodel} \alias{intensity.zclustermodel} \alias{print.zclustermodel} \alias{clusterradius.zclustermodel} \alias{reach.zclustermodel} \title{ Methods for Cluster Models } \description{ Methods for the experimental class of cluster models. } \usage{ \method{pcfmodel}{zclustermodel}(model, \dots) \method{Kmodel}{zclustermodel}(model, \dots) \method{intensity}{zclustermodel}(X, \dots) \method{predict}{zclustermodel}(object, \dots, locations, type = "intensity", ngrid = NULL) \method{print}{zclustermodel}(x, \dots) \method{clusterradius}{zclustermodel}(model,\dots,thresh=NULL, precision=FALSE) \method{reach}{zclustermodel}(x, \dots, epsilon) } \arguments{ \item{model,object,x,X}{ Object of class \code{"zclustermodel"}. } \item{\dots}{ Arguments passed to other methods. } \item{locations}{ Locations where prediction should be performed. A window or a point pattern. } \item{type}{ Currently must equal \code{"intensity"}. } \item{ngrid}{ Pixel grid dimensions for prediction, if \code{locations} is a rectangle or polygon. } \item{thresh,epsilon}{Tolerance thresholds} \item{precision}{ Logical value stipulating whether the precision should also be returned. } } \details{ Experimental. } \value{ Same as for other methods.
} \author{ \adrian } \seealso{ \code{\link{zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) m2 <- zclustermodel("VarGamma", kappa=10, mu=10, scale=0.1, nu=0.7) m m2 g <- pcfmodel(m) g(0.2) g2 <- pcfmodel(m2) g2(1) Z <- predict(m, locations=square(2)) Z2 <- predict(m2, locations=square(1)) varcount(m, square(1)) varcount(m2, square(1)) } \keyword{spatial} \keyword{models} spatstat.model/man/plot.slrm.Rd0000644000176200001440000000225014331173077016215 0ustar liggesusers\name{plot.slrm} \Rdversion{1.1} \alias{plot.slrm} \title{ Plot a Fitted Spatial Logistic Regression } \description{ Plots a fitted Spatial Logistic Regression model. } \usage{ \method{plot}{slrm}(x, ..., type = "intensity") } \arguments{ \item{x}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Extra arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } } \details{ This is a method for \code{\link{plot}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). This function plots the result of \code{\link{predict.slrm}}. } \value{ None. } \seealso{ \code{\link{slrm}}, \code{\link{predict.slrm}}, \code{\link{plot.im}} } \examples{ X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fit <- slrm(X ~ Z) plot(fit) plot(fit, type="link") } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.model/man/varcount.Rd0000644000176200001440000000757314334122307016132 0ustar liggesusers\name{varcount} \alias{varcount} \title{ Predicted Variance of the Number of Points } \description{ Given a fitted point process model, calculate the predicted variance of the number of points in a nominated set \code{B}. } \usage{ varcount(model, B=Window(model), \dots, dimyx = NULL, relative=FALSE) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). } \item{B}{ A window (object of class \code{"owin"}) specifying the region in which the points are counted. Alternatively a pixel image (object of class \code{"im"}) or a function of spatial coordinates specifying a numerical weight for each random point. The default is the window of the original point pattern data to which the model was fitted. } \item{\dots}{ Additional arguments passed to \code{B} when it is a function. } \item{dimyx}{ Spatial resolution for the calculations. Argument passed to \code{\link{as.mask}}. } \item{relative}{ Logical value specifying whether to divide the variance by the mean value. } } \details{ The function \code{varcount} calculates the variance of the number of points falling in a specified window \code{B} according to the \code{model}. It can also calculate the variance of a sum of weights attached to each random point. If \code{relative=FALSE} (the default), the result is the variance. If \code{relative=TRUE}, the result is the variance divided by the mean, which is the overdispersion index (equal to 1 if the number of points has a Poisson distribution). The \code{model} should be a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). \itemize{ \item{ If \code{B} is a window, \code{varcount} calculates the variance of the number of points falling in \code{B}, according to the fitted \code{model}.
If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then \code{B} should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a pixel image, \code{varcount} calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling in the domain of the image. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then the domain of the pixel image, \code{as.owin(B)}, should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a \code{function(x,y)} or \code{function(x,y,...)} then \code{varcount} calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling inside the window \code{W=as.owin(model)}, the window in which the original data were observed. } } The variance calculation involves the intensity and the pair correlation function of the model. The calculation is exact (up to discretisation error) for models of class \code{"kppm"} and \code{"dppm"}, and for Poisson point process models of class \code{"ppm"}. For Gibbs point process models of class \code{"ppm"} the calculation depends on the Poisson-saddlepoint approximations to the intensity and pair correlation function, which are rough approximations. The approximation is not yet implemented for some Gibbs models. } \value{ A single number. } \author{ \spatstatAuthors } \seealso{ \code{\link{predict.ppm}}, \code{\link{predict.kppm}}, \code{\link{predict.dppm}} } \examples{ fitT <- kppm(redwood ~ 1, "Thomas") B <- owin(c(0, 0.5), c(-0.5, 0)) varcount(fitT, B) fitS <- ppm(swedishpines ~ 1, Strauss(9)) BS <- square(50) varcount(fitS, BS) } \keyword{spatial} \keyword{models} spatstat.model/man/Kmodel.ppm.Rd0000644000176200001440000000450214331173075016271 0ustar liggesusers\name{Kmodel.ppm} \alias{Kmodel.ppm} \alias{pcfmodel.ppm} \title{K Function or Pair Correlation Function of Gibbs Point Process model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a fitted Gibbs point process model. } \usage{ \method{Kmodel}{ppm}(model, \dots) \method{pcfmodel}{ppm}(model, \dots) } \arguments{ \item{model}{ A fitted Poisson or Gibbs point process model (object of class \code{"ppm"}) typically obtained from the model-fitting algorithm \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ This function computes an \emph{approximation} to the \eqn{K} function or the pair correlation function of a Gibbs point process. The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"ppm"}. The approximation is only available for stationary pairwise-interaction models. It uses the second order Poisson-saddlepoint approximation (Baddeley and Nair, 2012b) which is a combination of the Poisson-Boltzmann-Emden and Percus-Yevick approximations. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{ppm}} to fit Gibbs models. 
\code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.kppm}} for the method for cluster/Cox processes. } \examples{ fit <- ppm(swedishpines, ~1, Strauss(8)) p <- pcfmodel(fit) K <- Kmodel(fit) p(6) K(8) curve(K(x), from=0, to=15) } \references{ Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. \code{DOI: 10.1002/sta4.5} } \author{\adrian and Gopalan Nair. } \keyword{spatial} \keyword{models} spatstat.model/man/Softcore.Rd0000644000176200001440000001321314331173075016046 0ustar liggesusers\name{Softcore} \alias{Softcore} \title{The Soft Core Point Process Model} \description{ Creates an instance of the Soft Core point process model which can then be fitted to point pattern data. } \usage{ Softcore(kappa, sigma0=NA) } \arguments{ \item{kappa}{The exponent \eqn{\kappa}{kappa} of the Soft Core interaction} \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Soft Core process with exponent \eqn{\kappa}{kappa}. } \details{ The (stationary) Soft Core point process with parameters \eqn{\beta}{beta} and \eqn{\sigma}{sigma} and exponent \eqn{\kappa}{kappa} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points contributes a factor \deqn{ \exp \left\{ - \left( \frac{\sigma}{d} \right)^{2/\kappa} \right\} }{ exp( - (sigma/d)^(2/kappa) ) } to the density, where \eqn{d} is the distance between the two points. See the Examples for a plot of this interaction curve. Thus the process has probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \exp \left\{ - \sum_{i < j} \left( \frac{\sigma}{||x_i-x_j||} \right)^{2/\kappa} \right\} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) exp( - sum (sigma/||x[i]-x[j]||)^(2/kappa)) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{\alpha}{alpha} is the normalising constant, and the sum on the right hand side is over all unordered pairs of points of the pattern. This model describes an ``ordered'' or ``inhibitive'' process, with the strength of inhibition decreasing smoothly with distance. The interaction is controlled by the parameters \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}. \itemize{ \item The \emph{spatial scale} of interaction is controlled by the parameter \eqn{\sigma}{sigma}, which is a positive real number interpreted as a distance, expressed in the same units of distance as the spatial data. The parameter \eqn{\sigma}{sigma} is the distance at which the pair potential reaches the threshold value 0.37. \item The \emph{shape} of the interaction function is controlled by the exponent \eqn{\kappa}{kappa} which is a dimensionless number in the range \eqn{(0,1)}, with larger values corresponding to a flatter shape (or a more gradual decay rate). The process is well-defined only for \eqn{\kappa}{kappa} in \eqn{(0,1)}. The limit of the model as \eqn{\kappa \to 0}{kappa -> 0} is the hard core process with hard core distance \eqn{h=\sigma}{h=sigma}. 
\item The \dQuote{strength} of the interaction is determined by both of the parameters \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}. The larger the value of \eqn{\kappa}{kappa}, the wider the range of distances over which the interaction has an effect. If \eqn{\sigma}{sigma} is very small, the interaction is very weak for all practical purposes (theoretically if \eqn{\sigma = 0}{sigma = 0} the model reduces to the Poisson point process). } The nonstationary Soft Core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant \eqn{\beta}{beta}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Soft Core process pairwise interaction is yielded by the function \code{Softcore()}. See the examples below. The main argument is the exponent \code{kappa}. When \code{kappa} is fixed, the model becomes an exponential family with canonical parameters \eqn{\log \beta}{log(beta)} and \deqn{ \log \gamma = \frac{2}{\kappa} \log\sigma }{ log(gamma) = (2/kappa) log(sigma) } The canonical parameters are estimated by \code{\link{ppm}()}, not fixed in \code{Softcore()}. The optional argument \code{sigma0} can be used to improve numerical stability. If \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Ogata, Y. and Tanemura, M. (1981). Estimation of interaction potentials of spatial point patterns through the maximum likelihood procedure. \emph{Annals of the Institute of Statistical Mathematics}, B \bold{33}, 315--338. Ogata, Y. and Tanemura, M. (1984). Likelihood analysis of spatial point patterns. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 496--518. } \examples{ # fit the stationary Soft Core process to `cells' fit5 <- ppm(cells ~1, Softcore(kappa=0.5), correction="isotropic") # study shape of interaction and explore effect of parameters fit2 <- update(fit5, Softcore(kappa=0.2)) fit8 <- update(fit5, Softcore(kappa=0.8)) plot(fitin(fit2), xlim=c(0, 0.4), main="Pair potential (sigma = 0.1)", xlab=expression(d), ylab=expression(h(d)), legend=FALSE) plot(fitin(fit5), add=TRUE, col=4) plot(fitin(fit8), add=TRUE, col=3) legend("bottomright", col=c(1,4,3), lty=1, legend=expression(kappa==0.2, kappa==0.5, kappa==0.8)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/ppm.ppp.Rd0000644000176200001440000010676414515336521015661 0ustar liggesusers\name{ppm.ppp} \alias{ppm.ppp} \alias{ppm.quad} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Point Pattern Data } \description{ Fits a point process model to an observed point pattern.
} \usage{ \method{ppm}{ppp}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method=c("mpl", "logi", "VBlogi"), forcefit=FALSE, improve.type = c("none", "ho", "enet"), improve.args=list(), emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) \method{ppm}{quad}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method=c("mpl", "logi", "VBlogi"), forcefit=FALSE, improve.type = c("none", "ho", "enet"), improve.args=list(), emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) } \arguments{ \item{Q}{ A data point pattern (of class \code{"ppp"}) to which the model will be fitted, or a quadrature scheme (of class \code{"quad"}) containing this pattern. } \item{trend}{ An \R formula object specifying the spatial trend to be fitted. The default formula, \code{~1}, indicates the model is stationary and no trend is to be fitted. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{Ignored.} \item{data,covariates}{ The values of any spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the likelihood or pseudolikelihood should be restricted. See Details. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{clipwin}{ Optional. A spatial window (object of class \code{"owin"}) to which data will be restricted, before model-fitting is performed. See Details. } \item{covfunargs}{ A named list containing the values of any additional arguments required by covariate functions. } \item{correction}{ The name of the edge correction to be used. The default is \code{"border"} indicating the border correction. Other possibilities may include \code{"Ripley"}, \code{"isotropic"}, \code{"periodic"}, \code{"translate"} and \code{"none"}, depending on the \code{interaction}. } \item{rbord}{ If \code{correction = "border"} this argument specifies the distance by which the window should be eroded for the border correction. } \item{use.gam}{ Logical flag; if \code{TRUE} then computations are performed using \code{gam} instead of \code{\link{glm}}. } \item{method}{ String (partially matched) specifying the method used to fit the model. Options are \code{"mpl"} for the method of Maximum PseudoLikelihood (the default), \code{"logi"} for the Logistic Likelihood method and \code{"VBlogi"} for the Variational Bayes Logistic Likelihood method. } \item{forcefit}{ Logical flag for internal use. 
If \code{forcefit=FALSE}, some trivial models will be fitted by a shortcut. If \code{forcefit=TRUE}, the generic fitting method will always be used. } \item{improve.type}{ String (partially matched) specifying a method for improving the initial fit. If \code{improve.type = "none"} (the default), no improvement is performed. If \code{improve.type="ho"}, the Huang-Ogata approximate maximum likelihood method is used. If \code{improve.type="enet"}, the model coefficients are re-estimated using a regularized version of the composite likelihood. } \item{improve.args}{ Arguments used to control the algorithm for improving the initial fit. See Details. } \item{emend,project}{ (These are equivalent: \code{project} is an older name for \code{emend}.) Logical value. Setting \code{emend=TRUE} will ensure that the fitted model is always a valid point process by applying \code{\link{emend.ppm}}. } \item{prior.mean}{ Optional vector of prior means for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{prior.var}{ Optional prior variance covariance matrix for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{nd}{ Optional. Integer or pair of integers. The dimension of the grid of dummy points (\code{nd * nd} or \code{nd[1] * nd[2]}) used to evaluate the integral in the pseudolikelihood. Incompatible with \code{eps}. } \item{eps}{ Optional. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{gcontrol}{ Optional. List of parameters passed to \code{\link{glm.control}} (or passed to \code{\link{gam.control}} if \code{use.gam=TRUE}) controlling the model-fitting algorithm. } \item{nsim}{ Number of simulated realisations to generate (for \code{improve.type="ho"}) } \item{nrmh}{ Number of Metropolis-Hastings iterations for each simulated realisation (for \code{improve.type="ho"}) } \item{start,control}{ Arguments passed to \code{\link{rmh}} controlling the behaviour of the Metropolis-Hastings algorithm (for \code{improve.type="ho"}) } \item{verb}{ Logical flag indicating whether to print progress reports (for \code{improve.type="ho"}) } \item{callstring}{ Internal use only. } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ \bold{NOTE:} This help page describes the \bold{old syntax} of the function \code{ppm}, described in many older documents. This old syntax is still supported. However, if you are learning about \code{ppm} for the first time, we recommend you use the \bold{new syntax} described in the help file for \code{\link{ppm}}. This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. \describe{ \item{basic use:}{ In basic use, \code{Q} is a point pattern dataset (an object of class \code{"ppp"}) to which we wish to fit a model. The syntax of \code{ppm()} is closely analogous to the \R functions \code{\link{glm}} and \code{gam}. The analogy is: \tabular{ll}{ \bold{glm} \tab \bold{ppm} \cr \code{formula} \tab \code{trend} \cr \code{family} \tab \code{interaction} } The point process model to be fitted is specified by the arguments \code{trend} and \code{interaction} which are respectively analogous to the \code{formula} and \code{family} arguments of glm(). 
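As a minimal sketch of this analogy (using the built-in \code{cells} dataset; the interaction radius \code{0.07} is purely illustrative):
\preformatted{
  # glm:  glm(y ~ x, family=poisson)       fits a regression model
  # ppm:  ppm(X, trend, interaction)       fits a point process model
  fit <- ppm(cells, ~x, Strauss(r=0.07))
}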
Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the argument \code{trend}. This is an \R formula object, which may be expressed in terms of the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, or the variables in \code{covariates} (if supplied), or both. It specifies the \bold{logarithm} of the first order potential of the process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If \code{trend} is absent or equal to the default, \code{~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The models currently available are: \GibbsInteractionsList. See the examples below. It is also possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. } \item{Models with covariates:}{ To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should be supplied in the argument \code{covariates}. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. Typically the argument \code{covariates} is a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. For marked point pattern data, the covariate can be a \code{function(x, y, marks)} or \code{function(x, y, marks, ...)}. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. 
For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{covariates} is a list, the list entries should have names corresponding to the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{covariates}. If \code{covariates} is a data frame, \code{Q} must be a quadrature scheme (see under Quadrature Schemes below). Then \code{covariates} must have as many rows as there are points in \code{Q}. The \eqn{i}th row of \code{covariates} should contain the values of spatial variables which have been observed at the \eqn{i}th point of \code{Q}. } \item{Quadrature schemes:}{ In advanced use, \code{Q} may be a `quadrature scheme'. This was originally just a technicality but it has turned out to have practical uses, as we explain below. Quadrature schemes are required for our implementation of the method of maximum pseudolikelihood. The definition of the pseudolikelihood involves an integral over the spatial window containing the data. In practice this integral must be approximated by a finite sum over a set of quadrature points. We use the technique of Baddeley and Turner (2000), a generalisation of the Berman-Turner (1992) device. In this technique the quadrature points for the numerical approximation include all the data points (points of the observed point pattern) as well as additional `dummy' points. Quadrature schemes are also required for the method of maximum logistic likelihood, which combines the data points with additional `dummy' points. A quadrature scheme is an object of class \code{"quad"} (see \code{\link{quad.object}}) which specifies both the data point pattern and the dummy points for the quadrature scheme, as well as the quadrature weights associated with these points. If \code{Q} is simply a point pattern (of class \code{"ppp"}, see \code{\link{ppp.object}}) then it is interpreted as specifying the data points only; a set of dummy points specified by \code{\link{default.dummy}()} is added, and the default weighting rule is invoked to compute the quadrature weights. Finer quadrature schemes (i.e. those with more dummy points) generally yield a better approximation, at the expense of higher computational load. An easy way to fit models using a finer quadrature scheme is to let \code{Q} be the original point pattern data, and use the argument \code{nd} to determine the number of dummy points in the quadrature scheme. Complete control over the quadrature scheme is possible. See \code{\link{quadscheme}} for an overview. Use \code{quadscheme(X, D, method="dirichlet")} to compute quadrature weights based on the Dirichlet tessellation, or \code{quadscheme(X, D, method="grid")} to compute quadrature weights by counting points in grid squares, where \code{X} and \code{D} are the patterns of data points and dummy points respectively. 
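For example, here is a sketch of a hand-built quadrature scheme (the grid size \code{nd=32} and the interaction radius are arbitrary illustrative choices):
\preformatted{
  X <- cells                           # data point pattern
  D <- default.dummy(X, nd=32)         # dummy point pattern
  Q <- quadscheme(X, D, method="grid") # combine into a quadrature scheme
  fit <- ppm(Q, ~1, Strauss(r=0.07))
}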
Alternatively use \code{\link{pixelquad}} to make a quadrature scheme with a dummy point at every pixel in a pixel image. A practical advantage of quadrature schemes arises when we want to fit a model involving covariates (e.g. soil pH). Suppose we have only been able to observe the covariates at a small number of locations. Suppose \code{cov.dat} is a data frame containing the values of the covariates at the data points (i.e.\ \code{cov.dat[i,]} contains the observations for the \code{i}th data point) and \code{cov.dum} is another data frame (with the same columns as \code{cov.dat}) containing the covariate values at another set of points whose locations are given by the point pattern \code{Y}. Then setting \code{Q = quadscheme(X,Y)} combines the data points and dummy points into a quadrature scheme, and \code{covariates = rbind(cov.dat, cov.dum)} combines the covariate data frames. We can then fit the model by calling \code{ppm(Q, ..., covariates)}. } \item{Model-fitting technique:}{ There are several choices for the technique used to fit the model. \describe{ \item{method="mpl"}{ (the default): the model will be fitted by maximising the pseudolikelihood (Besag, 1975) using the Berman-Turner computational approximation (Berman and Turner, 1992; Baddeley and Turner, 2000). Maximum pseudolikelihood is equivalent to maximum likelihood if the model is a Poisson process. Maximum pseudolikelihood is biased if the interpoint interaction is very strong, unless there is a large number of dummy points. The default settings for \code{method='mpl'} specify a moderately large number of dummy points, striking a compromise between speed and accuracy. } \item{method="logi":}{ the model will be fitted by maximising the logistic likelihood (Baddeley et al, 2014). This technique is roughly equivalent in speed to maximum pseudolikelihood, but is believed to be less biased. Because it is less biased, the default settings for \code{method='logi'} specify a relatively small number of dummy points, so that this method is the fastest, in practice. } \item{method="VBlogi":}{ the model will be fitted in a Bayesian setup by maximising the posterior probability density for the canonical model parameters. This uses the variational Bayes approximation to the posterior derived from the logistic likelihood as described in Rajala (2014). The prior is assumed to be multivariate Gaussian with mean vector \code{prior.mean} and variance-covariance matrix \code{prior.var}. } } Note that \code{method='logi'} and \code{method='VBlogi'} involve randomisation, so that the results are subject to random variation. After this initial fit, there are several ways to improve the fit: \describe{ \item{improve.type="none":}{ No further improvement is performed. } \item{improve.type="ho":}{ the model will be re-fitted by applying the approximate maximum likelihood method of Huang and Ogata (1999). See below. The Huang-Ogata method is slower than the other options, but has better statistical properties. This method involves randomisation, so the results are subject to random variation. } \item{improve.type="enet":}{ The model will be re-fitted using a regularized version of the composite likelihood. See below. } } } \item{Huang-Ogata method:}{ If \code{improve.type="ho"} then the model will be fitted using the Huang-Ogata (1999) approximate maximum likelihood method. First the model is fitted by maximum pseudolikelihood as described above, yielding an initial estimate of the parameter vector \eqn{\theta_0}{theta0}. 
From this initial model, \code{nsim} simulated realisations are generated. The score and Fisher information of the model at \eqn{\theta=\theta_0}{theta=theta0} are estimated from the simulated realisations. Then one step of the Fisher scoring algorithm is taken, yielding an updated estimate \eqn{\theta_1}{theta1}. The corresponding model is returned. Simulated realisations are generated using \code{\link{rmh}}. The iterative behaviour of the Metropolis-Hastings algorithm is controlled by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. As a shortcut, the argument \code{nrmh} determines the number of Metropolis-Hastings iterations run to produce one simulated realisation (if \code{control} is absent). Also if \code{start} is absent or equal to \code{NULL}, it defaults to \code{list(n.start=N)} where \code{N} is the number of points in the data point pattern. } \item{Regularization:}{ This requires the package \pkg{glmnet}. \bold{Details to be written.} } \item{Edge correction}{ Edge correction should be applied to the sufficient statistics of the model, to reduce bias. The argument \code{correction} is the name of an edge correction method. The default \code{correction="border"} specifies the border correction, in which the quadrature window (the domain of integration of the pseudolikelihood) is obtained by trimming off a margin of width \code{rbord} from the observation window of the data pattern. Not all edge corrections are implemented (or implementable) for arbitrary windows. Other options depend on the argument \code{interaction}, but these generally include \code{correction="periodic"} (the periodic or toroidal edge correction in which opposite edges of a rectangular window are identified) and \code{correction="translate"} (the translation correction, see Baddeley 1998 and Baddeley and Turner 2000). For pairwise interaction models there is also Ripley's isotropic correction, identified by \code{correction="isotropic"} or \code{"Ripley"}. } \item{Subsetting}{ The arguments \code{subset} and \code{clipwin} specify that the model should be fitted to a restricted subset of the available data. These arguments are equivalent for Poisson point process models, but different for Gibbs models. If \code{clipwin} is specified, then all the available data will be restricted to this spatial region, and data outside this region will be discarded, before the model is fitted. If \code{subset} is specified, then no data are deleted, but the domain of integration of the likelihood or pseudolikelihood is restricted to the \code{subset}. For Poisson models, these two arguments have the same effect; but for a Gibbs model, interactions between points inside and outside the \code{subset} are taken into account, while interactions between points inside and outside the \code{clipwin} are ignored. } } } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters}{ Other parameters are called \emph{irregular}.
} } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells, ~1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. Besag, J. Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24} (1975) 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62} (1994) 99-117. Huang, F. and Ogata, Y. Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8} (1999) 510-530. Jensen, J.L. and Moeller, M. Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1} (1991) 445--461. Jensen, J.L. and Kuensch, H.R. On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46} (1994) 475-486. Rajala T. (2014) \emph{A note on Bayesian logistic regression for spatial exponential family Gibbs point processes}, Preprint on ArXiv.org. \url{https://arxiv.org/abs/1411.0539} } \seealso{ \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. 
See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{emend.ppm}} for ensuring the fitted model is a valid point process. } \section{Error and Warning Messages}{ Some common error messages and warning messages are listed below, with explanations. \describe{ \item{\dQuote{System is computationally singular}}{ The Fisher information matrix of the fitted model has a determinant close to zero, so that the matrix cannot be inverted, and the software cannot calculate standard errors or confidence intervals. This error is usually reported when the model is printed, because the \code{print} method calculates standard errors for the fitted parameters. Singularity usually occurs because the spatial coordinates in the original data were very large numbers (e.g. expressed in metres) so that the fitted coefficients were very small numbers. The simple remedy is to \bold{rescale the data}, for example, to convert from metres to kilometres by \code{X <- \link{rescale}(X, 1000)}, then re-fit the model. Singularity can also occur if the covariate values are very large numbers, or if the covariates are approximately collinear. } \item{\dQuote{Covariate values were NA or undefined at X\% (M out of N) of the quadrature points}}{ The covariate data (typically a pixel image) did not provide values of the covariate at some of the spatial locations in the observation window of the point pattern. This means that the spatial domain of the pixel image does not completely cover the observation window of the point pattern. If the percentage is small, this warning can be ignored - typically it happens because of rounding effects which cause the pixel image to be one-pixel-width narrower than the observation window. However if more than a few percent of covariate values are undefined, it would be prudent to check that the pixel images are correct, and are correctly registered in their spatial relation to the observation window. } \item{\dQuote{Some tiles with positive area do not contain any quadrature points: relative error = X\%}}{ A problem has arisen when creating the quadrature scheme used to fit the model. In the default rule for computing the quadrature weights, space is divided into rectangular tiles, and the number of quadrature points (data and dummy points) in each tile is counted. It is possible for a tile with non-zero area to contain no quadrature points; in this case, the quadrature scheme will contribute a bias to the model-fitting procedure. \bold{A small relative error (less than 2 percent) is not important.} Relative errors of a few percent can occur because of the shape of the window. If the relative error is greater than about 5 percent, we recommend trying different parameters for the quadrature scheme, perhaps setting a larger value of \code{nd} to increase the number of dummy points. A relative error greater than 10 percent indicates a major problem with the input data: in this case, extract the quadrature scheme by applying \code{\link{quad.ppm}} to the fitted model, and inspect it. (The most likely cause of this problem is that the spatial coordinates of the original data were not handled correctly, for example, coordinates of the locations and the window boundary were incompatible.) } \item{\dQuote{Model is unidentifiable}}{ It is not possible to estimate all the model parameters from this dataset. 
The error message gives a further explanation, such as \dQuote{data pattern is empty}. Choose a simpler model, or check the data. } \item{\dQuote{N data points are illegal (zero conditional intensity)}}{ In a Gibbs model (i.e. with interaction between points), the conditional intensity may be zero at some spatial locations, indicating that the model forbids the presence of a point at these locations. However if the conditional intensity is zero \emph{at a data point}, this means that the model is inconsistent with the data. Modify the interaction parameters so that the data point is not illegal (e.g. reduce the value of the hard core radius) or choose a different interaction. } } } \section{Warnings}{ The implementation of the Huang-Ogata method is experimental; several bugs were fixed in \pkg{spatstat} 1.19-0. See the comments above about the possible inefficiency and bias of the maximum pseudolikelihood estimator. The accuracy of the Berman-Turner approximation to the pseudolikelihood depends on the number of dummy points used in the quadrature scheme. The number of dummy points should at least equal the number of data points. The parameter values of the fitted model do not necessarily determine a valid point process. Some of the point process models are only defined when the parameter values lie in a certain subset. For example the Strauss process only exists when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to \eqn{1}, corresponding to a value of \code{ppm()$theta[2]} less than or equal to \code{0}. By default (if \code{emend=FALSE}) the algorithm maximises the pseudolikelihood without constraining the parameters, and does not apply any checks for sanity after fitting the model. This is because the fitted parameter value could be useful information for data analysis. To constrain the parameters to ensure that the model is a valid point process, set \code{emend=TRUE}. See also the functions \code{\link{valid.ppm}} and \code{\link{emend.ppm}}. The \code{trend} formula should not use any variable names beginning with the prefixes \code{.mpl} or \code{Interaction} as these names are reserved for internal use. The data frame \code{covariates} should have as many rows as there are points in \code{Q}. It should not contain variables called \code{x}, \code{y} or \code{marks} as these names are reserved for the Cartesian coordinates and the marks. If the model formula involves one of the functions \code{poly()}, \code{bs()} or \code{ns()} (e.g. applied to spatial coordinates \code{x} and \code{y}), the fitted coefficients can be misleading. The resulting fit is not to the raw spatial variates (\code{x}, \code{x^2}, \code{x*y}, etc.) but to a transformation of these variates. The transformation is implemented by \code{poly()} in order to achieve better numerical stability. However the resulting coefficients are appropriate for use with the transformed variates, not with the raw variates. This affects the interpretation of the constant term in the fitted model, \code{logbeta}. Conventionally, \eqn{\beta}{beta} is the background intensity, i.e. the value taken by the conditional intensity function when all predictors (including spatial or ``trend'' predictors) are set equal to \eqn{0}. However the coefficient actually produced is the value that the log conditional intensity takes when all the predictors, including the \emph{transformed} spatial predictors, are set equal to \code{0}, which is not the same thing. 
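To see the difference in the fitted coefficients, compare the two fits in the following sketch (the dataset and the polynomial degree are arbitrary illustrative choices):
\preformatted{
  fitA <- ppm(nztrees, ~ poly(x, y, degree=2))   # orthogonal polynomial basis
  fitB <- ppm(nztrees, ~ polynom(x, y, 2))       # raw terms x, y, x^2, x*y, y^2
  coef(fitA)   # coefficients of the transformed variates
  coef(fitB)   # coefficients with their natural interpretation
}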
Worse still, the result of \code{\link{predict.ppm}} can be completely wrong if the trend formula contains one of the functions \code{poly()}, \code{bs()} or \code{ns()}. This is a weakness of the underlying function \code{\link{predict.glm}}. If you wish to fit a polynomial trend, we offer an alternative to \code{\link{poly}()}, namely \code{polynom()}, which avoids the difficulty induced by transformations. It is completely analogous to \code{poly} except that it does not orthonormalise. The resulting coefficient estimates then have their natural interpretation and can be predicted correctly. Numerical stability may be compromised. Values of the maximised pseudolikelihood are not comparable if they have been obtained with different values of \code{rbord}. } \examples{ # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees) ppm(nztrees ~ 1) # equivalent. Q <- quadscheme(nztrees) ppm(Q) # equivalent. fit1 <- ppm(nztrees, ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated # For other examples, see help(ppm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/Concom.Rd0000644000176200001440000001327414331173075015507 0ustar liggesusers\name{Concom} \alias{Concom} \title{The Connected Component Process Model} \description{ Creates an instance of the Connected Component point process model which can then be fitted to point pattern data. } \usage{ Concom(r) } \arguments{ \item{r}{Threshold distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the connected component process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the connected component process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the connected component interaction is yielded by the function \code{Concom()}. See the examples below. In \bold{standard form}, the connected component process (Baddeley and \Moller, 1989) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{\kappa} and interaction parameter \eqn{\gamma}{\gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-C(x)} }{ f(x[1],\ldots,x[n]) = \alpha . \kappa^n(x) . \gamma^(-C(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{C(x)} is defined below. Here \eqn{\alpha}{\alpha} is a normalising constant. To define the term \code{C(x)}, suppose that we construct a planar graph by drawing an edge between each pair of points \eqn{x_i,x_j}{x[i],x[j]} which are less than \eqn{r} units apart. Two points belong to the same connected component of this graph if they are joined by a path in the graph. Then \eqn{C(x)} is the number of connected components of the graph. The interaction parameter \eqn{\gamma}{\gamma} can be any positive number. If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{\kappa}. 
If \eqn{\gamma < 1}{\gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered. Thus, a connected-component interaction process can be used to model either clustered or regular point patterns. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{-U(x)} }{ f(x_1,\ldots,x_n) = \alpha . \beta^n(x) \gamma^(-U(x)) } where \eqn{\beta}{\beta} is the new intensity parameter and \eqn{U(x) = C(x) - n(x)} is the interaction potential. In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{\beta} to the probability density (so the first order trend is \eqn{\beta}{\beta}). The quantity \eqn{U(x)} is a true interaction potential, in the sense that \eqn{U(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together. When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-U(x)} increases by zero or a positive integer. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is a positive integer \eqn{k} if there are \eqn{k} different connected components of \eqn{x} that lie close to \eqn{u}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \gamma^\delta}{\beta * \gamma^\delta} to the probability density, where \eqn{\delta}{\delta} is the increase in potential. If desired, the original parameter \eqn{\kappa}{\kappa} can be recovered from the canonical parameter by \eqn{\kappa = \beta\gamma}{\kappa = \beta * \gamma}. The \emph{nonstationary} connected component process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{\beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{Concom()} is the threshold distance \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(\beta)} and \eqn{\log(\gamma)}{log(\gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Concom()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \section{Edge correction}{ The interaction distance of this process is infinite. There are no well-established procedures for edge correction for fitting such models, and accordingly the model-fitting function \code{\link{ppm}} will give an error message saying that the user must specify an edge correction. A reasonable solution is to use the border correction at the same distance \code{r}, as shown in the Examples. } \examples{ # prints a sensible description of itself Concom(r=0.1) # Fit the stationary connected component process to redwood data ppm(redwood ~1, Concom(r=0.07), rbord=0.07) # Fit the stationary connected component process to `cells' data ppm(cells ~1, Concom(r=0.06), rbord=0.06) # gamma=0 indicates hard core process. # Fit a nonstationary connected component model # with log-cubic polynomial trend \donttest{ ppm(swedishpines ~polynom(x/10,y/10,3), Concom(r=7), rbord=7) } } \references{ Baddeley, A.J. and \Moller, J. (1989) Nearest-neighbour Markov point processes and random sets. \emph{International Statistical Review} \bold{57}, 89--121.
} \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/polynom.Rd0000644000176200001440000000324714331173077015767 0ustar liggesusers\name{polynom} \alias{polynom} \title{ Polynomial in One or Two Variables } \description{ This function is used to represent a polynomial term in a model formula. It computes the homogeneous terms in the polynomial of degree \code{n} in one variable \code{x} or two variables \code{x,y}. } \usage{ polynom(x, \dots) } \arguments{ \item{x}{ A numerical vector. } \item{\dots}{ Either a single integer \code{n} specifying the degree of the polynomial, or two arguments \code{y,n} giving another vector of data \code{y} and the degree of the polynomial. } } \details{ This function is typically used inside a model formula in order to specify the most general possible polynomial of order \code{n} involving one numerical variable \code{x} or two numerical variables \code{x,y}. It is equivalent to \code{\link[stats]{poly}} with the argument \code{raw=TRUE}. If only one numerical vector argument \code{x} is given, the function computes the vectors \code{x^k} for \code{k = 1, 2, \dots, n}. These vectors are combined into a matrix with \code{n} columns. If two numerical vector arguments \code{x,y} are given, the function computes the vectors \code{x^k * y^m} for \code{k >= 0} and \code{m >= 0} satisfying \code{0 < k + m <= n}. These vectors are combined into a matrix with one column for each homogeneous term. } \value{ A numeric matrix, with rows corresponding to the entries of \code{x}, and columns corresponding to the terms in the polynomial. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{poly}}, \code{\link{harmonic}} } \examples{ x <- 1:4 y <- 10 * (0:3) polynom(x, 3) polynom(x, y, 3) } \keyword{arith} spatstat.model/man/SatPiece.Rd0000644000176200001440000001074114331173075015762 0ustar liggesusers\name{SatPiece} \alias{SatPiece} \title{Piecewise Constant Saturated Pairwise Interaction Point Process Model} \description{ Creates an instance of a saturated pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ SatPiece(r, sat) } \arguments{ \item{r}{vector of jump points for the potential function} \item{sat}{ vector of saturation values, or a single saturation value } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is a generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to the case of multiple interaction distances. It can also be described as the saturated analogue of a pairwise interaction process with piecewise-constant pair potential, described in \code{\link{PairPiece}}. The saturated point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], ..., r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],...,s[k]}, intensity parameter \eqn{\beta}{beta} and interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], ..., gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x_i, X) ...
gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x_i, X) = min(s[j], t(j, x_i, X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie at a distance between \eqn{r_{j-1}}{r[j-1]} and \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. We take \eqn{r_0 = 0}{r[0] = 0} so that \eqn{t_1(x_i,X)}{t(1, x[i], X)} is the number of points of \eqn{X} that lie within a distance \eqn{r_1}{r[1]} of the point \eqn{x_i}{x[i]}. \code{SatPiece} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{SatPiece()}. See the examples below. Simulation of this point process model is not yet implemented. This model is not locally stable (the conditional intensity is unbounded). The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} corresponds to the distance range from \code{0} to \code{r[1]}, and \code{sat[2]} to the distance range from \code{r[1]} to \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every distance range. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x_i, X) = t(j, x_i, X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{SatPiece}} are the square roots of the parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{BadGey}}. } \examples{ SatPiece(c(0.1,0.2), c(1,1)) # prints a sensible description of itself SatPiece(c(0.1,0.2), 1) ppm(cells ~1, SatPiece(c(0.07, 0.1, 0.13), 2)) # fit a stationary piecewise constant Saturated pairwise interaction process \donttest{ ppm(cells ~polynom(x,y,3), SatPiece(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend } } \author{\adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat.model/man/Saturated.Rd0000644000176200001440000000136514331173075016223 0ustar liggesusers\name{Saturated} \alias{Saturated} \title{Saturated Pairwise Interaction model} \description{ Experimental. } \usage{ Saturated(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is experimental. 
It constructs a member of the ``saturated pairwise'' family \code{\link{pairsat.family}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{ppm.object}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/dppspecdenrange.Rd0000644000176200001440000000111114331173076017417 0ustar liggesusers\name{dppspecdenrange} \alias{dppspecdenrange} \title{Range of Spectral Density of a Determinantal Point Process Model} \description{ Computes the range of the spectral density of a determinantal point process model. } \usage{dppspecdenrange(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{Numeric value (possibly \code{Inf}).} \author{ \adrian \rolf and \ege } \examples{ m <- dppBessel(lambda=100, alpha=0.05, sigma=1, d=2) dppspecdenrange(m) } \seealso{ \code{\link{dppspecden}} } \keyword{spatial} \keyword{models} spatstat.model/man/is.dppm.Rd0000644000176200001440000000050314331173076015633 0ustar liggesusers\name{is.dppm} \alias{is.dppm} \title{Recognise Fitted Determinantal Point Process Models} \description{Check that an object inherits the class dppm} \usage{is.dppm(x)} \arguments{ \item{x}{Any object.} } \value{A single logical value.} \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.model/man/Strauss.Rd0000644000176200001440000000575314331173075015740 0ustar liggesusers\name{Strauss} \alias{Strauss} \title{The Strauss Point Process Model} \description{ Creates an instance of the Strauss point process model which can then be fitted to point pattern data. } \usage{ Strauss(r) } \arguments{ \item{r}{The interaction radius of the Strauss process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Strauss process with interaction radius \eqn{r}. } \details{ The (stationary) Strauss process with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss process pairwise interaction is yielded by the function \code{Strauss()}. See the examples below. Note the only argument is the interaction radius \code{r}. 
When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Strauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ Strauss(r=0.1) # prints a sensible description of itself \donttest{ ppm(cells ~1, Strauss(r=0.07)) # fit the stationary Strauss process to `cells' } ppm(cells ~polynom(x,y,3), Strauss(r=0.07)) # fit a nonstationary Strauss process with log-cubic polynomial trend } \author{\adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.model/man/compareFit.Rd0000644000176200001440000001007014331173076016352 0ustar liggesusers\name{compareFit} \alias{compareFit} \title{ Residual Diagnostics for Multiple Fitted Models } \description{ Compares several fitted point process models using the same residual diagnostic. } \usage{ compareFit(object, Fun, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = NULL, modelnames = NULL, same = NULL, different = NULL) } \arguments{ \item{object}{ Object or objects to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), or a list of these objects. } \item{Fun}{ Diagnostic function to be computed for each model. One of the functions \code{Kcom}, \code{Kres}, \code{Gcom}, \code{Gres}, \code{psst}, \code{psstA} or \code{psstG} or a string containing one of these names. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Extra arguments passed to \code{Fun}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern or list of point patterns. See \code{\link{ppm}} for details. Each of these arguments can be a list, specifying different \code{trend}, \code{interaction} and/or \code{rbord} values to be used to generate different fitted models. } \item{modelnames}{ Character vector. Short descriptive names for the different models. } \item{same,different}{ Character strings or character vectors passed to \code{\link{collapse.fv}} to determine the format of the output. } } \details{ This is a convenient way to collect diagnostic information for several different point process models fitted to the same point pattern dataset, or for point process models of the same form fitted to several different datasets, etc. The first argument, \code{object}, is usually a list of fitted point process models (objects of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a list of point patterns (objects of class \code{"ppp"}). In that case, point process models will be fitted to each of the point pattern datasets, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). 
See \code{\link{ppm}} for details of these arguments. Alternatively \code{object} can be a single point pattern (object of class \code{"ppp"}) and one or more of the arguments \code{trend}, \code{interaction} or \code{rbord} can be a list. In this case, point process models will be fitted to the same point pattern dataset, using each of the model specifications listed. The diagnostic function \code{Fun} will be applied to each of the point process models. The results will be collected into a single function value table. The \code{modelnames} are used to label the results from each fitted model. } \value{ Function value table (object of class \code{"fv"}). } \author{ \ege, \adrian and Jesper \Moller. } \seealso{ \code{\link{ppm}}, \code{\link{Kcom}}, \code{\link{Kres}}, \code{\link{Gcom}}, \code{\link{Gres}}, \code{\link{psst}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{collapse.fv}} } \examples{ nd <- 40 \testonly{ nd <- 10 } ilist <- list(Poisson(), Geyer(7, 2), Strauss(7)) iname <- c("Poisson", "Geyer", "Strauss") \testonly{ ilist <- ilist[c(1,3)] iname <- iname[c(1,3)] } K <- compareFit(swedishpines, Kcom, interaction=ilist, rbord=9, correction="translate", same="trans", different="tcom", modelnames=iname, nd=nd) K } \keyword{spatial} \keyword{models} spatstat.model/man/reach.dppm.Rd0000644000176200001440000000212514331173077016305 0ustar liggesusers\name{reach.dppm} \alias{reach.dppm} \alias{reach.detpointprocfamily} \title{Range of Interaction for a Determinantal Point Process Model} \description{ Returns the range of interaction for a determinantal point process model. } \usage{ \method{reach}{dppm}(x, \dots) \method{reach}{detpointprocfamily}(x, \dots) } \arguments{ \item{x}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a determinantal point process model may be defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. For example in the \Matern model this defaults to finding \eqn{R} such that \eqn{g(R)=0.99}. } \value{Numeric} \author{ \spatstatAuthors. } \examples{ reach(dppMatern(lambda=100, alpha=.01, nu=1, d=2)) } \keyword{spatial} \keyword{models} spatstat.model/man/relrisk.ppm.Rd0000644000176200001440000001716114331173077016540 0ustar liggesusers\name{relrisk.ppm} \alias{relrisk.ppm} \title{ Parametric Estimate of Spatially-Varying Relative Risk } \description{ Given a point process model fitted to a multitype point pattern, this function computes the fitted spatially-varying probability of each type of point, or the ratios of such probabilities, according to the fitted model. Optionally the standard errors of the estimates are also computed. } \usage{ \method{relrisk}{ppm}(X, \dots, at = c("pixels", "points"), relative = FALSE, se = FALSE, casecontrol = TRUE, control = 1, case, ngrid = NULL, window = NULL) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{at}{ String specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point.
If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Applies only when \code{at="pixels"}.) } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. (Applies only when \code{at="pixels"}.) } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppm} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a parametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}.
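For example, here is a minimal sketch using the built-in bivariate pattern \code{amacrine} (the trend formula is purely illustrative):
\preformatted{
  fit <- ppm(amacrine ~ marks * x)
  p <- relrisk(fit)                   # pixel image: probability of a case
  r <- relrisk(fit, relative=TRUE)    # pixel image: relative risk of a case
}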
If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Probabilities and risks are computed from the fitted intensity of the model, using \code{\link{predict.ppm}}. If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, using \code{\link{vcov.ppm}}. } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \author{ \spatstatAuthors. } \seealso{ There is another method \code{\link{relrisk.ppp}} for point pattern datasets which computes \emph{nonparametric} estimates of relative risk by kernel smoothing. See also \code{\link{relrisk}}, \code{\link{relrisk.ppp}}, \code{\link{ppm}} } \examples{ fit <- ppm(chorley ~ marks * (x+y)) rr <- relrisk(fit, relative=TRUE, control="lung", se=TRUE) plot(rr$estimate) plot(rr$SE) rrX <- relrisk(fit, at="points", relative=TRUE, control="lung") } \keyword{spatial} \keyword{models} spatstat.model/man/AreaInter.Rd0000644000176200001440000001733214331173075016142 0ustar liggesusers\name{AreaInter} \alias{AreaInter} \title{The Area Interaction Point Process Model} \description{ Creates an instance of the Area Interaction point process model (Widom-Rowlinson penetrable spheres model) which can then be fitted to point pattern data. } \usage{ AreaInter(r) } \arguments{ \item{r}{The radius of the discs in the area interaction process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the area-interaction process with disc radius \eqn{r}. 
} \details{ This function defines the interpoint interaction structure of a point process called the Widom-Rowlinson penetrable sphere model or area-interaction process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the area interaction structure is yielded by the function \code{AreaInter()}. See the examples below. In \bold{standard form}, the area-interaction process (Widom and Rowlinson, 1970; Baddeley and Van Lieshout, 1995) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{kappa} and interaction parameter \eqn{\gamma}{gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-A(x)} }{ f(x[1],...,x[n]) = alpha . kappa^n(x) . gamma^(-A(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{A(x)} is the area of the region formed by the union of discs of radius \eqn{r} centred at the points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]}. Here \eqn{\alpha}{alpha} is a normalising constant. The interaction parameter \eqn{\gamma}{gamma} can be any positive number. If \eqn{\gamma = 1}{gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{kappa}. If \eqn{\gamma < 1}{gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{gamma > 1} the process is clustered. Thus, an area interaction process can be used to model either clustered or regular point patterns. Two points interact if the distance between them is less than \eqn{2r}{2 * r}. The standard form of the model, shown above, is a little complicated to interpret in practical applications. For example, each isolated point of the pattern \eqn{x} contributes a factor \eqn{\kappa \gamma^{-\pi r^2}}{kappa * gamma^(-pi * r^2)} to the probability density. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical scale-free form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \eta^{-C(x)} }{ f(x[1],...,x[n]) = alpha . beta^n(x) . eta^(-C(x)) } where \eqn{\beta}{beta} is the new intensity parameter, \eqn{\eta}{eta} is the new interaction parameter, and \eqn{C(x) = B(x) - n(x)} is the interaction potential. Here \deqn{ B(x) = \frac{A(x)}{\pi r^2} }{ B(x) = A(x)/(pi * r^2) } is the normalised area (so that the discs have unit area). In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density (so the first order trend is \eqn{\beta}{beta}). The quantity \eqn{C(x)} is a true interaction potential, in the sense that \eqn{C(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together (closer than \eqn{2r}{2*r} units apart). When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-C(x)} increases by a value between 0 and 1. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is 1 if the disc of radius \eqn{r} centred at \eqn{u} is completely contained in the union of discs of radius \eqn{r} centred at the data points \eqn{x_i}{x[i]}. Thus, the increase in potential is a measure of how close the new point \eqn{u} is to the existing pattern \eqn{x}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{beta * eta^delta} to the probability density, where \eqn{\delta}{delta} is the increase in potential. The old parameters \eqn{\kappa,\gamma}{kappa, gamma} of the standard form are related to the new parameters \eqn{\beta,\eta}{beta, eta} of the canonical scale-free form, by \deqn{ \beta = \kappa \gamma^{-\pi r^2} = \kappa /\eta }{ beta = kappa * gamma^(-pi * r^2) = kappa/eta } and \deqn{ \eta = \gamma^{\pi r^2} }{ eta = gamma^(pi * r^2) } provided \eqn{\gamma}{gamma} and \eqn{\kappa}{kappa} are positive and finite. In the canonical scale-free form, the parameter \eqn{\eta}{eta} can take any nonnegative value. The value \eqn{\eta = 1}{eta = 1} again corresponds to a Poisson process, with intensity \eqn{\beta}{beta}. If \eqn{\eta < 1}{eta < 1} then the process is regular, while if \eqn{\eta > 1}{eta > 1} the process is clustered. The value \eqn{\eta = 0}{eta = 0} corresponds to a hard core process with hard core radius \eqn{r} (interaction distance \eqn{2r}). The \emph{nonstationary} area interaction process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant \eqn{\beta}{beta}. Note the only argument of \code{AreaInter()} is the disc radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\eta)}{log(eta)} are estimated by \code{\link{ppm}()}, not fixed in \code{AreaInter()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{ragsAreaInter}} and \code{\link{rmh}} for simulation of area-interaction models. } \section{Warnings}{ The interaction distance of this process is equal to \code{2 * r}. Two discs of radius \code{r} overlap if their centres are closer than \code{2 * r} units apart. The estimate of the interaction parameter \eqn{\eta}{eta} is unreliable if the interaction radius \code{r} is too small or too large. In these situations the model is approximately Poisson so that \eqn{\eta}{eta} is unidentifiable. As a rule of thumb, one can inspect the empty space function of the data, computed by \code{\link{Fest}}. The value \eqn{F(r)} of the empty space function at the interaction radius \code{r} should be between 0.2 and 0.8. } \examples{ \testonly{op <- spatstat.options(ngrid.disc=8)} # prints a sensible description of itself AreaInter(r=0.1) # Note the reach is twice the radius reach(AreaInter(r=1)) # Fit the stationary area interaction process to Swedish Pines data ppm(swedishpines ~1, AreaInter(r=7)) # Fit the stationary area interaction process to `cells' ppm(cells ~1, AreaInter(r=0.06)) # eta=0 indicates hard core process. # Fit a nonstationary area interaction with log-cubic polynomial trend \donttest{ ppm(swedishpines ~polynom(x/10,y/10,3), AreaInter(r=7)) } \testonly{spatstat.options(op)} } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684.
} \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/ranef.mppm.Rd0000644000176200001440000000300214331173100016315 0ustar liggesusers\name{ranef.mppm} \alias{ranef.mppm} \title{ Extract Random Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the random effects of the model. A method for \code{ranef}. } \usage{ \method{ranef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{ranef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the random effects of the model. } \value{ A data frame, or list of data frames, as described in the help for \code{\link[nlme]{ranef.lme}}. } \references{ \baddrubaturnbook } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{fixef.mppm}}, \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) ranef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) ranef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/formula.ppm.Rd0000644000176200001440000000323614331173076016527 0ustar liggesusers\name{formula.ppm} \alias{formula.ppm} \alias{terms.ppm} \title{ Model Formulae for Gibbs Point Process Models } \description{ Extract the trend formula, or the terms in the trend formula, in a fitted Gibbs point process model. } \usage{ \method{formula}{ppm}(x, \dots) \method{terms}{ppm}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"ppm"}, representing a fitted point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{formula}} and \code{\link{terms}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{formula.ppm} extracts the trend formula from the fitted model \code{x} (the formula originally specified as the argument \code{trend} to \code{\link{ppm}}). The method \code{terms.ppm} extracts the individual terms in the trend formula. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{extractAIC.ppm}}, \code{\link{fitted.ppm}}, \code{\link{logLik.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}.
} \examples{ fit <- ppm(cells, ~x) formula(fit) terms(fit) } \keyword{spatial} \keyword{methods} spatstat.model/man/valid.detpointprocfamily.Rd0000644000176200001440000000121414331173100021257 0ustar liggesusers\name{valid.detpointprocfamily} \alias{valid.detpointprocfamily} \title{Check Validity of a Determinantal Point Process Model} \description{ Checks the validity of a determinantal point process model. } \usage{ \method{valid}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Ignored.} } \value{Logical} \author{ \adrian \rolf and \ege } \examples{ model1 <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) valid(model1) model2 <- dppMatern(lambda=100, alpha=1, nu=1, d=2) valid(model2) } \seealso{ \code{\link{valid}} } \keyword{spatial} \keyword{models} spatstat.model/man/quad.ppm.Rd0000644000176200001440000000625714331173077016023 0ustar liggesusers\name{quad.ppm} \alias{quad.ppm} \title{Extract Quadrature Scheme Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the quadrature scheme used to fit the model. } \usage{ quad.ppm(object, drop=FALSE, clip=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{clip}{ Logical value determining whether to erode the window, if \code{object} was fitted using the border correction. See Details. } } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{quad.ppm} extracts the quadrature scheme. A typical use of this function would be to inspect the quadrature scheme (points and weights) to gauge the accuracy of the approximation to the exact pseudolikelihood. Some quadrature points may not have been used in fitting the model. This happens if the border correction is used, and in other cases (e.g. when the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused quadrature points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. The quadrature scheme has a \emph{window}, which by default is set to equal the window of the original data. However this window may be larger than the actual domain of integration of the pseudolikelihood or composite likelihood that was used to fit the model. If \code{clip=TRUE} then the window of the quadrature scheme is set to the actual domain of integration. This option only has an effect when the model was fitted using the border correction; then the window is obtained by eroding the original data window by the border correction distance. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. 
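For example, a rough check of the accuracy of the quadrature approximation is to compare the total quadrature weight with the area of the observation window (a sketch only, using the Strauss model from the Examples below; \code{w.quad} and \code{area.owin} are accessor functions from \pkg{spatstat.geom}): \preformatted{
  fit <- ppm(cells ~ 1, Strauss(r=0.1))
  Q <- quad.ppm(fit)
  sum(w.quad(Q))            # total quadrature weight
  area.owin(Window(cells))  # area of the observation window
}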
See \code{\link{quad.object}} for a list of all operations that can be performed on objects of class \code{"quad"}. This function can also be applied to objects of class \code{"kppm"} and \code{"lppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{quad.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~1, Strauss(r=0.1)) Q <- quad.ppm(fit) \donttest{ plot(Q) } npoints(Q$data) npoints(Q$dummy) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.model/man/exactMPLEstrauss.Rd0000644000176200001440000001034514331173076017475 0ustar liggesusers\name{exactMPLEstrauss} \alias{exactMPLEstrauss} \title{ Exact Maximum Pseudolikelihood Estimate for Stationary Strauss Process } \description{ Computes, to very high accuracy, the Maximum Pseudolikelihood Estimates of the parameters of a stationary Strauss point process. } \usage{ exactMPLEstrauss(X, R, ngrid = 2048, plotit = FALSE, project=TRUE) } \arguments{ \item{X}{ Data to which the Strauss process will be fitted. A point pattern dataset (object of class \code{"ppp"}). } \item{R}{ Interaction radius of the Strauss process. A non-negative number. } \item{ngrid}{ Grid size for calculation of integrals. An integer, giving the number of grid points in the \eqn{x} and \eqn{y} directions. } \item{plotit}{ Logical. If \code{TRUE}, the log pseudolikelihood is plotted on the current device. } \item{project}{ Logical. If \code{TRUE} (the default), the parameter \eqn{\gamma}{gamma} is constrained to lie in the interval \eqn{[0,1]}. If \code{FALSE}, this constraint is not applied. } } \details{ This function is intended mainly for technical investigation of algorithm performance. Its practical use is quite limited. It fits the stationary Strauss point process model to the point pattern dataset \code{X} by maximum pseudolikelihood (with the border edge correction) using an algorithm with very high accuracy. This algorithm is more accurate than the \emph{default} behaviour of the model-fitting function \code{\link{ppm}} because the discretisation is much finer. Ripley (1988) and Baddeley and Turner (2000) derived the log pseudolikelihood for the stationary Strauss process, and eliminated the parameter \eqn{\beta}{beta}, obtaining an exact formula for the partial log pseudolikelihood as a function of the interaction parameter \eqn{\gamma}{gamma} only. The algorithm evaluates this expression to a high degree of accuracy, using numerical integration on a \code{ngrid * ngrid} lattice, uses \code{\link[stats]{optim}} to maximise the log pseudolikelihood with respect to \eqn{\gamma}{gamma}, and finally recovers \eqn{\beta}{beta}. The result is a vector of length 2, containing the fitted coefficients \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}. These values correspond to the entries that would be obtained with \code{coef(ppm(X, ~1, Strauss(R)))}. The fitted coefficients are typically accurate to within \eqn{10^{-6}}{10^(-6)} as shown in Baddeley and Turner (2013). Note however that (by default) \code{exactMPLEstrauss} constrains the parameter \eqn{\gamma}{gamma} to lie in the interval \eqn{[0,1]} in which the point process is well defined (Kelly and Ripley, 1976) whereas \code{\link{ppm}} does not constrain the value of \eqn{\gamma}{gamma} (by default). This behaviour is controlled by the argument \code{project} to \code{\link{ppm}} and \code{exactMPLEstrauss}. The default for \code{\link{ppm}} is \code{project=FALSE}, while the default for \code{exactMPLEstrauss} is \code{project=TRUE}. 
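Since the result contains the fitted coefficients \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}, the parameters can be recovered on their natural scale by exponentiating. A minimal sketch: \preformatted{
  co <- exactMPLEstrauss(cells, R=0.09)
  beta <- exp(co[1])    # fitted beta
  gamma <- exp(co[2])   # fitted gamma, in [0,1] when project=TRUE
}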
} \value{ Vector of length 2. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A. and Turner, R. (2013) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{2012}. \code{DOI: 10.1080/00949655.2012.755976} Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}. Cambridge University Press. } \author{ \adrian and \rolf } \seealso{ \code{\link{ppm}} } \examples{ if(interactive()) { rc <- 0.09 exactMPLEstrauss(cells, rc, plotit=TRUE) coef(ppm(cells ~1, Strauss(rc))) coef(ppm(cells ~1, Strauss(rc), nd=128)) rr <- 0.04 exactMPLEstrauss(redwood, rr) exactMPLEstrauss(redwood, rr, project=FALSE) coef(ppm(redwood ~1, Strauss(rr))) } else { rc <- 0.09 exactMPLEstrauss(cells, rc, ngrid=64, plotit=TRUE) exactMPLEstrauss(cells, rc, ngrid=64, project=FALSE) } } \keyword{spatial} \keyword{models} spatstat.model/man/residuals.mppm.Rd0000644000176200001440000000452414510474261017232 0ustar liggesusers\name{residuals.mppm} \alias{residuals.mppm} \title{Residuals for Point Process Model Fitted to Multiple Point Patterns} \description{ Given a point process model fitted to multiple point patterns, compute residuals for each pattern. } \usage{ \method{residuals}{mppm}(object, type = "raw", ..., fittedvalues = fitted.mppm(object)) } \arguments{ \item{object}{Fitted point process model (object of class \code{"mppm"}).} \item{\dots}{Ignored.} \item{type}{Type of residuals: either \code{"raw"}, \code{"pearson"} or \code{"inverse"}. Partially matched.} \item{fittedvalues}{Advanced use only. Fitted values of the model to be used in the calculation. } } \details{ Baddeley et al (2005) defined residuals for the fit of a point process model to spatial point pattern data. For an explanation of these residuals, see the help file for \code{\link{residuals.ppm}}. This function computes the residuals for a point process model fitted to \emph{multiple} point patterns. The \code{object} should be an object of class \code{"mppm"} obtained from \code{\link{mppm}}. The return value is a list. The number of entries in the list equals the number of point patterns in the original data. Each entry in the list has the same format as the output of \code{\link{residuals.ppm}}. That is, each entry in the list is a signed measure (object of class \code{"msr"}) giving the residual measure for the corresponding point pattern. } \value{ A list of signed measures (objects of class \code{"msr"}) giving the residual measure for each of the original point patterns. See Details. } \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) r <- residuals(fit) # compute total residual for each point pattern rtot <- sapply(r, integral.msr) # standardise the total residuals areas <- sapply(windows.mppm(fit), area.owin) rtot/sqrt(areas) } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. \baddrubaturnbook } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. 
} \seealso{ \code{\link{mppm}}, \code{\link{residuals.ppm}} } \keyword{spatial} \keyword{models} spatstat.model/man/is.marked.ppm.Rd0000644000176200001440000000440114331173076016732 0ustar liggesusers\name{is.marked.ppm} \alias{is.marked.ppm} \title{Test Whether A Point Process Model is Marked} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points. } \usage{ \method{is.marked}{ppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a marked point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a marked point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model ignores the marks in the data). See the Examples for a trick to do this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.marked(fit1) fit2 <- ppm(X, ~ 1, Poisson()) is.marked(fit2) ## test whether the model formula involves marks "marks" \%in\% spatstat.utils::variablesinformula(formula(fit2)) # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.marked(fit3) # FALSE } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.model/man/psib.Rd0000644000176200001440000000344214334115644015223 0ustar liggesusers\name{psib} \alias{psib} \alias{psib.kppm} \title{ Sibling Probability of Cluster Point Process } \description{ Computes the sibling probability of a cluster point process model. } \usage{ psib(object) \method{psib}{kppm}(object) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } } \details{ In a Poisson cluster process, two points are called \emph{siblings} if they belong to the same cluster, that is, if they had the same parent point. If two points of the process are separated by a distance \eqn{r}, the probability that they are siblings is \eqn{p(r) = 1 - 1/g(r)} where \eqn{g} is the pair correlation function of the process. The value \eqn{p(0) = 1 - 1/g(0)} is the probability that, if two points of the process are situated very close to each other, they came from the same cluster. This probability is an index of the strength of clustering, with high values suggesting strong clustering. This concept was proposed in Baddeley, Rubak and Turner (2015, page 479) and Baddeley (2017).
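Since \eqn{p(r) = 1 - 1/g(r)}, the sibling probability at any distance can be computed from the fitted pair correlation function. A minimal sketch (assuming the Thomas model fitted in the Examples below; \code{pcfmodel} returns the fitted pair correlation function): \preformatted{
  fit <- kppm(redwood ~ 1, "Thomas")
  g <- pcfmodel(fit)
  1 - 1/g(0)    # should agree with psib(fit)
  curve(1 - 1/g(x), from=0, to=0.2, xlab="r", ylab="p(r)")
}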
It was shown in Baddeley et al (2022) that the sibling probability is directly related to the strength of clustering. } \value{ A single number. } \references{ Baddeley, A. (2017) Local composite likelihood for spatial point processes. \emph{Spatial Statistics} \bold{22}, 261--295. \baddrubaturnbook Baddeley, A., Davies, T.M., Hazelton, M.L., Rakshit, S. and Turner, R. (2022) Fundamental problems in fitting spatial cluster process models. \emph{Spatial Statistics} \bold{52}, 100709. DOI: \code{10.1016/j.spasta.2022.100709} } \author{ \adrian. } \seealso{ \code{\link{kppm}}, \code{\link{panysib}} } \examples{ fit <- kppm(redwood ~1, "Thomas") psib(fit) } \keyword{spatial} \keyword{models} spatstat.model/man/lgcp.estpcf.Rd0000644000176200001440000001710714514520744016502 0ustar liggesusers\name{lgcp.estpcf} \alias{lgcp.estpcf} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ lgcp.estpcf(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the estimated pair correlation function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical pair correlation function of the LGCP model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\Moller and Waagepetersen, 2003, pp. 72-76). 
To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The theoretical pair correlation function of the LGCP is \deqn{ g(r) = \exp(C(r)) }{ g(r) = exp(C(r)) } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda = exp(mu + C(0)/2). } The covariance function \eqn{C(r)} takes the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } For a list of available models see \code{\link{kppm}}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes.
\emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian with modifications by Shen Guochun and \rasmus and \ege. } \seealso{ \code{\link{kppm}} and \code{\link{lgcp.estK}} for alternative methods of fitting LGCP. \code{\link{matclust.estpcf}}, \code{\link{thomas.estpcf}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link{pcf}} for the pair correlation function. } \examples{ u <- lgcp.estpcf(redwood, c(var=1, scale=0.1)) u plot(u) lgcp.estpcf(redwood, covmodel=list(model="matern", nu=0.3)) } \keyword{spatial} \keyword{models} spatstat.model/man/dppapproxkernel.Rd0000644000176200001440000000120614331173076017500 0ustar liggesusers\name{dppapproxkernel} \alias{dppapproxkernel} \title{Approximate Determinantal Point Process Kernel} \description{ Returns an approximation to the kernel of a determinantal point process, as a function of one argument \eqn{x}. } \usage{dppapproxkernel(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}. } \item{W}{Optional window -- undocumented at the moment.} } \value{A function} \author{ \adrian \rolf and \ege } spatstat.model/man/Kmodel.dppm.Rd0000644000176200001440000000230014331173075016427 0ustar liggesusers\name{Kmodel.dppm} \alias{Kmodel.detpointprocfamily} \alias{pcfmodel.detpointprocfamily} \alias{Kmodel.dppm} \alias{pcfmodel.dppm} \title{ K-function or Pair Correlation Function of a Determinantal Point Process Model } \description{Returns the theoretical \eqn{K}-function or theoretical pair correlation function of a determinantal point process model as a function of one argument \eqn{r}. } \usage{ \method{Kmodel}{dppm}(model, \dots) \method{pcfmodel}{dppm}(model, \dots) \method{Kmodel}{detpointprocfamily}(model, \dots) \method{pcfmodel}{detpointprocfamily}(model, \dots) } \arguments{ \item{model}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Ignored (not quite true -- there is some undocumented internal use)} } \value{ A function in the \R language, with one numeric argument \code{r}, that can be used to evaluate the theoretical \eqn{K}-function or pair correlation function of the model at distances \code{r}. } \author{ \spatstatAuthors. } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) KMatern <- Kmodel(model) pcfMatern <- pcfmodel(model) plot(KMatern, xlim = c(0,0.05)) plot(pcfMatern, xlim = c(0,0.05)) } spatstat.model/man/update.kppm.Rd0000644000176200001440000000472714366424525016533 0ustar liggesusers\name{update.kppm} \alias{update.kppm} \title{Update a Fitted Cluster Point Process Model} \description{ \code{update} method for class \code{"kppm"}. } \usage{ \method{update}{kppm}(object, \dots, evaluate=TRUE, envir=environment(terms(object))) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}, obtained from \code{\link{kppm}}. } \item{\dots}{ Arguments passed to \code{\link{kppm}}. 
} \item{evaluate}{ Logical value indicating whether to return the updated fitted model (\code{evaluate=TRUE}, the default) or just the updated call to \code{kppm} (\code{evaluate=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{kppm}}. } } \details{ \code{object} should be a fitted cluster point process model, obtained from the model-fitting function \code{\link{kppm}}. The model will be updated according to the new arguments provided. If the argument \code{trend} is provided, it determines the intensity in the updated model. It should be an \R formula (with or without a left hand side). It may include the symbols \code{+} or \code{-} to specify addition or deletion of terms in the current model formula, as shown in the Examples below. The symbol \code{.} refers to the current contents of the formula. The intensity in the updated model is determined by the argument \code{trend} if it is provided, or otherwise by any unnamed argument that is a formula, or otherwise by the formula of the original model, \code{formula(object)}. The spatial point pattern data to which the new model is fitted is determined by the left hand side of the updated model formula, if this is present. Otherwise it is determined by the argument \code{X} if it is provided, or otherwise by any unnamed argument that is a point pattern or a quadrature scheme. The model is refitted using \code{\link{kppm}}. } \value{ Another fitted cluster point process model (object of class \code{"kppm"}). } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{methods.kppm}}, \code{\link{vcov.kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") fitx <- update(fit, ~ . + x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.model/man/matclust.estpcf.Rd0000644000176200001440000001473114331173077017411 0ustar liggesusers\name{matclust.estpcf} \alias{matclust.estpcf} \title{Fit the \Matern Cluster Point Process by Minimum Contrast Using Pair Correlation} \description{ Fits the \Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ matclust.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the \Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the \Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the \Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset.
The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the \Matern Cluster point process to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical pair correlation function of the \Matern Cluster process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The \Matern Cluster point process is described in \Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical pair correlation function of the \Matern Cluster process is \deqn{ g(r) = 1 + \frac 1 {4\pi R \kappa r} h(\frac{r}{2R}) }{ g(r) = 1 + h(r/(2*R))/(4 * pi * R * kappa * r) } where the radius R is the parameter \code{scale} and \deqn{ h(z) = \frac {16} \pi [ z \mbox{arccos}(z) - z^2 \sqrt{1 - z^2} ] }{ h(z) = (16/pi) * (z * arccos(z) - z^2 * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 0} for \eqn{z > 1}. The theoretical intensity of the \Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The \Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous \Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}.
For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{matclust.estK}}, \code{\link{thomas.estpcf}}, \code{\link{thomas.estK}}, \code{\link{lgcp.estK}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ u <- matclust.estpcf(redwood, c(kappa=10, R=0.1)) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat.model/man/fitin.Rd0000644000176200001440000000472314331173076015402 0ustar liggesusers\name{fitin.ppm} \alias{fitin} \alias{fitin.ppm} \alias{fitin.profilepl} \title{Extract the Interaction from a Fitted Point Process Model} \description{ Given a point process model that has been fitted to point pattern data, this function extracts the interpoint interaction part of the model as a separate object. } \usage{ fitin(object) \method{fitin}{ppm}(object) \method{fitin}{profilepl}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"} or \code{"profilepl"}). } } \details{ An object of class \code{"ppm"} describes a fitted point process model. It contains information about the original data to which the model was fitted, the spatial trend that was fitted, the interpoint interaction that was fitted, and other data. See \code{\link{ppm.object}} for details of this class. The function \code{fitin} extracts from this model the information about the fitted interpoint interaction only. The information is organised as an object of class \code{"fii"} (fitted interpoint interaction). This object can be printed or plotted. Users may find this a convenient way to plot the fitted interpoint interaction term, as shown in the Examples. For a pairwise interaction, the plot of the fitted interaction shows the pair interaction function (the contribution to the probability density from a pair of points as a function of the distance between them). For a higher-order interaction, the plot shows the strongest interaction (the value most different from 1) that could ever arise at the given distance. The fitted interaction coefficients can also be extracted from this object using \code{\link{coef}}. } \value{ An object of class \code{"fii"} representing the fitted interpoint interaction. This object can be printed and plotted. } \author{ \spatstatAuthors. } \seealso{ Methods for handling fitted interactions: \code{\link{methods.fii}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}}. Background: \code{\link{ppm}}, \code{\link{ppm.object}}.
} \examples{ # unmarked model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4))) f <- fitin(model) f plot(f) # extract fitted interaction coefficients coef(f) # multitype # fit the stationary multitype Strauss process to `amacrine' r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2) model <- ppm(amacrine ~1, MultiStrauss(r)) f <- fitin(model) f plot(f) } \keyword{spatial} \keyword{models} spatstat.model/man/intensity.dppm.Rd0000644000176200001440000000133214331173076017247 0ustar liggesusers\name{intensity.dppm} \alias{intensity.dppm} \alias{intensity.detpointprocfamily} \title{Intensity of Determinantal Point Process Model} \description{Extracts the intensity of a determinantal point process model.} \usage{ \method{intensity}{detpointprocfamily}(X, \dots) \method{intensity}{dppm}(X, \dots) } \arguments{ \item{X}{ A determinantal point process model (object of class \code{"detpointprocfamily"} or \code{"dppm"}). } \item{\dots}{Ignored.} } \value{ A numeric value (if the model is stationary), a pixel image (if the model is non-stationary) or \code{NA} if the intensity is unknown for the model. } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.model/man/predict.slrm.Rd0000644000176200001440000000530014331173077016670 0ustar liggesusers\name{predict.slrm} \Rdversion{1.1} \alias{predict.slrm} \title{ Predicted or Fitted Values from Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel, or the fitted point process intensity, or the values of the linear predictor in each pixel. } \usage{ \method{predict}{slrm}(object, ..., type = "intensity", newdata=NULL, window=NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Optional arguments passed to \code{\link{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } \item{newdata}{ Optional. List containing new covariate values for the prediction. See Details. } \item{window}{ Optional. New window in which to predict. An object of class \code{"owin"}. } } \details{ This is a method for \code{\link[stats]{predict}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The argument \code{type} determines which quantity is computed. If \code{type="intensity"}, the value of the point process intensity is computed at each pixel. If \code{type="probabilities"}, the probability of the presence of a random point in each pixel is computed. If \code{type="link"}, the value of the linear predictor is computed at each pixel. If \code{newdata = NULL} (the default), the algorithm computes fitted values of the model (based on the data that was originally used to fit the model \code{object}). If \code{newdata} is given, the algorithm computes predicted values of the model, using the new values of the covariates provided by \code{newdata}. The argument \code{newdata} should be a list; names of entries in the list should correspond to variables appearing in the model formula of the \code{object}. Each list entry may be a pixel image or a single numeric value. } \value{ A pixel image (object of class \code{"im"}) containing the predicted values for each pixel.
} \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(predict(fit)) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fitc <- slrm(X ~ Z) pc <- predict(fitc) Znew <- distmap(copper$Lines)[copper$SouthWindow] pcnew <- predict(fitc, newdata=list(Z=Znew)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/coef.ppm.Rd0000644000176200001440000000365314331173076016001 0ustar liggesusers\name{coef.ppm} \alias{coef.ppm} \title{ Coefficients of Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{ppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } } \value{ A vector containing the fitted coefficients. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ lambda(u,x) = exp(theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. Use \code{\link{print.ppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{print.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ poi <- ppm(cells, ~1, Poisson()) coef(poi) # This is the log of the fitted intensity of the Poisson process stra <- ppm(cells, ~1, Strauss(r=0.07)) coef(stra) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/with.msr.Rd0000644000176200001440000000536314331173100016031 0ustar liggesusers\name{with.msr} \alias{with.msr} \title{Evaluate Expression Involving Components of a Measure} \description{ An expression involving the names of components of a measure is evaluated. } \usage{ \method{with}{msr}(data, expr, \dots) } \arguments{ \item{data}{ A measure (object of class \code{"msr"}). } \item{expr}{ An expression to be evaluated. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[base]{with}} for the class \code{"msr"}. The argument \code{data} should be an object of class \code{"msr"} representing a measure (a function which assigns a value to each subset of two-dimensional space). This function can be used to extract the components of the measure, or to perform more complicated manipulations of the components. The argument \code{expr} should be an un-evaluated expression in the \R language. The expression may involve any of the variable names listed below with their corresponding meanings.
\tabular{ll}{ \code{qlocations} \tab (point pattern) all quadrature locations \cr \code{qweights} \tab (numeric) all quadrature weights \cr \code{density} \tab (numeric) density value at each quadrature point \cr \code{discrete} \tab (numeric) discrete mass at each quadrature point \cr \code{continuous} \tab (numeric) increment of continuous component \cr \code{increment} \tab (numeric) increment of measure \cr \code{is.atom} \tab (logical) whether quadrature point is an atom \cr \code{atoms} \tab (point pattern) locations of atoms \cr \code{atommass} \tab (numeric) masses of atoms } The measure is the sum of discrete and continuous components. The discrete component assigns non-zero mass to several points called atoms. The continuous component has a density which should be integrated over a region to determine the value for that region. An object of class \code{"msr"} approximates the continuous component by a sum over quadrature points. The quadrature points are chosen so that they include the atoms of the measure. In the list above, we have \code{increment = continuous + discrete}, \code{continuous = density * qweights}, \code{is.atom = (discrete > 0)}, \code{atoms = qlocations[is.atom]} and \code{atommass = discrete[is.atom]}. } \value{ The result of evaluating the expression could be an object of any kind. } \author{ \spatstatAuthors. } \seealso{ \code{\link{msr}}, \code{\link{split.msr}}, \code{\link{measureContinuous}}, \code{\link{measurePositive}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") with(rp, atoms) with(rp, qlocations \%mark\% continuous) } \keyword{spatial} \keyword{manip} spatstat.model/man/methods.kppm.Rd0000644000176200001440000000306014331173077016674 0ustar liggesusers\name{methods.kppm} \alias{methods.kppm} %DoNotExport \alias{coef.kppm} \alias{formula.kppm} \alias{print.kppm} \alias{terms.kppm} \alias{labels.kppm} \title{ Methods for Cluster Point Process Models } \description{ These are methods for the class \code{"kppm"}. } \usage{ \method{coef}{kppm}(object, \dots) \method{formula}{kppm}(x, \dots) \method{print}{kppm}(x, ...) \method{terms}{kppm}(x, \dots) \method{labels}{kppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"kppm"}, representing a fitted cluster point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted cluster point process model. It is obtained from \code{\link{kppm}}. The method \code{coef.kppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the clustering parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link{as.ppm.kppm}}.
} \examples{ fit <- kppm(redwood ~ x, "MatClust") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat.model/man/mppm.Rd0000644000176200001440000002465014331173100015227 0ustar liggesusers\name{mppm} \alias{mppm} \title{Fit Point Process Model to Several Point Patterns} \description{ Fits a Gibbs point process model to several point patterns simultaneously. } \usage{ mppm(formula, data, interaction=Poisson(), ..., iformula=NULL, random=NULL, weights=NULL, use.gam = FALSE, reltol.pql=1e-3, gcontrol=list()) } \arguments{ \item{formula}{ A formula describing the systematic part of the model. Variables in the formula are names of columns in \code{data}. } \item{data}{ A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}) containing the point pattern responses and the explanatory variables. } \item{interaction}{ Interpoint interaction(s) appearing in the model. Either an object of class \code{"interact"} describing the point process interaction structure, or a hyperframe (with the same number of rows as \code{data}) whose entries are objects of class \code{"interact"}. } \item{\dots}{Arguments passed to \code{\link{ppm}} controlling the fitting procedure. } \item{iformula}{ Optional. A formula (with no left hand side) describing the interaction to be applied to each case. Each variable name in the formula should either be the name of a column in the hyperframe \code{interaction}, or the name of a column in the hyperframe \code{data} that is a vector or factor. } \item{random}{ Optional. A formula (with no left hand side) describing a random effect. Variable names in the formula may be any of the column names of \code{data} and \code{interaction}. The formula must be recognisable to \code{\link{lme}}. } \item{weights}{ Optional. Numeric vector of case weights for each row of \code{data}. } \item{use.gam}{Logical flag indicating whether to fit the model using \code{\link[mgcv]{gam}} or \code{\link[stats]{glm}}. } \item{reltol.pql}{ Relative tolerance for successive steps in the penalised quasi-likelihood algorithm, used when the model includes random effects. The algorithm terminates when the root mean square of the relative change in coefficients is less than \code{reltol.pql}. } \item{gcontrol}{ List of arguments to control the fitting algorithm. Arguments are passed to \code{\link[stats]{glm.control}} or \code{\link[mgcv]{gam.control}} or \code{\link[nlme]{lmeControl}} depending on the kind of model being fitted. If the model has random effects, the arguments are passed to \code{\link[nlme]{lmeControl}}. Otherwise, if \code{use.gam=TRUE} the arguments are passed to \code{\link[mgcv]{gam.control}}, and if \code{use.gam=FALSE} (the default) they are passed to \code{\link[stats]{glm.control}}. } } \details{ This function fits a common point process model to a dataset containing several different point patterns. It extends the capabilities of the function \code{\link{ppm}} to deal with data such as \itemize{ \item replicated observations of spatial point patterns \item two groups of spatial point patterns \item a designed experiment in which the response from each unit is a point pattern. } The syntax of this function is similar to that of standard \R model-fitting functions like \code{\link{lm}} and \code{\link{glm}}. The first argument \code{formula} is an \R formula describing the systematic part of the model. The second argument \code{data} contains the responses and the explanatory variables. 
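A minimal sketch of a call may help to fix ideas (the choice of data and interaction here is purely illustrative): \preformatted{
  H <- hyperframe(Y = waterstriders)   # column Y contains the point patterns
  fit <- mppm(Y ~ 1, data = H, interaction = Strauss(7))
}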
Other arguments determine the stochastic structure of the model. Schematically, the data are regarded as the results of a designed experiment involving \eqn{n} experimental units. Each unit has a \sQuote{response}, and optionally some \sQuote{explanatory variables} (covariates) describing the experimental conditions for that unit. In this context, \emph{the response from each unit is a point pattern}. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a spatial covariate. A \sQuote{spatial} covariate is a quantity that depends on spatial location, for example, the soil acidity or altitude at each location. For the purposes of \code{mppm}, a spatial covariate must be stored as a pixel image (object of class \code{"im"}) which gives the values of the covariate at a fine grid of locations. The argument \code{data} is a hyperframe (a generalisation of a data frame, see \code{\link{hyperframe}}). This is like a data frame except that the entries can be objects of any class. The hyperframe has one row for each experimental unit, and one column for each variable (response or explanatory variable). The \code{formula} should be an \R formula. The left hand side of \code{formula} determines the \sQuote{response} variable. This should be a single name, which should correspond to a column in \code{data}. The right hand side of \code{formula} determines the spatial trend of the model. It specifies the linear predictor, and effectively represents the \bold{logarithm} of the spatial trend. Variables in the formula must be the names of columns of \code{data}, or one of the reserved names \describe{ \item{x,y}{Cartesian coordinates of location} \item{marks}{Mark attached to point} \item{id}{which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } } The column of responses in \code{data} must consist of point patterns (objects of class \code{"ppp"}). The individual point pattern responses can be defined in different spatial windows. If some of the point patterns are marked, then they must all be marked, and must have the same type of marks. The scope of models that can be fitted to each pattern is the same as the scope of \code{\link{ppm}}, that is, Gibbs point processes with interaction terms that belong to a specified list, including for example the Poisson process, Strauss process, Geyer's saturation model, and piecewise constant pairwise interaction models. Additionally, it is possible to include random effects as explained in the section on Random Effects below. The stochastic part of the model is determined by the arguments \code{interaction} and (optionally) \code{iformula}. \itemize{ \item In the simplest case, \code{interaction} is an object of class \code{"interact"}, determining the interpoint interaction structure of the point process model, for all experimental units. \item Alternatively, \code{interaction} may be a hyperframe, whose entries are objects of class \code{"interact"}. It should have the same number of rows as \code{data}. \itemize{ \item If \code{interaction} consists of only one column, then the entry in row \code{i} is taken to be the interpoint interaction for the \code{i}th experimental unit (corresponding to the \code{i}th row of \code{data}). \item If \code{interaction} has more than one column, then the argument \code{iformula} is also required. 
Each row of \code{interaction} determines several interpoint interaction structures that might be applied to the corresponding row of \code{data}. The choice of interaction is determined by \code{iformula}; this should be an \R formula, without a left hand side. For example if \code{interaction} has two columns called \code{A} and \code{B} then \code{iformula = ~B} indicates that the interpoint interactions are taken from the second column. } } Variables in \code{iformula} typically refer to column names of \code{interaction}. They can also be names of columns in \code{data}, but only for columns of numeric, logical or factor values. For example \code{iformula = ~B * group} (where \code{group} is a column of \code{data} that contains a factor) causes the model with interpoint interaction \code{B} to be fitted with different interaction parameters for each level of \code{group}. } \section{Random Effects}{ It is also possible to include random effects in the trend term. The argument \code{random} is a formula, with no left-hand side, that specifies the structure of the random effects. The formula should be recognisable to \code{\link{lme}} (see the description of the argument \code{random} for \code{\link{lme}}). The names in the formula \code{random} may be any of the covariates supplied by \code{data}. Additionally the formula may involve the name \code{id}, which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } \value{ An object of class \code{"mppm"} representing the fitted model. There are methods for \code{print}, \code{summary}, \code{coef}, \code{AIC}, \code{anova}, \code{fitted}, \code{fixef}, \code{logLik}, \code{plot}, \code{predict}, \code{ranef}, \code{residuals}, \code{summary}, \code{terms} and \code{vcov} for this class. The default methods for \code{\link[stats]{update}} and \code{\link[stats]{formula}} also work on this class. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. \baddrubaturnbook Bell, M. and Grunwald, G. (2004) Mixed models for the analysis of replicated spatial point patterns. \emph{Biostatistics} \bold{5}, 633--648. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{ppm}}, \code{\link{print.mppm}}, \code{\link{summary.mppm}}, \code{\link{coef.mppm}} } \examples{ # Waterstriders data H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H) mppm(Y ~ 1, data=H, Strauss(7)) mppm(Y ~ id, data=H) mppm(Y ~ x, data=H) # Synthetic data from known model n <- 10 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1), M=factor(letters[1 + (1:n) \%\% 3])) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~ Z + U + V, data=H) } \keyword{spatial} \keyword{models} spatstat.model/man/cauchy.estK.Rd0000644000176200001440000001303314400523233016435 0ustar liggesusers\name{cauchy.estK} \alias{cauchy.estK} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast.
} \usage{ cauchy.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical \eqn{K} function of the Cauchy process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}.
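A sketch of such a call (the starting values and control settings are illustrative only, not recommendations): \preformatted{
  # pass control parameters through ... to optim
  u <- cauchy.estK(redwood, startpar = c(kappa = 10, scale = 0.1),
                   control = list(maxit = 500))
}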
For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2013) Cauchy cluster process. \emph{Metrika} \bold{76}, 697--706. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{vargamma.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estK(redwood) u plot(u) } \keyword{spatial} \keyword{models} spatstat.model/man/vcov.kppm.Rd0000644000176200001440000000557114331173100016202 0ustar liggesusers\name{vcov.kppm} \alias{vcov.kppm} \title{Variance-Covariance Matrix for a Fitted Cluster Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted cluster point process model. } \usage{ \method{vcov}{kppm}(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 0.01, verbose = TRUE) } \arguments{ \item{object}{ A fitted cluster point process model (an object of class \code{"kppm"}). } \item{\dots}{ Ignored. } \item{what}{ Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, \code{"fisher"} for the Fisher information matrix, and \code{"internals"} for a list of internal quantities used in the computation (mainly intended for developer use). } \item{fast}{ Logical specifying whether tapering (using sparse matrices from \pkg{Matrix}) should be used to speed up calculations. Warning: This is expected to underestimate the true asymptotic variances/covariances. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. Only used when \code{fast=TRUE}. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function when the fast option (\code{fast=TRUE}) is used. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Only used when \code{fast=TRUE}. Ignored if \code{rmax} is provided. } \item{verbose}{ Logical value indicating whether to print progress reports during very long calculations. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical (regression) parameters in the cluster point process model \code{object}. It is a method for the generic function \code{\link{vcov}}.
The result is an \code{n * n} matrix where \code{n = length(coef(model))}. To calculate a confidence interval for a regression parameter, use \code{\link[stats]{confint}} as shown in the examples. } \value{ A square matrix. } \references{ Waagepetersen, R. (2008) Estimating functions for inhomogeneous spatial point processes with incomplete covariate data. \emph{Biometrika} \bold{95}, 351--363. } \author{ Abdollah Jalilian and Rasmus Waagepetersen. Ported to \pkg{spatstat} by \adrian and \ege. } \seealso{ \code{\link{kppm}}, \code{\link{vcov}}, \code{\link{vcov.ppm}} } \examples{ fit <- kppm(redwood ~ x + y) vcov(fit) vcov(fit, what="corr") # confidence interval confint(fit) # cross-check the confidence interval by hand: sd <- sqrt(diag(vcov(fit))) t(coef(fit) + 1.96 * outer(sd, c(lower=-1, upper=1))) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/lurking.Rd0000644000176200001440000003041514331173076015741 0ustar liggesusers\name{lurking} \alias{lurking} \alias{lurking.ppp} \alias{lurking.ppm} \title{Lurking Variable Plot} \description{ Plot spatial point process residuals against a covariate. } \usage{ lurking(object, \dots) \method{lurking}{ppm}(object, covariate, type="eem", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) \method{lurking}{ppp}(object, covariate, type="eem", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. Alternatively, \code{object} may be a point pattern (object of class \code{"ppp"}). } \item{covariate}{ The covariate against which residuals should be plotted. Either a numeric vector, a pixel image, or an \code{expression}. See \emph{Details} below. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. } \item{cumulative}{ Logical flag indicating whether to plot a cumulative sum of marks (\code{cumulative=TRUE}) or the derivative of this sum, a marginal density of the smoothed residual field (\code{cumulative=FALSE}). } \item{\dots}{ Arguments passed to \code{\link{plot.default}} and \code{\link{lines}} to control the plot behaviour. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{plot.sd}{ Logical value indicating whether error bounds should be added to the plot. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{clipwindow}{ If not \code{NULL} this argument indicates that residuals shall only be computed inside a subregion of the window containing the original point pattern data. Then \code{clipwindow} should be a window object of class \code{"owin"}. } \item{rv}{ Usually absent.
If this argument is present, the point process residuals will not be calculated from the fitted model \code{object}, but will instead be taken directly from \code{rv}. } \item{envelope}{ Logical value indicating whether to compute simulation envelopes for the plot. Alternatively \code{envelope} may be a list of point patterns to use for computing the simulation envelopes, or an object of class \code{"envelope"} containing simulated point patterns. } \item{nsim}{ Number of simulated point patterns to be generated to produce the simulation envelope, if \code{envelope=TRUE}. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{typename}{ Usually absent. If this argument is present, it should be a string, and will be used (in the axis labels of plots) to describe the type of residuals. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{check}{ Logical flag indicating whether the integrity of the data structure in \code{object} should be checked. } \item{verbose}{ Logical value indicating whether to print progress reports during Monte Carlo simulation. } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{splineargs}{ A list of arguments passed to \code{smooth.spline} for the estimation of the derivatives in the case \code{cumulative=FALSE}. } \item{internal}{ Internal use only. } } \value{ The (invisible) return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. This object is a list containing two dataframes \code{empirical} and \code{theoretical}. The first dataframe \code{empirical} contains columns \code{covariate} and \code{value} giving the coordinates of the lurking variable plot. The second dataframe \code{theoretical} contains columns \code{covariate}, \code{mean} and \code{sd} giving the coordinates of the plot of the theoretical mean and standard deviation. } \details{ This function generates a \sQuote{lurking variable} plot for a fitted point process model. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model, in particular, to reveal that the point pattern depends on the covariate. The function \code{lurking} is generic, with methods for \code{ppm} and \code{ppp} documented here, and possibly other methods. The argument \code{object} would usually be a fitted point process model (object of class \code{"ppm"}) produced by the model-fitting algorithm \code{\link{ppm}}. If \code{object} is a point pattern (object of class \code{"ppp"}) then the model is taken to be the uniform Poisson process (Complete Spatial Randomness) fitted to this point pattern. First the residuals from the fitted model (Baddeley et al, 2005) are computed at each quadrature point, or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991) are computed at each data point. The argument \code{type} selects the type of residual or weight. See \code{\link{diagnose.ppm}} for options and explanation.
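A sketch using a pixel image covariate may be helpful here (assuming the \code{bei} and \code{bei.extra} datasets from \pkg{spatstat.data}; the choice of covariates is illustrative): \preformatted{
  fit <- ppm(bei ~ elev, data = bei.extra)   # model with terrain elevation
  # plot residuals against a covariate NOT in the model: terrain slope
  lurking(fit, bei.extra$grad, type = "raw", covname = "slope")
}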
A lurking variable plot for point processes (Baddeley et al, 2005) displays either the cumulative sum of residuals/weights (if \code{cumulative = TRUE}) or a kernel-weighted average of the residuals/weights (if \code{cumulative = FALSE}) plotted against the covariate. The empirical plot (solid lines) is shown together with its expected value assuming the model is true (dashed lines) and optionally also the pointwise two-standard-deviation limits (grey shading). To be more precise, let \eqn{Z(u)} denote the value of the covariate at a spatial location \eqn{u}. \itemize{ \item If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z}, where \eqn{H(z)} is the sum of the residuals over all quadrature points where the covariate takes a value less than or equal to \eqn{z}, or the sum of the exponential energy weights over all data points where the covariate takes a value less than or equal to \eqn{z}. \item If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z}, where \eqn{h(z)} is the derivative of \eqn{H(z)}, computed approximately by spline smoothing. } For the point process residuals \eqn{E(H(z)) = 0}, while for the exponential energy weights \eqn{E(H(z)) = } area of the subset of the window satisfying \eqn{Z(u) \le z}{Z(u) <= z}. If the empirical and theoretical curves deviate substantially from one another, the interpretation is that the fitted model does not correctly account for dependence on the covariate. The correct form (of the spatial trend part of the model) may be suggested by the shape of the plot. If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{H(z)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2008). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. The argument \code{covariate} is either a numeric vector, a pixel image, or an R language expression. If it is a numeric vector, it is assumed to contain the values of the covariate for each of the quadrature points in the fitted model. The quadrature points can be extracted by \code{\link{quad.ppm}(object)}. If \code{covariate} is a pixel image, it is assumed to contain the values of the covariate at each location in the window. The values of this image at the quadrature points will be extracted. Alternatively, if \code{covariate} is an \code{expression}, it will be evaluated in the same environment as the model formula used in fitting the model \code{object}. It must yield a vector of the same length as the number of quadrature points. The expression may contain the terms \code{x} and \code{y} representing the cartesian coordinates, and may also contain other variables that were available when the model was fitted. Certain variable names are reserved words; see \code{\link{ppm}}. Note that lurking variable plots for the \eqn{x} and \eqn{y} coordinates are also generated by \code{\link{diagnose.ppm}}, amongst other types of diagnostic plots.
This function is more general in that it enables the user to plot the residuals against any chosen covariate that may have been present. For advanced use, even the values of the residuals/weights can be altered. If the argument \code{rv} is present, the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from the object \code{rv}. If \code{type = "eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector with length equal to the number of data points in the original point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, \code{rv} should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. } \seealso{ \code{\link{residuals.ppm}}, \code{\link{diagnose.ppm}}, \code{\link{qqplot.ppm}}, \code{\link{eem}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100. } \examples{ (a <- lurking(nztrees, expression(x), type="raw")) fit <- ppm(nztrees ~x, Poisson(), nd=128) (b <- lurking(fit, expression(x), type="raw")) lurking(fit, expression(x), type="raw", cumulative=FALSE) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.model/man/dppMatern.Rd0000644000176200001440000000243614331173076016222 0ustar liggesusers\name{dppMatern} \alias{dppMatern} \title{Whittle-Matern Determinantal Point Process Model} \description{ Function generating an instance of the Whittle-\Matern determinantal point process model. } \usage{dppMatern(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details. } } \details{ The Whittle-\Matern DPP is defined in Lavancier, \Moller and Rubak (2015). The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian, \rolf and \ege. } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppMatern(lambda=100, alpha=.02, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppPowerExp}} } spatstat.model/man/hierpair.family.Rd0000644000176200001440000000140414331173076017345 0ustar liggesusers\name{hierpair.family} \alias{hierpair.family} \title{Hierarchical Pairwise Interaction Process Family} \description{ An object describing the family of all hierarchical pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user.
It describes the hierarchical pairwise interaction family of point process models. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Hierarchical Strauss interaction: \code{\link{HierStrauss}}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/methods.slrm.Rd0000644000176200001440000000374514331173077016714 0ustar liggesusers\name{methods.slrm} \alias{methods.slrm} %DoNotExport \alias{formula.slrm} \alias{update.slrm} \alias{print.slrm} \alias{summary.slrm} \alias{terms.slrm} \alias{labels.slrm} \alias{deviance.slrm} \title{ Methods for Spatial Logistic Regression Models } \description{ These are methods for the class \code{"slrm"}. } \usage{ \method{formula}{slrm}(x, \dots) \method{print}{slrm}(x, \dots) \method{summary}{slrm}(object, \dots) \method{terms}{slrm}(x, \dots) \method{labels}{slrm}(object, \dots) \method{deviance}{slrm}(object, \dots) \method{update}{slrm}(object, fmla, \dots, evaluate = TRUE, env = parent.frame()) } \arguments{ \item{x,object}{ An object of class \code{"slrm"}, representing a fitted spatial logistic regression model. } \item{\dots}{ Arguments passed to other methods. } \item{fmla}{ Optional. A formula, to replace the formula of the model. } \item{evaluate}{ Logical value. If \code{TRUE}, evaluate the updated call to \code{slrm}, so that the model is refitted; if \code{FALSE}, simply return the updated call. } \item{env}{ Optional environment in which the model should be updated. } } \details{ These functions are methods for the generic commands \code{\link[stats]{formula}}, \code{\link[stats]{update}}, \code{\link{print}}, \code{\link{summary}}, \code{\link[stats]{terms}}, \code{\link{labels}} and \code{\link[stats]{deviance}} for the class \code{"slrm"}. An object of class \code{"slrm"} represents a fitted spatial logistic regression model. It is obtained from \code{\link{slrm}}. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{simulate.slrm}}, \code{\link{vcov.slrm}}, \code{\link{coef.slrm}}. } \examples{ fit <- slrm(redwood ~ x) coef(fit) formula(fit) tf <- terms(fit) labels(fit) deviance(fit) } \keyword{spatial} \keyword{methods} spatstat.model/man/dppapproxpcf.Rd0000644000176200001440000000202614331173076016771 0ustar liggesusers\name{dppapproxpcf} \alias{dppapproxpcf} \title{Approximate Pair Correlation Function of Determinantal Point Process Model} \description{ Returns an approximation to the theoretical pair correlation function of a determinantal point process model, as a function of one argument \eqn{x}. } \usage{dppapproxpcf(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric value specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}.} \item{W}{Optional window -- undocumented at the moment.} } \details{ This function is usually NOT needed for anything. It only exists for investigative purposes. } \value{ A function in the \R language, with one numeric argument \code{x}, that returns the value of the approximate pair correlation at distances \code{x}. } \author{ \spatstatAuthors. 
} \examples{ f <- dppapproxpcf(dppMatern(lambda = 100, alpha=.028, nu=1, d=2)) plot(f, xlim = c(0,0.1)) } spatstat.model/man/residuals.slrm.Rd0000644000176200001440000000565314331173077017244 0ustar liggesusers\name{residuals.slrm} \alias{residuals.slrm} \title{ Residuals for Fitted Spatial Logistic Regression Model } \description{ Given a spatial logistic regression model fitted to a point pattern, compute the residuals for each pixel. } \usage{ \method{residuals}{slrm}(object, type=c("raw", "deviance", "pearson", "working", "response", "partial", "score"), \dots) } \arguments{ \item{object}{ The fitted spatial logistic regression model (an object of class \code{"slrm"}) for which residuals should be calculated. } \item{type}{ String (partially matched) indicating the type of residuals to be calculated. } \item{\dots}{ Ignored. } } \value{ A pixel image (if the residual values are scalar), or a list of pixel images (if the residual values are vectors). } \details{ This function computes several kinds of residuals for the fit of a spatial logistic regression model to a spatial point pattern dataset. The argument \code{object} must be a fitted spatial logistic regression model (object of class \code{"slrm"}). Such objects are created by the fitting algorithm \code{\link{slrm}}. The residuals are computed for each pixel that was used to fit the original model. The residuals are returned as a pixel image (if the residual values are scalar), or a list of pixel images (if the residual values are vectors). The type of residual is chosen by the argument \code{type}. For a given pixel, suppose \eqn{p} is the fitted probability of presence of a point, and \eqn{y} is the presence indicator (equal to 1 if the pixel contains any data points, and equal to 0 otherwise). Then \itemize{ \item \code{type="raw"} or \code{type="response"} specifies the response residual \deqn{r = y - p} \item \code{type="pearson"} is the Pearson residual \deqn{ r_P = \frac{y - p}{\sqrt{p (1-p)}} }{ rP = (y-p)/sqrt(p * (1-p)) } \item \code{type="deviance"} is the deviance residual \deqn{ r_D = (-1)^{y+1} \sqrt{-2(y \log p + (1-y) \log(1-p))} }{ rD = (-1)^(y+1) (-2(y log p + (1-y) log(1-p)))^(1/2) } \item \code{type="score"} specifies the score residuals \deqn{ r_S = (y-p) x }{ rS = (y-p) x } where \code{x} is the vector of canonical covariate values for the pixel \item \code{type="working"} specifies the working residuals as defined in \code{\link[stats]{residuals.glm}} \item \code{type="partial"} specifies the partial residuals as defined in \code{\link[stats]{residuals.glm}} } } \seealso{ \code{\link[stats]{residuals.glm}}, \code{\link{residuals.ppm}} } \examples{ d <- if(interactive()) 128 else 32 H <- unmark(humberside) fit <- slrm(H ~ x + y, dimyx=d) plot(residuals(fit)) plot(residuals(fit, type="score")) } \author{ \adrian } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/ppm.object.Rd0000644000176200001440000001356014331173077016332 0ustar liggesusers\name{ppm.object} \alias{ppm.object} %DoNotExport \alias{methods.ppm} %DoNotExport \title{Class of Fitted Point Process Models} \description{ A class \code{ppm} to represent a fitted stochastic model for a point process. The output of \code{\link{ppm}}. } \details{ An object of class \code{ppm} represents a stochastic point process model that has been fitted to a point pattern dataset. Typically it is the output of the model fitter, \code{\link{ppm}}.
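A minimal sketch: \preformatted{
  fit <- ppm(cells ~ x, Strauss(0.1))   # fit a Strauss model with a trend
  class(fit)                            # "ppm"
}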
The class \code{ppm} has methods for the following standard generic functions: \tabular{lll}{ generic \tab method \tab description \cr \code{print} \tab \code{\link{print.ppm}} \tab print details \cr \code{plot} \tab \code{\link{plot.ppm}} \tab plot fitted model \cr \code{predict} \tab \code{\link{predict.ppm}} \tab fitted intensity and conditional intensity \cr \code{fitted} \tab \code{\link{fitted.ppm}} \tab fitted intensity \cr \code{coef} \tab \code{\link{coef.ppm}} \tab fitted coefficients of model \cr \code{anova} \tab \code{\link{anova.ppm}} \tab Analysis of Deviance \cr \code{formula} \tab \code{\link{formula.ppm}} \tab Extract model formula \cr \code{terms} \tab \code{\link{terms.ppm}} \tab Terms in the model formula \cr \code{labels} \tab \code{labels.ppm} \tab Names of estimable terms in the model formula \cr \code{residuals} \tab \code{\link{residuals.ppm}} \tab Point process residuals \cr \code{simulate} \tab \code{\link{simulate.ppm}} \tab Simulate the fitted model \cr \code{update} \tab \code{\link{update.ppm}} \tab Change or refit the model \cr \code{vcov} \tab \code{\link{vcov.ppm}} \tab Variance/covariance matrix of parameter estimates \cr \code{model.frame} \tab \code{\link{model.frame.ppm}} \tab Model frame \cr \code{model.matrix} \tab \code{\link{model.matrix.ppm}} \tab Design matrix \cr \code{logLik} \tab \code{\link{logLik.ppm}} \tab log \emph{pseudo} likelihood \cr \code{extractAIC} \tab \code{\link{extractAIC.ppm}} \tab pseudolikelihood counterpart of AIC \cr \code{nobs} \tab \code{\link{nobs.ppm}} \tab number of observations } Objects of class \code{ppm} can also be handled by the following standard functions, without requiring a special method: \tabular{ll}{ name \tab description \cr \code{\link{confint}} \tab Confidence intervals for parameters \cr \code{\link{step}} \tab Stepwise model selection \cr \code{\link{drop1}} \tab One-step model improvement \cr \code{\link{add1}} \tab One-step model improvement } The class \code{ppm} also has methods for the following generic functions defined in the \pkg{spatstat} package: \tabular{lll}{ generic \tab method \tab description \cr \code{\link{as.interact}} \tab \code{\link{as.interact.ppm}} \tab Interpoint interaction structure \cr \code{\link{as.owin}} \tab \code{\link{as.owin.ppm}} \tab Observation window of data \cr \code{\link{berman.test}} \tab \code{\link{berman.test.ppm}} \tab Berman's test \cr \code{\link{envelope}} \tab \code{\link{envelope.ppm}} \tab Simulation envelopes \cr \code{\link{fitin}} \tab \code{\link{fitin.ppm}} \tab Fitted interaction \cr \code{\link{is.marked}} \tab \code{\link{is.marked.ppm}} \tab Determine whether the model is marked \cr \code{\link{is.multitype}} \tab \code{\link{is.multitype.ppm}} \tab Determine whether the model is multitype \cr \code{\link{is.poisson}} \tab \code{\link{is.poisson.ppm}} \tab Determine whether the model is Poisson \cr \code{\link{is.stationary}} \tab \code{\link{is.stationary.ppm}} \tab Determine whether the model is stationary \cr \code{\link{cdf.test}} \tab \code{\link{cdf.test.ppm}} \tab Spatial distribution test \cr \code{\link{quadrat.test}} \tab \code{\link{quadrat.test.ppm}} \tab Quadrat counting test \cr \code{\link{reach}} \tab \code{\link{reach.ppm}} \tab Interaction range of model \cr \code{\link{rmhmodel}} \tab \code{\link{rmhmodel.ppm}} \tab Model in a form that can be simulated \cr \code{\link{rmh}} \tab \code{\link{rmh.ppm}} \tab Perform simulation \cr \code{\link{unitname}} \tab \code{\link{unitname.ppm}} \tab Name of unit of length } 
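A brief sketch of some of these generics in action (the model is illustrative): \preformatted{
  fit <- ppm(cells ~ 1, Strauss(0.1))
  is.poisson(fit)      # FALSE: the model has a Strauss interaction
  is.stationary(fit)   # TRUE: the trend is constant
  reach(fit)           # 0.1, the interaction range of Strauss(0.1)
}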
Information about the data (to which the model was fitted) can be extracted using \code{\link{data.ppm}}, \code{\link{dummy.ppm}} and \code{\link{quad.ppm}}. } \section{Internal format}{ If you really need to get at the internals, a \code{ppm} object contains at least the following entries: \tabular{ll}{ \code{coef} \tab the fitted regular parameters (as returned by \code{glm}) \cr \code{trend} \tab the trend formula or \code{NULL} \cr \code{interaction} \tab the point process interaction family (an object of class \code{"interact"}) or \code{NULL} \cr \code{Q} \tab the quadrature scheme used \cr \code{maxlogpl} \tab the maximised value of log pseudolikelihood \cr \code{correction} \tab name of edge correction method used \cr } See \code{\link{ppm}} for explanation of these concepts. The irregular parameters (e.g. the interaction radius of the Strauss process) are encoded in the \code{interaction} entry. However see the Warnings. } \seealso{ \code{\link{ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{print.ppm}}, \code{\link{predict.ppm}}, \code{\link{plot.ppm}}. } \section{Warnings}{ The internal representation of \code{ppm} objects may change slightly between releases of the \pkg{spatstat} package. } \examples{ fit <- ppm(cells ~ x, Strauss(0.1), correction="periodic") fit coef(fit) \donttest{ pred <- predict(fit) } pred <- predict(fit, ngrid=20, type="trend") if(interactive()) { plot(fit) } } \author{\adrian and \rolf.} \keyword{spatial} \keyword{attribute} spatstat.model/man/roc.Rd0000644000176200001440000000504414331173077015052 0ustar liggesusers\name{roc.ppm} \alias{roc.ppm} \alias{roc.kppm} \alias{roc.slrm} \title{ Receiver Operating Characteristic } \description{ Computes the Receiver Operating Characteristic curve for a point pattern or a fitted point process model. } \usage{ \method{roc}{ppm}(X, \dots) \method{roc}{kppm}(X, \dots) \method{roc}{slrm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"lppm"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } } \details{ This command computes the Receiver Operating Characteristic (ROC) curve. The area under the ROC is computed by \code{\link{auc}}. For a point pattern \code{X} and a covariate \code{Z}, the ROC is a plot showing the ability of the covariate to separate the spatial domain into areas of high and low density of points. For each possible threshold \eqn{z}, the algorithm calculates the fraction \eqn{a(z)} of area in the study region where the covariate takes a value greater than \eqn{z}, and the fraction \eqn{b(z)} of data points for which the covariate value is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against \eqn{a(z)} for all thresholds \eqn{z}. For a fitted point process model, the ROC shows the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model (Lobo et al, 2007). (For spatial logistic regression models (class \code{"slrm"}) replace \dQuote{intensity} by \dQuote{probability of presence} in the text above.) } \value{ Function value table (object of class \code{"fv"}) which can be plotted to show the ROC curve. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R.
(2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link{auc}} } \examples{ fit <- ppm(swedishpines ~ x+y) plot(roc(fit)) } \keyword{spatial} spatstat.model/man/split.msr.Rd0000644000176200001440000000476514331173077016233 0ustar liggesusers\name{split.msr} \alias{split.msr} \title{ Divide a Measure into Parts } \description{ Decomposes a measure into components, each component being a measure. } \usage{ \method{split}{msr}(x, f, drop = FALSE, \dots) } \arguments{ \item{x}{ Measure (object of class \code{"msr"}) to be decomposed. } \item{f}{ Factor or tessellation determining the decomposition. Argument passed to \code{\link{split.ppp}}. See Details. } \item{drop}{ Logical value indicating whether empty components should be retained in the list (\code{drop=FALSE}, the default) or deleted (\code{drop=TRUE}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. See \code{\link{msr}} for explanation. This function is a method for the generic \code{\link[base]{split}}. It divides the measure \code{x} into components, each of which is a measure. A measure \code{x} is represented in \pkg{spatstat} by a finite set of sample points with values attached to them. The function \code{split.msr} divides this pattern of sample points into several sub-patterns of points using \code{\link{split.ppp}}. For each sub-pattern, the values attached to these points are extracted from \code{x}, and these values and sample points determine a measure, which is a component or piece of the original \code{x}. The argument \code{f} can be missing, if the sample points of \code{x} are multitype points. In this case, \code{x} represents a measure associated with marked spatial locations, and the command \code{split(x)} separates \code{x} into a list of component measures, one for each possible mark. Otherwise the argument \code{f} is passed to \code{\link{split.ppp}}. It should be either a factor (of length equal to the number of sample points of \code{x}) or a tessellation (object of class \code{"tess"} representing a division of space into tiles) as documented under \code{\link{split.ppp}}. } \value{ A list, each of whose entries is a measure (object of class \code{"msr"}). } \author{ \spatstatAuthors } \seealso{ \code{\link{msr}}, \code{\link{[.msr}}, \code{\link{with.msr}} } \examples{ ## split by tessellation a <- residuals(ppm(cells ~ x)) aa <- split(a, dirichlet(runifpoint(4))) aa sapply(aa, integral) ## split by type of point b <- residuals(ppm(amacrine ~ marks + x)) bb <- split(b) bb } \keyword{spatial} \keyword{manip} spatstat.model/man/fitted.slrm.Rd0000644000176200001440000000216014331173076016515 0ustar liggesusers\name{fitted.slrm} \Rdversion{1.1} \alias{fitted.slrm} \title{ Fitted Probabilities for Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel. } \usage{ \method{fitted}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. 
An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[stats:fitted.values]{fitted}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The algorithm computes the fitted probabilities of the presence of a random point in each pixel. } \value{ A pixel image (object of class \code{"im"}) containing the fitted probability for each pixel. } \seealso{ \code{\link{slrm}}, \code{\link[stats:fitted.values]{fitted}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(fitted(fit)) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/predict.mppm.Rd0000644000176200001440000001457114331173100016661 0ustar liggesusers\name{predict.mppm} \alias{predict.mppm} \title{Prediction for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model obtained by \code{\link{mppm}}, evaluate the spatial trend and/or the conditional intensity of the model. By default, predictions are evaluated over a grid of locations, yielding pixel images of the trend and conditional intensity. Alternatively predictions may be evaluated at specified locations with specified values of the covariates. } \usage{ \method{predict}{mppm}(object, ..., newdata = NULL, type = c("trend", "cif"), ngrid = 40, locations=NULL, verbose=FALSE) } \arguments{ \item{object}{The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{newdata}{ Optional. New values of the covariates, for which the predictions should be computed. See Details. } \item{type}{ Type of predicted values required. A character string or vector of character strings. Options are \code{"trend"} for the spatial trend (first-order term) and \code{"cif"} or \code{"lambda"} for the conditional intensity. Alternatively \code{type="all"} selects all options. } \item{ngrid}{ Dimensions of the grid of spatial locations at which prediction will be performed (if \code{locations=NULL}). An integer or a pair of integers. } \item{locations}{ Optional. The locations at which predictions should be performed. A list of point patterns, with one entry for each row of \code{newdata}. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ This function computes the spatial trend and the conditional intensity of a spatial point process model that has been fitted to several spatial point patterns. See Chapter 16 of Baddeley, Rubak and Turner (2015) for explanation and examples. Note that by ``spatial trend'' we mean the (exponentiated) first order potential and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}.] The conditional intensity \eqn{\lambda(u,X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location u, with respect to the response point pattern X. If \code{newdata=NULL}, predictions are computed for the original values of the covariates, to which the model was fitted. Otherwise \code{newdata} should be a hyperframe (see \code{\link{hyperframe}}) containing columns of covariates as required by the model. If \code{type} includes \code{"cif"}, then \code{newdata} must also include a column of spatial point pattern responses, in order to compute the conditional intensity. 
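A minimal sketch (mirroring the Examples below; the model is illustrative): \preformatted{
  H <- hyperframe(Y = waterstriders)
  fit <- mppm(Y ~ 1, data = H)
  p <- predict(fit, type = "trend", ngrid = 32)  # hyperframe of pixel images
  plot(p$trend)
}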
If \code{locations=NULL}, then predictions are performed at an \code{ngrid} by \code{ngrid} grid of locations in the window for each response point pattern. The result will be a hyperframe containing a column of images of the trend (if selected) and a column of images of the conditional intensity (if selected). The result can be plotted. If \code{locations} is given, then it should be a list of point patterns (objects of class \code{"ppp"}). Predictions are performed at these points, and the results are returned as mark values attached to the \code{locations}. The result is a hyperframe containing columns called \code{trend} and/or \code{cif}. The column called \code{trend} contains marked point patterns in which the point locations are the \code{locations} and the mark value is the predicted trend. The column called \code{cif} contains marked point patterns in which the point locations are the \code{locations} and the mark value is the predicted conditional intensity. } \section{Models that depend on row number}{ The point process model that is described by an \code{mppm} object may be a different point process for each row of the original hyperframe of data. This occurs if the model formula includes the variable \code{id} (representing row number) or if the model has a different interpoint interaction on each row. If the point process model is different on each row of the original data, then either \itemize{ \item \code{newdata} is missing. Predictions are computed for each row of the original data using the point process model that applies on each row. \item \code{newdata} must have the same number of rows as the original data. Each row of \code{newdata} is assumed to be a replacement for the corresponding row of the original data. The prediction for row \code{i} of \code{newdata} will be computed for the point process model that applies to row \code{i} of the original data. \item \code{newdata} must include a column called \code{id} specifying the row number, and therefore identifying which of the point process models should apply. The predictions for row \code{i} of \code{newdata} will be computed for the point process model that applies to row \code{k} of the original data, where \code{k = newdata$id[i]}. } } \value{ A hyperframe with columns named \code{trend} and/or \code{cif}. If \code{locations=NULL}, the entries of the hyperframe are pixel images. If \code{locations} is not null, the entries are marked point patterns constructed by attaching the predicted values to the \code{locations} point patterns. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. \baddrubaturnbook } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. 
} \seealso{ \code{\link{mppm}}, \code{\link{fitted.mppm}}, \code{\link{hyperframe}} } \examples{ h <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ x, data=h, interaction=Strauss(7)) # prediction on a grid p <- predict(fit) plot(p$trend) # prediction at specified locations loc <- with(h, runifpoint(20, Window(Bugs))) p2 <- predict(fit, locations=loc) plot(p2$trend) } \keyword{spatial} \keyword{models} spatstat.model/man/methods.objsurf.Rd0000644000176200001440000000337314331173077017406 0ustar liggesusers\name{methods.objsurf} \Rdversion{1.1} \alias{methods.objsurf} %DoNotExport \alias{print.objsurf} \alias{plot.objsurf} \alias{persp.objsurf} \alias{image.objsurf} \alias{contour.objsurf} \alias{summary.objsurf} \alias{print.summary.objsurf} \title{ Methods for Objective Function Surfaces } \description{ Methods for printing and plotting an objective function surface. } \usage{ \method{print}{objsurf}(x, ...) \method{plot}{objsurf}(x, ...) \method{image}{objsurf}(x, ...) \method{contour}{objsurf}(x, ...) \method{persp}{objsurf}(x, ...) \method{summary}{objsurf}(object, ...) \method{print}{summary.objsurf}(x, ...) } \arguments{ \item{x,object}{ Object of class \code{"objsurf"} representing an objective function surface. } \item{\dots}{ Additional arguments passed to plot methods. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{persp}} and \code{\link{summary}} for the class \code{"objsurf"}. } \value{ For \code{print.objsurf}, \code{print.summary.objsurf}, \code{plot.objsurf} and \code{image.objsurf} the value is \code{NULL}. For \code{contour.objsurf} and \code{persp.objsurf} the value is described in the help for \code{\link{contour.default}} and \code{\link{persp.default}} respectively. For \code{summary.objsurf} the result is a list, of class \code{summary.objsurf}, containing summary information. This list is printed in sensible format by \code{print.summary.objsurf}. } \author{ \adrian and \ege. } \seealso{ \code{\link{objsurf}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) os summary(os) plot(os) contour(os, add=TRUE) persp(os) } \keyword{spatial} \keyword{hplot} spatstat.model/man/summary.kppm.Rd0000644000176200001440000000572214334130011016715 0ustar liggesusers\name{summary.kppm} \alias{summary.kppm} \alias{print.summary.kppm} \title{Summarizing a Fitted Cox or Cluster Point Process Model} \description{ \code{summary} method for class \code{"kppm"}. } \usage{ \method{summary}{kppm}(object, \dots, quick=FALSE) \method{print}{summary.kppm}(x, \dots) } \arguments{ \item{object}{ A fitted Cox or cluster point process model (object of class \code{"kppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.kppm"} as returned by \code{summary.kppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"kppm"}. An object of class \code{"kppm"} describes a fitted Cox or cluster point process model. See \code{\link{kppm}}. \code{summary.kppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.kppm} prints this information in a comprehensible format. 
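A brief sketch (the components of the result are listed under \emph{Value} below): \preformatted{
  fit <- kppm(redwood ~ 1, "Thomas")
  s <- summary(fit)
  s$clustpar    # fitted cluster parameters of the Thomas model
}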
In normal usage, \code{print.summary.kppm} is invoked implicitly when the user calls \code{summary.kppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.kppm} returns an object of class \code{"summary.kppm"}, while \code{print.summary.kppm} returns \code{NULL}. The result of \code{summary.kppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{clusters}{the \code{clusters} argument to \code{\link{kppm}}} \item{modelname}{character string describing the model} \item{isPCP}{\code{TRUE} if the model is a Poisson cluster process, \code{FALSE} if it is a log-Gaussian Cox process} \item{lambda}{Estimated intensity: numeric value, or pixel image} \item{mu}{Mean cluster size: numeric value, pixel image, or \code{NULL}} \item{clustpar}{list of fitted parameters for the cluster model} \item{clustargs}{list of fixed parameters for the cluster model, if any} \item{callstring}{character string representing the original call to \code{\link{kppm}}} } \references{ Baddeley, A., Davies, T.M., Hazelton, M.L., Rakshit, S. and Turner, R. (2022) Fundamental problems in fitting spatial cluster process models. \emph{Spatial Statistics} \bold{52}, 100709. DOI: \code{10.1016/j.spasta.2022.100709} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") summary(fit) coef(summary(fit)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/npfun.Rd0000644000176200001440000000143214331173077015412 0ustar liggesusers\name{npfun} \alias{npfun} \title{ Dummy Function Returns Number of Points } \description{ Returns a summary function which is constant with value equal to the number of points in the point pattern. } \usage{ npfun(X, ..., r) } \arguments{ \item{X}{ Point pattern. } \item{\dots}{ Ignored. } \item{r}{ Vector of values of the distance argument \eqn{r}. } } \details{ This function is normally not called by the user. Instead it is passed as an argument to the function \code{\link{psst}}. } \value{ Object of class \code{"fv"} representing a constant function. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ \code{\link{psst}} } \examples{ fit0 <- ppm(cells, ~1, nd=10) v <- psst(fit0, npfun) } \keyword{spatial} \keyword{nonparametric} spatstat.model/man/Ord.Rd0000644000176200001440000000352614331173075015014 0ustar liggesusers\name{Ord} \alias{Ord} \title{Generic Ord Interaction model} \description{ Creates an instance of an Ord-type interaction point process model which can then be fitted to point pattern data. } \usage{ Ord(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. 
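Equivalently, the probability density of the point pattern \eqn{x} is proportional to
\deqn{
  \prod_i g(a(x_i, x))
}{
  prod[i] g(a(x[i], x))
}
possibly multiplied by a first order trend term, if one is included when the model is fitted with \code{\link{ppm}}.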
Ord (1977) proposed fitting this model to forestry data when \eqn{g(a)} has a simple ``threshold'' form. That model is implemented in our function \code{\link{OrdThresh}}. The present function \code{Ord} implements the case of a completely general Ord potential \eqn{g(a)} specified as an S language function \code{pot}.

This is experimental.
}
\references{
Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322.

Ord, J.K. (1977) Contribution to the discussion of Ripley (1977).

Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33.

Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{39}, 172--212.
}
\seealso{
\code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{OrdThresh}}
}
\author{\adrian and \rolf}
\keyword{spatial}
\keyword{models}
spatstat.model/man/psstG.Rd0000644000176200001440000001225614510474261015370 0ustar liggesusers\name{psstG}
\Rdversion{1.1}
\alias{psstG}
\title{ Pseudoscore Diagnostic For Fitted Model against Saturation Alternative }
\description{
Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of saturation type.
}
\usage{
psstG(object, r = NULL, breaks = NULL, \dots, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef = NULL, hi.res = NULL)
}
\arguments{
\item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). }
\item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. }
\item{breaks}{ Optional alternative to \code{r} for advanced use. }
\item{\dots}{ Ignored. }
\item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. }
\item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. }
\item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. }
\item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. }
}
\details{
This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model.

Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis.
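For example, the null hypothesis could be a Strauss model fitted to the data. A minimal sketch of this usage (the interaction radius \code{0.1} is arbitrary; see the Examples below for fuller usage):
\preformatted{
  fit <- ppm(cells ~ 1, Strauss(0.1))
  plot(psstG(fit))
}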
The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the Geyer saturation process (see \code{\link{Geyer}}) with saturation parameter 1. The family of alternatives includes models that are more regular than the fitted model, and others that are more clustered than the fitted model. For any point pattern \eqn{x}, and any \eqn{r > 0}, let \eqn{S(x,r)} be the number of points in \eqn{x} whose nearest neighbour (the nearest other point in \eqn{x}) is closer than \eqn{r} units. Then the pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r ) - \int_W \Delta S(u,x,r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x,r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Alternative functions: \code{\link{psstA}}, \code{\link{psst}}, \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ if(live <- interactive()) { X <- rStrauss(200,0.1,0.05) } else { pso <- spatstat.options(ndummy.min=16,npixel=32) X <- cells } plot(psstG(X)) plot(psstG(X, interaction=Strauss(0.05))) if(!live) spatstat.options(pso) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/dppBessel.Rd0000644000176200001440000000174714331173076016215 0ustar liggesusers\name{dppBessel} \alias{dppBessel} \title{Bessel Type Determinantal Point Process Model} \description{ Function generating an instance of the Bessel-type determinantal point process model. } \usage{dppBessel(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the model parameters. See Details. } } \details{ The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{sigma} as a non-negative numeric \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ Frederic Lavancier and Christophe Biscio. 
Modified by \ege, \adrian and \rolf
}
\examples{ m <- dppBessel(lambda=100, alpha=.05, sigma=0, d=2) }
\seealso{ \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} }
spatstat.model/man/cdf.test.mppm.Rd0000644000176200001440000001776414515336521016760 0ustar liggesusers\name{cdf.test.mppm}
\alias{cdf.test.mppm}
\title{Spatial Distribution Test for Multiple Point Process Model}
\description{
Performs a spatial distribution test of a point process model fitted to multiple spatial point patterns. The test compares the observed and predicted distributions of the values of a spatial covariate, using either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit.
}
\usage{
\method{cdf.test}{mppm}(model, covariate, test=c("ks", "cvm", "ad"), ..., nsim=19, verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE)
}
\arguments{
\item{model}{ An object of class \code{"mppm"} representing a point process model fitted to multiple spatial point patterns. }
\item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image, a list of functions, a list of pixel images, a hyperframe, a character string containing the name of one of the covariates in \code{model}, or one of the strings \code{"x"} or \code{"y"}. }
\item{test}{ Character string identifying the test to be performed: \code{"ks"} for the Kolmogorov-Smirnov test, \code{"cvm"} for the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for the Anderson-Darling test. }
\item{\dots}{ Arguments passed to \code{\link{cdf.test}} to control the test. }
\item{nsim}{ Number of simulated realisations which should be generated, if a Monte Carlo test is required. }
\item{verbose}{ Logical flag indicating whether to print progress reports. }
\item{interpolate}{ Logical flag indicating whether to interpolate between pixel values when \code{covariate} is a pixel image. See \emph{Details}. }
\item{fast}{ Logical flag. If \code{TRUE}, values of the covariate are only sampled at the original quadrature points used to fit the model. If \code{FALSE}, values of the covariate are sampled at all pixels, which can be slower by three orders of magnitude. }
\item{jitter}{ Logical flag. If \code{TRUE}, observed values of the covariate are perturbed by adding small random values, to avoid tied observations. }
}
\details{
This function is a method for the generic function \code{\link{cdf.test}} for the class \code{mppm}.

It performs a goodness-of-fit test of a point process model that has been fitted to multiple point patterns. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. These are exact tests if the model is Poisson; otherwise, for a Gibbs model, a Monte Carlo p-value is computed by generating simulated realisations of the model and applying the selected goodness-of-fit test to each simulation.

The argument \code{model} should be a point process model fitted to multiple point patterns (object of class \code{"mppm"}).

The argument \code{covariate} contains the values of a spatial function.
It can be
\itemize{
\item a \code{function(x,y)}
\item a pixel image (object of class \code{"im"})
\item a list of \code{function(x,y)}, one for each point pattern
\item a list of pixel images, one for each point pattern
\item a hyperframe (see \code{\link{hyperframe}}) of which the first column will be taken as containing the covariate
\item a character string giving the name of one of the covariates in \code{model}
\item one of the character strings \code{"x"} or \code{"y"}, indicating the spatial coordinates.
}
If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}.

First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected.

The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}.

The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}}.

The argument \code{interpolate} determines how pixel values will be handled when \code{covariate} is a pixel image. The value of the covariate at a data point is obtained by looking up the value of the nearest pixel if \code{interpolate=FALSE}, or by linearly interpolating between the values of the four nearest pixels if \code{interpolate=TRUE}. Linear interpolation is slower, but is sometimes necessary to avoid tied values of the covariate arising when the pixel grid is coarse.

If \code{model} is a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are theoretically exact. This test was apparently first described (in the context of spatial data, and for Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005).

If \code{model} is not a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are biased. Instead they are used as the basis of a Monte Carlo test. First \code{nsim} simulated realisations of the model will be generated. Each simulated realisation consists of a list of simulated point patterns, one for each of the original data patterns. This can take a very long time. The model is then re-fitted to each simulation, and the refitted model is subjected to the goodness-of-fit test described above. A Monte Carlo p-value is then computed by comparing the p-value of the original test with the p-values obtained from the simulations.
}
\value{
An object of class \code{"cdftest"} and \code{"htest"} containing the results of the test. See \code{\link{cdf.test}} for details.
} \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link{mppm}} } \references{ \baddrubaturnbook Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \examples{ # three i.i.d. realisations of nonuniform Poisson process lambda <- as.im(function(x,y) { 200 * exp(x) }, square(1)) dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda), rpoispp(lambda))) # fit uniform Poisson process fit0 <- mppm(X~1, dat) # fit correct nonuniform Poisson process fit1 <- mppm(X~x, dat) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") # Gibbs model fitGibbs <- update(fit0, interaction=Strauss(0.05)) ns <- if(interactive()) 19 else 2 cdf.test(fitGibbs, "x", nsim=ns) } \keyword{htest} \keyword{spatial} \concept{Goodness-of-fit} spatstat.model/man/HierHard.Rd0000644000176200001440000001054714331173075015757 0ustar liggesusers\name{HierHard} \alias{HierHard} \title{The Hierarchical Hard Core Point Process Model} \description{ Creates an instance of the hierarchical hard core point process model which can then be fitted to point pattern data. } \usage{ HierHard(hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical hard core process with hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) hard core process with \eqn{m} types, with hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]}, is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern. If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical hard core process pairwise interaction is yielded by the function \code{HierHard()}. See the examples below. The argument \code{types} need not be specified in normal use. 
It will be determined automatically from the point pattern data set to which the HierHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}.

The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}.

The matrix \code{hradii} must be square, with entries which are either positive numbers, or zero or \code{NA}. A value of zero or \code{NA} indicates that no hard core interaction term should be included for this combination of types.

Note that only the hard core distances are specified in \code{HierHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierHard()}.
}
\seealso{
\code{\link{MultiHard}} for the corresponding symmetrical interaction.

\code{\link{HierStrauss}}, \code{\link{HierStraussHard}}.
}
\examples{
h <- matrix(c(4, NA, 10, 15), 2, 2)
HierHard(h)
# prints a sensible description of itself
ppm(ants ~1, HierHard(h))
# fit the stationary hierarchical hard core process to ants data
}
\author{\adrian, \rolf and \ege.}
\references{
Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240.

\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058.
}
\keyword{spatial}
\keyword{models}
spatstat.model/man/diagnose.ppm.Rd0000644000176200001440000004141214331173076016651 0ustar liggesusers\name{diagnose.ppm}
\alias{diagnose.ppm}
\alias{plot.diagppm}
\title{ Diagnostic Plots for Fitted Point Process Model }
\description{
Given a point process model fitted to a point pattern, produce diagnostic plots based on residuals.
}
\usage{
diagnose.ppm(object, \dots, type="raw", which="all", sigma=NULL, rbord=reach(object), cumulative=TRUE, plot.it=TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5))

\method{plot}{diagppm}(x, \dots, which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL)
}
\arguments{
\item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. }
\item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. }
\item{which}{ Character string or vector indicating the choice(s) of plots to be generated. Options are \code{"all"}, \code{"marks"}, \code{"smooth"}, \code{"x"}, \code{"y"} and \code{"sum"}.
Multiple choices may be given but must be matched exactly. See Details. } \item{sigma}{ Bandwidth for kernel smoother in \code{"smooth"} option. } \item{rbord}{ Width of border to avoid edge effects. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. (An infinite value of \code{rbord} will be ignored.) } \item{cumulative}{ Logical flag indicating whether the lurking variable plots for the \eqn{x} and \eqn{y} coordinates will be the plots of cumulative sums of marks (\code{cumulative=TRUE}) or the plots of marginal integrals of the smoothed residual field (\code{cumulative=FALSE}). } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, the computed diagnostic quantities are returned without plotting them. } \item{plot.neg}{ String indicating how the density part of the residual measure should be plotted. } \item{plot.smooth}{ String indicating how the smoothed residual field should be plotted. } \item{compute.sd,plot.sd}{ Logical values indicating whether error bounds should be computed and added to the \code{"x"} and \code{"y"} plots. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{envelope,nsim,nrank}{ Arguments passed to \code{\link{lurking}} in order to plot simulation envelopes for the lurking variable plots. } \item{rv}{ Usually absent. Advanced use only. If this argument is present, the values of the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from \code{rv}. } \item{spacing}{ The spacing between plot panels (when a four-panel plot is generated) expressed as a fraction of the width of the window of the point pattern. } \item{outer}{ The distance from the outermost line of text to the nearest plot panel, expressed as a multiple of the spacing between plot panels. } \item{srange}{ Vector of length 2 that will be taken as giving the range of values of the smoothed residual field, when generating an image plot of this field. This is useful if you want to generate diagnostic plots for two different fitted models using the same colour map. } \item{monochrome}{ Flag indicating whether images should be displayed in greyscale (suitable for publication) or in colour (suitable for the screen). The default is to display in colour. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{splineargs}{ Argument passed to \code{\link{lurking}} to control the smoothing in the lurking variable plot. } \item{x}{The value returned from a previous call to \code{diagnose.ppm}. An object of class \code{"diagppm"}. 
}
\item{typename}{String to be used as the name of the residuals.}
\item{main}{Main title for the plot.}
\item{\dots}{ Extra arguments, controlling either the resolution of the smoothed image (passed from \code{diagnose.ppm} to \code{\link{density.ppp}}) or the appearance of the plots (passed from \code{diagnose.ppm} to \code{plot.diagppm} and from \code{plot.diagppm} to \code{\link{plot.default}}). }
\item{compute.cts}{Advanced use only.}
}
\value{
An object of class \code{"diagppm"} which contains the coordinates needed to reproduce the selected plots. This object can be plotted using \code{plot.diagppm} and printed using \code{print.diagppm}.
}
\details{
The function \code{diagnose.ppm} generates several diagnostic plots for a fitted point process model. The plots display the residuals from the fitted model (Baddeley et al, 2005) or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991). These plots can be used to assess goodness-of-fit, to identify outliers in the data, and to reveal departures from the fitted model. See also the companion function \code{\link{qqplot.ppm}}.

The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}.

The argument \code{type} selects the type of residual or weight that will be computed. Current options are:
\describe{
\item{\code{"eem"}:}{ exponential energy marks (Stoyan and Grabarnik, 1991) computed by \code{\link{eem}}. These are positive weights attached to the data points (i.e. the points of the point pattern dataset to which the model was fitted). If the fitted model is correct, then the sum of these weights for all data points in a spatial region \eqn{B} has expected value equal to the area of \eqn{B}. See \code{\link{eem}} for further explanation. }
\item{\code{"raw"}, \code{"inverse"} or \code{"pearson"}:}{ point process residuals (Baddeley et al, 2005) computed by the function \code{\link{residuals.ppm}}. These are residuals attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals in a spatial region \eqn{B} has mean zero. The options are
\itemize{
\item \code{"raw"}: the raw residuals;
\item \code{"inverse"}: the `inverse-lambda' residuals, a counterpart of the exponential energy weights;
\item \code{"pearson"}: the Pearson residuals.
}
See \code{\link{residuals.ppm}} for further explanation. }
}

The argument \code{which} selects the type of plot that is produced. Options are:
\describe{
\item{\code{"marks"}:}{ plot the residual measure. For the exponential energy weights (\code{type="eem"}) this displays circles centred at the points of the data pattern, with radii proportional to the exponential energy weights. For the residuals (\code{type="raw"}, \code{type="inverse"} or \code{type="pearson"}) this again displays circles centred at the points of the data pattern with radii proportional to the (positive) residuals, while the plotting of the negative residuals depends on the argument \code{plot.neg}. If \code{plot.neg="image"} then the negative part of the residual measure, which is a density, is plotted as a colour image.
If \code{plot.neg="discrete"} then the discretised negative residuals (obtained by approximately integrating the negative density using the quadrature scheme of the fitted model) are plotted as squares centred at the dummy points with side lengths proportional to the (negative) residuals. [To control the size of the circles and squares, use the argument \code{maxsize}.] } \item{\code{"smooth"}:}{ plot a kernel-smoothed version of the residual measure. Each data or dummy point is taken to have a `mass' equal to its residual or exponential energy weight. (Note that residuals can be negative). This point mass is then replaced by a bivariate isotropic Gaussian density with standard deviation \code{sigma}. The value of the smoothed residual field at any point in the window is the sum of these weighted densities. If the fitted model is correct, this smoothed field should be flat, and its height should be close to 0 (for the residuals) or 1 (for the exponential energy weights). The field is plotted either as an image, contour plot or perspective view of a surface, according to the argument \code{plot.smooth}. The range of values of the smoothed field is printed if the option \code{which="sum"} is also selected. } \item{\code{"x"}:}{ produce a `lurking variable' plot for the \eqn{x} coordinate. This is a plot of \eqn{h(x)} against \eqn{x} (solid lines) and of \eqn{E(h(x))} against \eqn{x} (dashed lines), where \eqn{h(x)} is defined below, and \eqn{E(h(x))} denotes the expectation of \eqn{h(x)} assuming the fitted model is true. \itemize{ \item if \code{cumulative=TRUE} then \eqn{h(x)} is the cumulative sum of the weights or residuals for all points which have \eqn{X} coordinate less than or equal to \eqn{x}. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } area of the subset of the window to the left of the line \eqn{X=x}. \item if \code{cumulative=FALSE} then \eqn{h(x)} is the marginal integral of the smoothed residual field (see the case \code{which="smooth"} described above) on the \eqn{x} axis. This is approximately the derivative of the plot for \code{cumulative=TRUE}. The value of \eqn{h(x)} is computed by summing the values of the smoothed residual field over all pixels with the given \eqn{x} coordinate. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } length of the intersection between the observation window and the line \eqn{X=x}. } If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{h(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. } \item{\code{"y"}:}{ produce a similar lurking variable plot for the \eqn{y} coordinate. } \item{\code{"sum"}:}{ print the sum of the weights or residuals for all points in the window (clipped by a margin \code{rbord} if required) and the area of the same window. If the fitted model is correct the sum of the exponential energy weights should equal the area of the window, while the sum of the residuals should equal zero. Also print the range of values of the smoothed field displayed in the \code{"smooth"} case. } \item{\code{"all"}:}{ All four of the diagnostic plots listed above are plotted together in a two-by-two display. Top left panel is \code{"marks"} plot. Bottom right panel is \code{"smooth"} plot. Bottom left panel is \code{"x"} plot. 
Top right panel is \code{"y"} plot, rotated 90 degrees. }
}

The argument \code{rbord} ensures there are no edge effects in the computation of the residuals. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. The value of \code{rbord} should be greater than or equal to the range of interaction permitted in the model.

By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2008). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. (However, see the section about Replicated Data.)

The argument \code{rv} would normally be used only by experts. It enables the user to substitute arbitrary values for the residuals or marks, overriding the usual calculations. If \code{rv} is present, then instead of calculating the residuals from the fitted model, the algorithm takes the residuals from the object \code{rv}, and plots them in the manner appropriate to the type of residual or mark selected by \code{type}. If \code{type="eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector of length equal to the number of points in the original data point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, it should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure.

The return value of \code{diagnose.ppm} is an object of class \code{"diagppm"}. The \code{plot} method for this class is documented here. There is also a \code{print} method. See the Examples.

In \code{plot.diagppm}, if a four-panel diagnostic plot is produced (the default), then the extra arguments \code{xlab}, \code{ylab}, \code{rlab} determine the text labels for the \eqn{x} and \eqn{y} coordinates and the residuals, respectively. The undocumented arguments \code{col.neg} and \code{col.smooth} control the colour maps used in the top left and bottom right panels respectively.

See also the companion functions \code{\link{qqplot.ppm}}, which produces a Q-Q plot of the residuals, and \code{\link{lurking}}, which produces lurking variable plots for any spatial covariate.
}
\section{Replicated Data}{
Note that if \code{object} is a model that was obtained by first fitting a model to replicated point pattern data using \code{\link{mppm}} and then using \code{\link{subfits}} to extract a model for one of the individual point patterns, then the variance calculations are only implemented for the innovation variance (\code{oldstyle=TRUE}) and this is the default in such cases.
}
\references{
Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666.

Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649.

Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100.
}
\seealso{
\code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{qqplot.ppm}}, \code{\link{lurking}}, \code{\link{ppm}}
}
\examples{
fit <- ppm(cells ~x, Strauss(r=0.15))
diagnose.ppm(fit)
\donttest{
diagnose.ppm(fit, type="pearson")
}
diagnose.ppm(fit, which="marks")
diagnose.ppm(fit, type="raw", plot.neg="discrete")
diagnose.ppm(fit, type="pearson", which="smooth")
# save the diagnostics and plot them later
u <- diagnose.ppm(fit, rbord=0.15, plot.it=FALSE)
if(interactive()) {
plot(u)
plot(u, which="marks")
}
}
\author{
\adrian and \rolf
}
\keyword{spatial}
\keyword{models}
\keyword{hplot}
spatstat.model/man/plot.ppm.Rd0000644000176200001440000001532214331173077016040 0ustar liggesusers\name{plot.ppm}
\alias{plot.ppm}
\title{Plot a Fitted Point Process Model}
\description{
Given a fitted point process model obtained by \code{\link{ppm}}, create spatial trend and conditional intensity surfaces of the model, in a form suitable for plotting, and (optionally) plot these surfaces.
}
\usage{
\method{plot}{ppm}(x, ngrid = c(40,40), superimpose = TRUE, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it = TRUE, locations = NULL, covariates=NULL, \dots)
}
\arguments{
\item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. }
\item{ngrid}{ The dimensions for a grid on which to evaluate, for plotting, the spatial trend and conditional intensity. A vector of 1 or 2 integers. If it is of length 1, \code{ngrid} is replaced by \code{c(ngrid,ngrid)}. }
\item{superimpose}{ logical flag; if \code{TRUE} (and if \code{plot.it=TRUE}) the original data point pattern will be superimposed on the plots. }
\item{trend}{ logical flag; if \code{TRUE}, the spatial trend surface will be produced. }
\item{cif}{ logical flag; if \code{TRUE}, the conditional intensity surface will be produced. }
\item{se}{ logical flag; if \code{TRUE}, the estimated standard error of the spatial trend surface will be produced. }
\item{pause}{ logical flag indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. (This flag is ignored if \code{plot.it=FALSE}.) }
\item{how}{ character string or character vector indicating the style or styles of plots to be performed. Ignored if \code{plot.it=FALSE}. }
\item{plot.it}{ logical scalar; should a plot be produced immediately? }
\item{locations}{ If present, this determines the locations of the pixels at which predictions are computed. It must be a binary pixel image (an object of class \code{"owin"} with type \code{"mask"}). (Incompatible with \code{ngrid}.) }
\item{covariates}{ Values of external covariates required by the fitted model. Passed to \code{\link{predict.ppm}}. }
\item{\dots}{ extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. }
}
\value{
An object of class \code{plotppm}. Such objects may be plotted by \code{\link{plot.plotppm}()}.

This is a list with components named \code{trend} and \code{cif}, either of which may be missing. They will be missing if the corresponding component does not make sense for the model, or if the corresponding argument was set equal to \code{FALSE}.

Both \code{trend} and \code{cif} are lists of images.
If the model is an unmarked point process, then they are lists of length 1, so that \code{trend[[1]]} is an image of the spatial trend and \code{cif[[1]]} is an image of the conditional intensity.

If the model is a marked point process, then \code{trend[[i]]} is an image of the spatial trend for the mark \code{m[i]}, and \code{cif[[i]]} is an image of the conditional intensity for the mark \code{m[i]}, where \code{m} is the vector of levels of the marks.
}
\details{
This is the \code{plot} method for the class \code{"ppm"} (see \code{\link{ppm.object}} for details of this class).

It invokes \code{\link{predict.ppm}} to compute the spatial trend and conditional intensity of the fitted point process model. See \code{\link{predict.ppm}} for more explanation about spatial trend and conditional intensity.

The default action is to create a rectangular grid of points in (the bounding box of) the observation window of the data point pattern, and evaluate the spatial trend and conditional intensity of the fitted spatial point process model \code{x} at these locations. If the argument \code{locations=} is supplied, then the spatial trend and conditional intensity are calculated at the grid of points specified by this argument.

The argument \code{locations}, if present, should be a binary image mask (an object of class \code{"owin"} and type \code{"mask"}). This determines a rectangular grid of locations, or a subset of such a grid, at which predictions will be computed. Binary image masks are conveniently created using \code{\link{as.mask}}.

The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required.

The argument \code{covariates} has the same format and interpretation as in \code{\link{predict.ppm}}. It may be either a data frame (the number of whose rows must match the number of pixels in \code{locations} multiplied by the number of possible marks in the point pattern), or a list of images. If argument \code{locations} is not supplied, and \code{covariates} \bold{is} supplied, then it \bold{must} be a list of images.

If the fitted model was a marked (multitype) point process, then predictions are made for each possible mark value in turn.

If the fitted model had no spatial trend, then the default is to omit calculating this (flat) surface, unless \code{trend=TRUE} is set explicitly.

If the fitted model was Poisson, so that there were no spatial interactions, then the conditional intensity and spatial trend are identical, and the default is to omit the conditional intensity, unless \code{cif=TRUE} is set explicitly.

If \code{plot.it=TRUE} then \code{\link{plot.plotppm}()} is called upon to plot the class \code{plotppm} object which is produced. (That object is also returned, silently.)

Plots are produced successively using \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}} (or only a selection of these three, if \code{how} is given). Extra graphical parameters controlling the display may be passed directly via the arguments \code{...} or indirectly reset using \code{\link{spatstat.options}}.
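For example, predictions can be restricted to a finer grid of pixels inside the window. The following is a minimal sketch (the model \code{m} is the one fitted in the Examples below; the grid dimensions are arbitrary):
\preformatted{
  m <- ppm(cells ~1, Strauss(0.05))
  M <- as.mask(Window(cells), dimyx=128)
  plot(m, locations=M, how="image")
}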
} \seealso{ \code{\link{plot.plotppm}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{predict.ppm}}, \code{\link{print.ppm}}, \code{\link{persp}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{plot}}, \code{\link{spatstat.options}} } \section{Warnings}{ See warnings in \code{\link{predict.ppm}}. } \examples{ m <- ppm(cells ~1, Strauss(0.05)) pm <- plot(m) # The object ``pm'' will be plotted as well as saved # for future plotting. pm } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.model/man/improve.kppm.Rd0000644000176200001440000001121114331173076016706 0ustar liggesusers\name{improve.kppm} \alias{improve.kppm} \title{Improve Intensity Estimate of Fitted Cluster Point Process Model} \description{ Update the fitted intensity of a fitted cluster point process model. } \usage{ improve.kppm(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } \item{type}{ A character string indicating the method of estimation. Current options are \code{"clik1"}, \code{"wclik1"} and \code{"quasi"} for, respectively, first order composite (Poisson) likelihood, weighted first order composite likelihood and quasi-likelihood. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Ignored if \code{rmax} is provided. } \item{dimyx}{ Pixel array dimensions. See Details. } \item{maxIter}{ Integer. Maximum number of iterations of iterative weighted least squares (Fisher scoring). } \item{tolerance}{ Numeric. Tolerance value specifying when to stop iterative weighted least squares (Fisher scoring). } \item{fast}{ Logical value indicating whether tapering should be used to make the computations faster (requires the package \pkg{Matrix}). } \item{vcov}{ Logical value indicating whether to calculate the asymptotic variance covariance/matrix. } \item{fast.vcov}{ Logical value indicating whether tapering should be used for the variance/covariance matrix to make the computations faster (requires the package \pkg{Matrix}). Caution: This is expected to underestimate the true asymptotic variances/covariances. } \item{verbose}{ A logical indicating whether the details of computations should be printed. } \item{save.internals}{ A logical indicating whether internal quantities should be saved in the returned object (mostly for development purposes). } } \value{ A fitted cluster point process model of class \code{"kppm"}. } \details{ This function reestimates the intensity parameters in a fitted \code{"kppm"} object. If \code{type="clik1"} estimates are based on the first order composite (Poisson) likelihood, which ignores dependence between the points. Note that \code{type="clik1"} is mainly included for testing purposes and is not recommended for the typical user; instead the more efficient \code{\link{kppm}} with \code{improve.type="none"} should be used. 
When \code{type="quasi"} or \code{type="wclik1"} the dependence structure between the points is incorporated in the estimation procedure by using the estimated pair correlation function in the estimating equation. In all cases the estimating equation is based on dividing the observation window into small subregions and count the number of points in each subregion. To do this the observation window is first converted into a digital mask by \code{\link{as.mask}} where the resolution is controlled by the argument \code{dimyx}. The computational time grows with the cube of the number of subregions, so fine grids may take very long to compute (or even run out of memory). } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{improve.kppm}} } \references{ Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes, \emph{Biometrics}, \bold{63}, 252-258. Guan, Y. and Shen, Y. (2010) A weighted estimating equation approach to inference for inhomogeneous spatial point processes, \emph{Biometrika}, \bold{97}, 867-880. Guan, Y., Jalilian, A. and Waagepetersen, R. (2015) Quasi-likelihood for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 677--697. } \examples{ # fit a Thomas process using minimum contrast estimation method # to model interaction between points of the pattern fit0 <- kppm(bei ~ elev + grad, data = bei.extra) # fit the log-linear intensity model with quasi-likelihood method fit1 <- improve.kppm(fit0, type="quasi") # compare coef(fit0) coef(fit1) } \author{ \abdollah and \rasmus. Adapted for \pkg{spatstat} by \adrian and \ege. } \keyword{spatial} \keyword{fit model} spatstat.model/man/plot.profilepl.Rd0000644000176200001440000000743314331173077017244 0ustar liggesusers\name{plot.profilepl} \alias{plot.profilepl} \title{ Plot Profile Likelihood } \description{ Plot the profile (pseudo) likelihood against the irregular parameters, for a model that was fitted by maximum profile (pseudo)likelihood. } \usage{ \method{plot}{profilepl}(x, \dots, add = FALSE, main = NULL, tag = TRUE, coeff = NULL, xvariable = NULL, col = 1, lty = 1, lwd = 1, col.opt = "green", lty.opt = 3, lwd.opt = 1) } \arguments{ \item{x}{ A point process model fitted by maximum profile (pseudo)likelihood. Object of class \code{"profilepl"}, obtained from \code{\link{profilepl}}. } \item{\dots}{ Additional plot arguments passed to \code{\link[graphics]{plot.default}} and \code{\link[graphics]{lines}}. } \item{add}{ Logical. If \code{TRUE}, the plot is drawn over the existing plot. } \item{main}{ Optional. Main title for the plot. A character string or character vector. } \item{tag}{ Logical value. If \code{TRUE} (the default), when the plot contains multiple curves corresponding to different values of a parameter, each curve will be labelled with the values of the irregular parameter. } \item{coeff}{ Optional. If this is given, it should be a character string matching the name of one of the fitted model coefficients. This coefficient will then be plotted on the vertical axis. } \item{xvariable}{ Optional. The name of the irregular parameter that should be plotted along the horizontal axis. The default is the first irregular parameter. } \item{col,lty,lwd}{ Graphical parameters (colour, line type, line width) for the curves on the plot. } \item{col.opt, lty.opt, lwd.opt}{ Graphical parameters for indicating the optimal parameter value. 
} } \details{ This is the \code{\link[graphics]{plot}} method for the class \code{"profilepl"} of fitted point process models obtained by maximising the profile likelihood or profile pseudolikelihood. The default behaviour is to plot the profile likelihood or profile pseudolikelihood on the vertical axis, against the value of the irregular parameter on the horizontal axis. If there are several irregular parameters, then one of them is plotted on the horizontal axis, and the plot consists of many different curves, corresponding to different values of the other parameters. The parameter to be plotted on the horizontal axis is specified by the argument \code{xvariable}; the default is to use the parameter that was listed first in the original call to \code{\link{profilepl}}. If \code{coeff} is given, it should be the name of one of the fitted model coefficients \code{names(coef(as.ppm(x)))}. The fitted value of that coefficient is plotted on the vertical axis. } \value{ Null. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. \baddrubaturnbook } \author{ \spatstatAuthors. } \seealso{ \code{\link{profilepl}} } \examples{ live <- interactive() nr <- if(live) 20 else 3 # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, length=nr)) ps <- profilepl(rr, Strauss, cells) plot(ps) # profile pseudolikelihood plot(ps, coeff="Interaction") # fitted interaction coefficient log(gamma) # two irregular parameters smax <- if(live) 3 else 2 rs <- expand.grid(r=seq(0.05,0.15, length=nr), sat=1:smax) pg <- profilepl(rs, Geyer, cells) plot(pg) # profile pseudolikelihood against r for each value of 'sat' plot(pg, coeff="Interaction") plot(pg, xvariable="sat", col=ifelse(r < 0.1, "red", "green")) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.model/man/HierStrauss.Rd0000644000176200001440000001071714331173075016544 0ustar liggesusers\name{HierStrauss} \alias{HierStrauss} \title{The Hierarchical Strauss Point Process Model} \description{ Creates an instance of the hierarchical Strauss point process model which can then be fitted to point pattern data. } \usage{ HierStrauss(radii, types=NULL, archy=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. 
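The ordering is specified by the argument \code{archy}, described below. For example, for the \code{ants} data used in the Examples, the hierarchy could be specified by naming the types in order (a minimal sketch):
\preformatted{
  r <- matrix(10 * c(3,4,4,3), nrow=2, ncol=2)
  HierStrauss(r, archy=c("Messor", "Cataglyphis"))
}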
The hierarchical version of the (stationary) Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. The nonstationary hierarchical Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss process pairwise interaction is yielded by the function \code{HierStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{HierStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStrauss()}. } \seealso{ \code{\link{MultiStrauss}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStraussHard}}. } \examples{ r <- matrix(10 * c(3,4,4,3), nrow=2,ncol=2) HierStrauss(r) # prints a sensible description of itself ppm(ants ~1, HierStrauss(r, , c("Messor", "Cataglyphis"))) # fit the stationary hierarchical Strauss process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. 
} \keyword{spatial} \keyword{models} spatstat.model/man/macros/0000755000176200001440000000000014243551505015257 5ustar liggesusersspatstat.model/man/macros/defns.Rd0000644000176200001440000001167714510474260016660 0ustar liggesusers%% macro definitions for spatstat man pages %% Authors \newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}} \newcommand{\rolf}{Rolf Turner \email{rolfturner@posteo.net}} \newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}} \newcommand{\spatstatAuthors}{\adrian, \rolf and \ege} \newcommand{\spatstatAuthorsComma}{\adrian, \rolf, \ege} %% Contributors with emails \newcommand{\pavel}{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su}} \newcommand{\dominic}{Dominic Schuhmacher \email{dominic.schuhmacher@mathematik.uni-goettingen.de}, URL \code{http://dominic.schuhmacher.name/}} \newcommand{\wei}{Ang Qi Wei \email{aqw07398@hotmail.com}} \newcommand{\colette}{Marie-Colette van Lieshout \email{Marie-Colette.van.Lieshout@cwi.nl}} \newcommand{\rasmus}{Rasmus Plenge Waagepetersen \email{rw@math.auc.dk}} \newcommand{\abdollah}{Abdollah Jalilian \email{jalilian@razi.ac.ir}} \newcommand{\ottmar}{Ottmar Cronie \email{ottmar@chalmers.se}} \newcommand{\stephenEglen}{Stephen Eglen \email{S.J.Eglen@damtp.cam.ac.uk}} \newcommand{\mehdi}{Mehdi Moradi \email{m2.moradi@yahoo.com}} \newcommand{\yamei}{Ya-Mei Chang \email{yamei628@gmail.com}} \newcommand{\martinH}{Martin Hazelton \email{Martin.Hazelton@otago.ac.nz}} \newcommand{\tilman}{Tilman Davies \email{Tilman.Davies@otago.ac.nz}} % Names with accents \newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}} \newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}} \newcommand{\Francois}{\ifelse{latex}{\out{Fran\c{c}ois}}{Francois}} \newcommand{\Frederic}{\ifelse{latex}{\out{Fr{\'e}d{\'e}ric}}{Frederic}} \newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}} \newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}} \newcommand{\Lucia}{\ifelse{latex}{\out{Luc\'{\i{}}a}}{Lucia}} \newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}} \newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}} \newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}} \newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}} \newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}} \newcommand{\Sanchez}{\ifelse{latex}{\out{S\'{a}nchez}}{Sanchez}} \newcommand{\Martin}{\ifelse{latex}{\out{Mart\'{\i}n}}{Martin}} \newcommand{\Dominguez}{\ifelse{latex}{\out{Dom\'{\i}nguez}}{Dominguez}} \newcommand{\Rodriguez}{\ifelse{latex}{\out{Rodr\'{\i}guez}}{Rodriguez}} \newcommand{\Gonzalez}{\ifelse{latex}{\out{Gonz\'{a}lez}}{Gonzalez}} %% List of all Gibbs interactions \newcommand{\GibbsInteractionsList}{\code{\link[spatstat.model]{AreaInter}}, \code{\link[spatstat.model]{BadGey}}, \code{\link[spatstat.model]{Concom}}, \code{\link[spatstat.model]{DiggleGatesStibbard}}, \code{\link[spatstat.model]{DiggleGratton}}, \code{\link[spatstat.model]{Fiksel}}, \code{\link[spatstat.model]{Geyer}}, \code{\link[spatstat.model]{Hardcore}}, \code{\link[spatstat.model]{HierHard}}, \code{\link[spatstat.model]{HierStrauss}}, \code{\link[spatstat.model]{HierStraussHard}}, \code{\link[spatstat.model]{Hybrid}}, \code{\link[spatstat.model]{LennardJones}}, \code{\link[spatstat.model]{MultiHard}}, \code{\link[spatstat.model]{MultiStrauss}}, \code{\link[spatstat.model]{MultiStraussHard}}, \code{\link[spatstat.model]{OrdThresh}}, \code{\link[spatstat.model]{Ord}}, 
\code{\link[spatstat.model]{Pairwise}}, \code{\link[spatstat.model]{PairPiece}}, \code{\link[spatstat.model]{Penttinen}}, \code{\link[spatstat.model]{Poisson}}, \code{\link[spatstat.model]{Saturated}}, \code{\link[spatstat.model]{SatPiece}}, \code{\link[spatstat.model]{Softcore}}, \code{\link[spatstat.model]{Strauss}}, \code{\link[spatstat.model]{StraussHard}} and \code{\link[spatstat.model]{Triplets}}} %% List of interactions recognised by RMH code \newcommand{\rmhInteractionsList}{\code{\link[spatstat.model]{AreaInter}}, \code{\link[spatstat.model]{BadGey}}, \code{\link[spatstat.model]{DiggleGatesStibbard}}, \code{\link[spatstat.model]{DiggleGratton}}, \code{\link[spatstat.model]{Fiksel}}, \code{\link[spatstat.model]{Geyer}}, \code{\link[spatstat.model]{Hardcore}}, \code{\link[spatstat.model]{Hybrid}}, \code{\link[spatstat.model]{LennardJones}}, \code{\link[spatstat.model]{MultiStrauss}}, \code{\link[spatstat.model]{MultiStraussHard}}, \code{\link[spatstat.model]{PairPiece}}, \code{\link[spatstat.model]{Penttinen}}, \code{\link[spatstat.model]{Poisson}}, \code{\link[spatstat.model]{Softcore}}, \code{\link[spatstat.model]{Strauss}}, \code{\link[spatstat.model]{StraussHard}} and \code{\link[spatstat.model]{Triplets}}} %% Frequent references \newcommand{\baddrubaturnbook}{Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } %% Citations of recent articles that will change rapidly \newcommand{\baddchangclustersim}{Baddeley, A. and Chang, Y.-M. (2023) Robust algorithms for simulating cluster point processes. \emph{Journal of Statistical Computation and Simulation}. In Press. DOI \code{10.1080/00949655.2023.2166045}.} spatstat.model/man/kppm.Rd0000644000176200001440000006554114514520744015246 0ustar liggesusers\name{kppm} \alias{kppm} \alias{kppm.formula} \alias{kppm.ppp} \alias{kppm.quad} \concept{point process model} \concept{Cox point process} \concept{cluster process} \concept{Neyman-Scott cluster process} \title{Fit Cluster or Cox Point Process Model} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern. 
} \usage{ kppm(X, \dots) \method{kppm}{formula}(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), \dots, data=NULL) \method{kppm}{ppp}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm", "adapcl"), penalised = FALSE, improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), stabilize=TRUE, algorithm, trajectory=FALSE, statistic="K", statargs=list(), rmax = NULL, epsilon=0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL, ppm.improve.type=c("none", "ho", "enet"), ppm.improve.args=list()) \method{kppm}{quad}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm", "adapcl"), penalised = FALSE, improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), stabilize=TRUE, algorithm, trajectory=FALSE, statistic="K", statargs=list(), rmax = NULL, epsilon=0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL, ppm.improve.type=c("none", "ho", "enet"), ppm.improve.args=list()) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"} or \code{"quad"}) to which the model should be fitted, or a \code{formula} in the \R language defining the model. See Details. } \item{trend}{ An \R formula, with no left hand side, specifying the form of the log intensity. } \item{clusters}{ Character string determining the cluster model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{data,covariates}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{subset}{ Optional. A subset of the spatial domain, to which the model-fitting should be restricted. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}), or an expression (possibly involving the names of entries in \code{data}) which can be evaluated to yield a window or pixel image. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, \code{"adapcl"} for adaptive second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{penalised}{ Logical value specifying whether the objective function (the composite likelihood or contrast) should be modified by adding a penalty against extreme values of cluster scale. } \item{improve.type}{ Method for updating the initial estimate of the trend. Initially the trend is estimated as if the process is an inhomogeneous Poisson process. The default, \code{improve.type = "none"}, is to use this initial estimate. Otherwise, the trend estimate is updated by \code{\link{improve.kppm}}, using information about the pair correlation function. Options are \code{"clik1"} (first order composite likelihood, essentially equivalent to \code{"none"}), \code{"wclik1"} (weighted first order composite likelihood) and \code{"quasi"} (quasi likelihood). } \item{improve.args}{ Additional arguments passed to \code{\link{improve.kppm}} when \code{improve.type != "none"}. See Details. 
} \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihoods or Palm likelihood. A \code{function} in the \R language, or one of the strings \code{"threshold"} or \code{"taper"}. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{stabilize}{ Logical value specifying whether to numerically stabilize the optimization algorithm, by specifying suitable default values of \code{control$fnscale} and \code{control$parscale}. } \item{algorithm}{ Character string determining the mathematical algorithm to be used to solve the fitting problem. If \code{method="mincon"}, \code{method="clik2"} or \code{method="palm"} this argument is passed to the generic optimization function \code{\link[stats]{optim}} (renamed as the argument \code{method} to \code{\link[stats]{optim}}) with default \code{"Nelder-Mead"}. If \code{method="adapcl"} the argument is passed to the equation solver \code{\link[nleqslv]{nleqslv}} (renamed as the argument \code{method} to \code{\link[nleqslv]{nleqslv}}) with default \code{"Broyden"}. } \item{trajectory}{ Logical value specifying whether to save the history of all function evaluations performed by the optimization algorithm. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{epsilon}{ Tuning parameter for the adaptive composite likelihood method. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } \item{ppm.improve.type,ppm.improve.args}{ Arguments controlling the initial fit of the trend. Passed to \code{\link{ppm}} as the arguments \code{improve.type} and \code{improve.args} respectively. } } \details{ This function fits a clustered point process model to the point pattern dataset \code{X}. The model may be either a \emph{Neyman-Scott cluster process} or another \emph{Cox process}. The type of model is determined by the argument \code{clusters}. Currently the options are \code{clusters="Thomas"} for the Thomas process, \code{clusters="MatClust"} for the \Matern cluster process, \code{clusters="Cauchy"} for the Neyman-Scott cluster process with Cauchy kernel, \code{clusters="VarGamma"} for the Neyman-Scott cluster process with Variance Gamma kernel (requires an additional argument \code{nu} to be passed through the dots; see \code{\link{rVarGamma}} for details), and \code{clusters="LGCP"} for the log-Gaussian Cox process (may require additional arguments passed through \code{\dots}; see \code{\link{rLGCP}} for details on argument names). The first four models are Neyman-Scott cluster processes. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The intensity is specified by the \code{trend} argument. If the trend formula is \code{~1} (the default) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}.
(See \code{\link{ppm}} for further explanation). The argument \code{X} may also be a \code{formula} in the \R language. The right hand side of the formula gives the \code{trend} as described above. The left hand side of the formula gives the point pattern dataset to which the model should be fitted. If \code{improve.type="none"} this is the final estimate of the intensity. Otherwise, the intensity estimate is updated, as explained in \code{\link{improve.kppm}}. Additional arguments to \code{\link{improve.kppm}} are passed as a named list in \code{improve.args}. The cluster parameters of the model are then fitted either by minimum contrast estimation, or by a composite likelihood method (maximum composite likelihood, maximum Palm likelihood, or by solving the adaptive composite likelihood estimating equation). \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) clustering parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the parameters of the cluster model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the parameters of the cluster model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is due to Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to \code{\link{clusterfit}} to control the minimum contrast fitting algorithm. The optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. } \item{Second order composite likelihood:}{ If \code{method = "clik2"} the clustering parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta)) - (sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{rho(d;theta)} is the pair correlation function of the model with cluster parameters \eqn{\theta}{theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d <= R)}, where \eqn{R} is \code{rmax/2}. 
If it is specified, the argument \code{weightfun} should be a \code{function} in the \R language with one argument. Alternatively \code{weightfun} may be one of the strings \code{"threshold"} or \code{"taper"} representing the functions \eqn{w(d) = 1(d \le R)}{w(d) = 1(d <= R)} and \eqn{w(d) = \min(1, R/d)}{w(d) = min(1, R/d)} respectively. The optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. } \item{Palm likelihood:}{ If \code{method = "palm"} the clustering parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ sum[i,j] w(x[i], x[j]) log(lambdaP(x[j] | x[i]; theta)) - integral[D] w(x[i], u) lambdaP(u | x[i]; theta) du } with the same notation as above. Here \eqn{\lambda_P(u|v;\theta)}{lambdaP(u|v;theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}. The optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. } \item{Adaptive Composite likelihood:}{ If \code{method = "adapcl"} the clustering parameters of the model will be fitted by solving the adaptive second order composite likelihood estimating equation (Lavancier et al, 2021). The estimating function is \deqn{ \sum_{u, v} w(\epsilon \frac{| g(0; \theta) - 1 |}{g(\|u-v\|; \theta)-1}) \frac{\nabla_\theta g(\|u-v\|;\theta)}{g(\|u-v\|;\theta)} - \int_D \int_D w(\epsilon \frac{ | g(0; \theta) - 1|}{g(\|u-v\|; \theta)-1}) \nabla_\theta g(\|u-v\|; \theta) \rho(u) \rho(v)\, du\, dv }{ sum[u,v] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1)) g'(||u-v||; theta) / g(||u-v||; theta) - integral[D,D] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1)) g'(||u-v||; theta) rho(u) rho(v) du dv } where the sum is taken over all distinct pairs of points. Here \eqn{g(d;\theta)}{g(d; theta)} is the pair correlation function with parameters \eqn{\theta}{theta}. The partial derivative with respect to \eqn{\theta}{theta} is \eqn{g'(d; \theta)}{g'(d; theta)}, and \eqn{\rho(u)}{rho(u)} denotes the fitted intensity function of the model. The tuning parameter \eqn{\epsilon}{epsilon} is independent of the data. It can be specified by the argument \code{epsilon} and has default value \eqn{0.01}. The function \eqn{w} in the estimating function is a weighting function of bounded support \eqn{[-1,1]}. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is \eqn{ w(d) = 1(\|d\| \le 1) \exp(1/(d^2-1))}{w(d) = 1(||d|| <= 1) exp(1/(d^2-1))}. The estimating equation is solved using the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. The package \pkg{nleqslv} must be installed in order to use this option. } } If \code{penalised=TRUE}, the fitting procedure is modified by adding a penalty against extreme values of the cluster scale, as proposed by Baddeley et al (2022). If \code{trajectory=TRUE}, the resulting object contains the history of all points in the cluster parameter space which were evaluated by the optimization algorithm. The trajectory can be extracted by \code{traj(fit)} or \code{traj(objsurf(fit))} where \code{fit} is the fitted model object. } \section{Cluster parameters for Neyman-Scott models}{ For Neyman-Scott models, the fitting procedure searches for the best-fitting values of the parameters that control the intensity of parents and the physical scale of the clusters.
(Any parameters that control the shape of the clusters must be specified separately and are assumed to be fixed.) The fitted object \code{fit} contains the fitted cluster parameters as the element \code{fit$par} in the format described below. Initial estimates for these cluster parameters can be specified using the argument \code{startpar} in the same format. The cluster parameters will be stored in a \emph{named} numeric vector \code{par} of length 2. The first value is always \code{kappa}, the intensity of parents (cluster centres). The format is as follows: \itemize{ \item for \code{clusters="Thomas"}, a vector \code{c(kappa, sigma2)} where \code{sigma2} is the square of the cluster standard deviation; \item for \code{clusters="MatClust"}, a vector \code{c(kappa, R)} where \code{R} is the radius of the cluster; \item for \code{clusters="Cauchy"}, a vector \code{c(kappa, eta2)} where \code{eta2 = 4 * scale^2} and \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}; \item for \code{clusters="VarGamma"}, a vector \code{c(kappa, eta)} where \code{eta} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. } For \code{clusters="VarGamma"} it will be necessary to specify the shape parameter \code{nu} as described in the help for \code{\link{rVarGamma}}. This is specified separately as an argument \code{nu} in the call to \code{kppm}. } \section{Optimization algorithm}{ The following details allow greater control over the fitting procedure. For the first three fitting methods (\code{method="mincon"}, \code{"clik2"} and \code{"palm"}), the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be controlled by the following arguments to \code{kppm}: \itemize{ \item \code{startpar} determines the initial estimates of the cluster parameters. \item \code{algorithm} determines the particular optimization method. This argument is passed to \code{\link[stats]{optim}} as the argument \code{method}. Options are listed in the help for \code{\link[stats]{optim}}. The default is the Nelder-Mead simplex method. \item \code{control} is a named list of control parameters, documented in the help for \code{\link[stats]{optim}}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol}. \item \code{lower} and \code{upper} specify bounds for the cluster parameters, when \code{algorithm="L-BFGS-B"} or \code{algorithm="Brent"}, as described in the help for \code{\link[stats]{optim}}. } For \code{method="adapcl"}, the estimating equation is solved using the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. The package \pkg{nleqslv} must be installed in order to use this option. The behaviour of this algorithm can be controlled by the following arguments to \code{kppm}: \itemize{ \item \code{startpar} determines the initial estimates of the cluster parameters. \item \code{algorithm} determines the method for solving the equation. This argument is passed to \code{\link[nleqslv]{nleqslv}} as the argument \code{method}. Options are listed in the help for \code{\link[nleqslv]{nleqslv}}. \item \code{globStrat} determines the global strategy to be applied. This argument is passed to \code{\link[nleqslv]{nleqslv}} as the argument \code{global}. Options are listed in the help for \code{\link[nleqslv]{nleqslv}}. \item \code{control} is a named list of control parameters, documented in the help for \code{\link[nleqslv]{nleqslv}}.
} } \section{Log-Gaussian Cox Models}{ To fit a log-Gaussian Cox model, specify \code{clusters="LGCP"} and use additional arguments to specify the covariance structure. These additional arguments can be given individually in the call to \code{kppm}, or they can be collected together in a list called \code{covmodel}. For example, a \Matern model with parameter \eqn{\nu=0.5} could be specified either by \code{kppm(X, clusters="LGCP", model="matern", nu=0.5)} or by \code{kppm(X, clusters="LGCP", covmodel=list(model="matern", nu=0.5))}. The argument \code{model} specifies the type of covariance model: the default is \code{model="exp"} for an exponential covariance. Additional arguments specify the shape parameters of the covariance model. For example if \code{model="matern"} then the additional argument \code{nu} is required. The available models are as follows: \describe{ \item{\code{model="exponential"}:}{ the exponential covariance function \deqn{C(r) = \sigma^2 \exp(-r/h)}{C(r) = sigma^2 * exp(-r/h)} where \eqn{\sigma^2} is the (fitted) variance parameter, and \eqn{h} is the (fitted) scale parameter. No shape parameters are required. } \item{\code{model="gauss"}:}{ the Gaussian covariance function \deqn{C(r) = \sigma^2 \exp(-(r/h)^2)}{C(r) = sigma^2 * exp(-(r/h)^2)} where \eqn{\sigma^2} is the (fitted) variance parameter, and \eqn{h} is the (fitted) scale parameter. No shape parameters are required. } \item{\code{model="stable"}:}{ the stable covariance function \deqn{ C(r) = \sigma^2 \exp(-(r/h)^\alpha) }{ C(r) = sigma^2 * exp(-(r/h)^alpha) } where \eqn{\sigma^2} is the (fitted) variance parameter, \eqn{h} is the (fitted) scale parameter, and \eqn{\alpha} is the shape parameter \code{alpha}. The parameter \code{alpha} must be given, either as a stand-alone argument, or as an entry in the list \code{covmodel}. } \item{\code{model="gencauchy"}:}{ the generalised Cauchy covariance function \deqn{ C(r) = \sigma^2 (1 + (r/h)^\alpha)^{-\beta/\alpha} }{ C(r) = sigma^2 * (1 + (r/h)^alpha)^(-beta/alpha) } where \eqn{\sigma^2} is the (fitted) variance parameter, \eqn{h} is the (fitted) scale parameter, and \eqn{\alpha} and \eqn{\beta} are the shape parameters \code{alpha} and \code{beta}. The parameters \code{alpha} and \code{beta} must be given, either as stand-alone arguments, or as entries in the list \code{covmodel}. } \item{\code{model="matern"}:}{ the Whittle-\Matern covariance function \deqn{ C(r) = \sigma^2 \frac{1}{2^{\nu-1} \Gamma(\nu)} (\sqrt{2 \nu} \, r/h)^\nu K_\nu(\sqrt{2\nu}\, r/h) }{ C(r) = sigma^2 * 2^(1-nu) * Gamma(nu)^(-1) * (sqrt(2*nu) * r/h)^nu * K[nu](sqrt(2*nu) * r/h) } where \eqn{\sigma^2} is the (fitted) variance parameter, \eqn{h} is the (fitted) scale parameter, and \eqn{\nu} is the shape parameter \code{nu}. The parameter \code{nu} must be given, either as a stand-alone argument, or as an entry in the list \code{covmodel}. } } Note that it is not possible to use \emph{anisotropic} covariance models because the \code{kppm} technique assumes the pair correlation function is isotropic. } \value{ An object of class \code{"kppm"} representing the fitted model. There are methods for printing, plotting, predicting, simulating and updating objects of this class. } \section{Error and warning messages}{ See \code{\link{ppm.ppp}} for a list of common error messages and warnings originating from the first stage of model-fitting.
} \seealso{ Methods for \code{kppm} objects: \code{\link{plot.kppm}}, \code{\link{fitted.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link[spatstat.model:methods.kppm]{methods.kppm}}, \code{\link{as.ppm.kppm}}, \code{\link{as.fv.kppm}}, \code{\link{Kmodel.kppm}}, \code{\link{pcfmodel.kppm}}. See also \code{\link{improve.kppm}} for improving the fit of a \code{kppm} object. Minimum contrast fitting algorithm: higher level interface \code{\link{clusterfit}}; low-level algorithm \code{\link{mincontrast}}. Alternative fitting algorithms: \code{\link{thomas.estK}}, \code{\link{matclust.estK}}, \code{\link{lgcp.estK}}, \code{\link{cauchy.estK}}, \code{\link{vargamma.estK}}, \code{\link{thomas.estpcf}}, \code{\link{matclust.estpcf}}, \code{\link{lgcp.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{vargamma.estpcf}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. For fitting Poisson or Gibbs point process models, see \code{\link{ppm}}. } \references{ Baddeley, A., Davies, T.M., Hazelton, M.L., Rakshit, S. and Turner, R. (2022) Fundamental problems in fitting spatial cluster process models. \emph{Spatial Statistics} \bold{52}, 100709. DOI: \code{10.1016/j.spasta.2022.100709} Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Guan, Y., Jalilian, A. and Waagepetersen, R. (2015) Quasi-likelihood for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 677--697. Jalilian, A., Guan, Y. and Waagepetersen, R. (2012) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Lavancier, F., Poinas, A. and Waagepetersen, R. (2021) Adaptive estimating function inference for nonstationary determinantal point processes. \emph{Scandinavian Journal of Statistics} \bold{48}, 87--107. Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32, ndummy.min=16) # method for point patterns kppm(redwood, ~1, "Thomas") # method for formulas kppm(redwood ~ 1, "Thomas") # different models for clustering if(online) kppm(redwood ~ x, "MatClust") kppm(redwood ~ x, "MatClust", statistic="pcf", statargs=list(stoyan=0.2)) kppm(redwood ~ x, clusters="Cauchy", statistic="K") kppm(redwood, clusters="VarGamma", nu = 0.5, statistic="pcf") # log-Gaussian Cox process (LGCP) models kppm(redwood ~ 1, "LGCP", statistic="pcf") kppm(redwood ~ x, "LGCP", statistic="pcf", model="matern", nu=0.3, control=list(maxit=10)) # Different fitting techniques fitc <- kppm(redwood ~ 1, "Thomas", method="clik2") fitp <- kppm(redwood ~ 1, "Thomas", method="palm") # penalised fit fitmp <- kppm(redwood ~ 1, "Thomas", penalised=TRUE) # quasi-likelihood improvement fitq <- kppm(redwood ~ x, "Thomas", improve.type = "quasi") if(!online) spatstat.options(op) } \author{ \spatstatAuthors, with contributions from \abdollah and \rasmus. Adaptive composite likelihood method contributed by Chiara Fend and modified by Adrian Baddeley.
Penalised optimization developed by Adrian Baddeley, \tilman and \martinH. } \keyword{spatial} \keyword{models} spatstat.model/man/methods.zgibbsmodel.Rd0000644000176200001440000000240514374302015020221 0ustar liggesusers\name{methods.zgibbsmodel} \alias{methods.zgibbsmodel} % DoNotExport \alias{as.interact.zgibbsmodel} \alias{as.isf.zgibbsmodel} \alias{interactionorder.zgibbsmodel} \alias{intensity.zgibbsmodel} \alias{is.poisson.zgibbsmodel} \alias{is.stationary.zgibbsmodel} \alias{print.zgibbsmodel} \title{ Methods for Gibbs Models } \description{ Methods for the experimental class of Gibbs models } \usage{ \method{as.interact}{zgibbsmodel}(object) \method{as.isf}{zgibbsmodel}(object) \method{interactionorder}{zgibbsmodel}(object) \method{is.poisson}{zgibbsmodel}(x) \method{is.stationary}{zgibbsmodel}(x) \method{print}{zgibbsmodel}(x, \dots) \method{intensity}{zgibbsmodel}(X, \dots, approx=c("Poisson", "DPP")) } \arguments{ \item{object,x,X}{ Object of class \code{"zgibbsmodel"}. } \item{\dots}{Additional arguments.} \item{approx}{ Character string (partially matched) specifying the type of approximation. } } \details{ Experimental. } \value{ Same as for other methods. } \author{ \adrian } \seealso{ \code{\link{zgibbsmodel}} } \examples{ m <- zgibbsmodel(10, Strauss(0.1), -0.5) m is.poisson(m) is.stationary(m) interactionorder(m) as.interact(m) as.isf(m) intensity(m) intensity(m, approx="D") } \keyword{spatial} \keyword{models} spatstat.model/man/profilepl.Rd0000644000176200001440000002015414331173077016262 0ustar liggesusers\name{profilepl} \alias{profilepl} \title{Fit Models by Profile Maximum Pseudolikelihood or AIC} \description{ Fits point process models by maximising the profile likelihood, profile pseudolikelihood or profile composite likelihood, or by minimising the AIC. } \usage{ profilepl(s, f, \dots, aic=FALSE, rbord=NULL, verbose = TRUE, fast=TRUE) } \arguments{ \item{s}{ Data frame containing values of the irregular parameters over which the criterion will be computed. } \item{f}{ Function (such as \code{\link{Strauss}}) that generates an interpoint interaction object, given values of the irregular parameters. } \item{\dots}{ Data passed to \code{\link{ppm}} to fit the model. } \item{aic}{ Logical value indicating whether to find the parameter values which minimise the AIC (\code{aic=TRUE}) or maximise the profile likelihood (\code{aic=FALSE}, the default). } \item{rbord}{ Radius for border correction (same for all models). If omitted, this will be computed from the interactions. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical value indicating whether to use a faster, less accurate model-fitting technique when computing the profile pseudolikelihood. See Section on Speed and Accuracy. } } \details{ The model-fitting function \code{\link{ppm}} fits point process models to point pattern data. However, only the \sQuote{regular} parameters of the model can be fitted by \code{\link{ppm}}. The model may also depend on \sQuote{irregular} parameters that must be fixed in any call to \code{\link{ppm}}. This function \code{profilepl} is a wrapper which finds the values of the irregular parameters that give the best fit. If \code{aic=FALSE} (the default), the best fit is the model which maximises the likelihood (if the models are Poisson processes) or maximises the pseudolikelihood or logistic likelihood. If \code{aic=TRUE} then the best fit is the model which minimises the Akaike Information Criterion \code{\link{AIC.ppm}}.
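For example, to choose the interaction radius of a Strauss model for the \code{cells} data by AIC (an illustrative sketch only; the range of candidate radii is arbitrary):
\preformatted{
  rr <- data.frame(r=seq(0.05, 0.15, by=0.01))  # candidate radii (arbitrary)
  pa <- profilepl(rr, Strauss, cells, aic=TRUE) # select r by minimum AIC
}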
The argument \code{s} must be a data frame whose columns contain values of the irregular parameters over which the maximisation is to be performed. An irregular parameter may affect either the interpoint interaction or the spatial trend. \describe{ \item{interaction parameters:}{ in a call to \code{\link{ppm}}, the argument \code{interaction} determines the interaction between points. It is usually a call to a function such as \code{\link{Strauss}}. The arguments of this call are irregular parameters. For example, the interaction radius parameter \eqn{r} of the Strauss process, determined by the argument \code{r} to the function \code{\link{Strauss}}, is an irregular parameter. } \item{trend parameters:}{ in a call to \code{\link{ppm}}, the spatial trend may depend on covariates, which are supplied by the argument \code{covariates}. These covariates may be functions written by the user, of the form \code{function(x,y,...)}, and the extra arguments \code{\dots} are irregular parameters. } } The argument \code{f} determines the interaction for each model to be fitted. It would typically be one of the functions \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{OrdThresh}}, \code{\link{Softcore}}, \code{\link{Strauss}} or \code{\link{StraussHard}}. Alternatively it could be a function written by the user. Columns of \code{s} which match the names of arguments of \code{f} will be interpreted as interaction parameters. Other columns will be interpreted as trend parameters. The data frame \code{s} must provide values for each argument of \code{f}, except for the optional arguments, which are those arguments of \code{f} that have the default value \code{NA}. To find the best fit, each row of \code{s} will be taken in turn. Interaction parameters in this row will be passed to \code{f}, resulting in an interaction object. Then \code{\link{ppm}} will be applied to the data supplied in \code{\dots}, using this interaction. Any trend parameters will be passed to \code{\link{ppm}} through the argument \code{covfunargs}. This results in a fitted point process model. The value of the log pseudolikelihood or AIC from this model is stored. After all rows of \code{s} have been processed in this way, the row giving the maximum value of the log pseudolikelihood (or the minimum value of the AIC, if \code{aic=TRUE}) will be found. The object returned by \code{profilepl} contains the profile pseudolikelihood (or profile AIC) function, the best fitting model, and other data. It can be plotted (yielding a plot of the log pseudolikelihood or AIC values against the irregular parameters) or printed (yielding information about the best fitting values of the irregular parameters). In general, \code{f} may be any function that will return an interaction object (object of class \code{"interact"}) that can be used in a call to \code{\link{ppm}}. Each argument of \code{f} must be a single value. } \section{Speed and Accuracy}{ Computation of the profile pseudolikelihood can be time-consuming. We recommend starting with a small experiment in which \code{s} contains only a few rows of values. This will indicate roughly the optimal values of the parameters. Then a full calculation using more finely spaced values can identify the exact optimal values. It is normal that the procedure appears to slow down at the end.
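This suggests a two-stage search, sketched below (the parameter ranges shown are arbitrary and not a recommendation):
\preformatted{
  ## small pilot experiment over a few widely spaced radii
  coarse <- profilepl(data.frame(r=seq(0.05, 0.15, by=0.02)), Strauss, cells)
  ## finer search near the optimum indicated by the pilot
  fine <- profilepl(data.frame(r=seq(0.08, 0.12, by=0.005)), Strauss, cells)
}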
During the computation of the profile pseudolikelihood, the model-fitting procedure is accelerated by omitting some calculations that are not needed for computing the pseudolikelihood. When the optimal parameter values have been identified, they are used to fit the final model in its entirety. Fitting the final model can take longer than computing the profile pseudolikelihood. If \code{fast=TRUE} (the default), then additional shortcuts are taken in order to accelerate the computation of the profile log pseudolikelihood. These shortcuts mean that the values of the profile log pseudolikelihood in the result (\code{$prof}) may not be equal to the values that would be obtained if the model was fitted normally. Currently this happens only for the area interaction \code{\link{AreaInter}}. It may be wise to do a small experiment with \code{fast=TRUE} and then a definitive calculation with \code{fast=FALSE}. } \value{ An object of class \code{"profilepl"}. There are methods for \code{\link[graphics]{plot}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{simulate}}, \code{\link{as.ppm}}, \code{\link{fitin}} and \code{\link{parameters}} for objects of this class. The components of the object include \item{fit}{Best-fitting model} \item{param}{The data frame \code{s}} \item{iopt}{Row index of the best-fitting parameters in \code{s}} To extract the best fitting model you can also use \code{\link{as.ppm}}. } \seealso{ \code{\link{plot.profilepl}} } \examples{ # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, by=0.01)) \testonly{ rr <- data.frame(r=c(0.05,0.1,0.15)) } ps <- profilepl(rr, Strauss, cells) ps plot(ps) # two irregular parameters rs <- expand.grid(r=seq(0.05,0.15, by=0.01),sat=1:3) \testonly{ rs <- expand.grid(r=c(0.07,0.12),sat=1:2) } pg <- profilepl(rs, Geyer, cells) pg as.ppm(pg) ## more information summary(pg) \donttest{ # multitype pattern with a common interaction radius RR <- data.frame(R=seq(0.03,0.05,by=0.01)) MS <- function(R) { MultiStrauss(radii=diag(c(R,R))) } pm <- profilepl(RR, MS, amacrine ~ marks) } } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/fixef.mppm.Rd0000644000176200001440000000264114331173100016323 0ustar liggesusers\name{fixef.mppm} \alias{fixef.mppm} \title{ Extract Fixed Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the fixed effects of the model. A method for \code{fixef}. } \usage{ \method{fixef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{fixef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the fixed effects of the model. } \value{ A numeric vector of coefficients. } \references{ \baddrubaturnbook } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors.
} \seealso{ \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) fixef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) fixef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/plot.dppm.Rd0000644000176200001440000000351214331173077016202 0ustar liggesusers\name{plot.dppm} \alias{plot.dppm} \title{Plot a fitted determinantal point process} \description{ Plots a fitted determinantal point process model, displaying the fitted intensity and the fitted summary function. } \usage{ \method{plot}{dppm}(x, ..., what=c("intensity", "statistic")) } \arguments{ \item{x}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } } \details{ This is a method for the generic function \code{\link{plot}} for the class \code{"dppm"} of fitted determinantal point process models. The argument \code{x} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"} and \code{"statistic"}. This command is capable of producing two different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} } } \value{ Null. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss, method="clik2") plot(fit) } \seealso{ \code{\link{dppm}}, \code{\link{plot.ppm}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/methods.traj.Rd0000644000176200001440000000324614334117440016666 0ustar liggesusers\name{methods.traj} \alias{methods.traj} %DoNotExport \alias{lines.traj} \alias{plot.traj} \alias{print.traj} \title{ Methods for Trajectories of Function Evaluations } \description{ Methods for objects of class \code{"traj"}. } \usage{ \method{print}{traj}(x, \dots) \method{plot}{traj}(x, \dots, show.ends=TRUE, add=FALSE, xlab=NULL, ylab=NULL) \method{lines}{traj}(x, \dots, directed=FALSE) } \arguments{ \item{x}{ Object of class \code{"traj"}. } \item{\dots}{ Additional arguments passed to other methods. } \item{directed}{ Logical value specifying whether to draw arrows instead of undirected lines. } \item{show.ends}{ Logical value specifying whether to indicate the start and finish of the trajectory. The start is a blue circle; the finish is a red cross. } \item{add}{ Logical value specifying whether to draw the trajectory on the existing plot (\code{add=TRUE}) or to start a new plot (\code{add=FALSE}, the default). } \item{xlab,ylab}{Optional labels for the horizontal and vertical axes.} } \details{ An object of class \code{"traj"} represents the history of evaluations of the objective function performed when a cluster process model was fitted.
It is a data frame containing the input parameter values for the objective function, and the corresponding value of the objective function, that were considered by the optimisation algorithm. These functions are methods for the generic \code{print}, \code{plot} and \code{lines}. } \value{ Null. } \author{ \adrian. } \seealso{ \code{\link{traj}} } \examples{ fit <- kppm(redwood, pspace=list(save=TRUE)) h <- traj(fit) h plot(h) lines(h) } spatstat.model/man/spatstat.model-package.Rd0000644000176200001440000006545714374302240020625 0ustar liggesusers\name{spatstat.model-package} \alias{spatstat.model-package} \alias{spatstat.model} \docType{package} \title{The spatstat.model Package} \description{ The \pkg{spatstat.model} package belongs to the \pkg{spatstat} family of packages. It contains the core functionality for parametric statistical modelling of spatial data. } \details{ \pkg{spatstat} is a family of \R packages for the statistical analysis of spatial data. Its main focus is the analysis of spatial patterns of points in two-dimensional space. The original \pkg{spatstat} package has now been split into several sub-packages. This sub-package \pkg{spatstat.model} contains all the main user-level functions that perform parametric statistical modelling of spatial data. (The main exception is that functions for linear networks are in the separate sub-package \pkg{spatstat.linnet}.) } \section{Structure of the spatstat family}{ The original \pkg{spatstat} package grew to be very large. It has now been divided into several \bold{sub-packages}: \itemize{ \item \pkg{spatstat.utils} containing basic utilities \item \pkg{spatstat.sparse} containing linear algebra utilities \item \pkg{spatstat.data} containing datasets \item \pkg{spatstat.geom} containing geometrical objects and geometrical operations \item \pkg{spatstat.explore} containing the functionality for exploratory analysis and nonparametric modelling of spatial data \item \pkg{spatstat.model} containing the main functionality for parametric modelling, analysis and inference for spatial data \item \pkg{spatstat.linnet} containing functions for spatial data on a linear network \item \pkg{spatstat}, which simply loads the other sub-packages listed above, and provides documentation. } When you install \pkg{spatstat}, these sub-packages are also installed. Then if you load the \pkg{spatstat} package by typing \code{library(spatstat)}, the other sub-packages listed above will automatically be loaded or imported. For an overview of all the functions available in the sub-packages of \pkg{spatstat}, see the help file for \code{"spatstat-package"} in the \pkg{spatstat} package. Additionally there are several \bold{extension packages:} \itemize{ \item \pkg{spatstat.gui} for interactive graphics \item \pkg{spatstat.local} for local likelihood (including geographically weighted regression) \item \pkg{spatstat.Knet} for additional, computationally efficient code for linear networks \item \pkg{spatstat.sphere} (under development) for spatial data on a sphere, including spatial data on the earth's surface } The extension packages must be installed separately and loaded explicitly if needed. They also have separate documentation. } \section{Overview of Functionality in \pkg{spatstat.model}}{ The \pkg{spatstat} family of packages is designed to support a complete statistical analysis of spatial data.
It supports \itemize{ \item creation, manipulation and plotting of point patterns; \item exploratory data analysis; \item spatial random sampling; \item simulation of point process models; \item parametric model-fitting; \item non-parametric smoothing and regression; \item formal inference (hypothesis tests, confidence intervals); \item model diagnostics. } For an overview, see the help file for \code{"spatstat-package"} in the \pkg{spatstat} package. Following is a list of the functionality provided in the \pkg{spatstat.model} package only. \bold{To simulate a random point pattern:} Functions for generating random point patterns are now contained in the \pkg{spatstat.random} package. \bold{Exploratory analysis} Exploratory graphics, smoothing, and exploratory analysis of spatial data are now provided in the \pkg{spatstat.explore} package. \bold{Model fitting (Cox and cluster models)} Cluster process models (with homogeneous or inhomogeneous intensity) and Cox processes can be fitted by the function \code{\link[spatstat.model]{kppm}}. Its result is an object of class \code{"kppm"}. The fitted model can be printed, plotted, predicted, simulated and updated. \tabular{ll}{ \code{\link[spatstat.model]{kppm}} \tab Fit model\cr \code{\link[spatstat.model]{plot.kppm}} \tab Plot the fitted model\cr \code{\link[spatstat.model]{summary.kppm}} \tab Summarise the fitted model\cr \code{\link[spatstat.model]{fitted.kppm}} \tab Compute fitted intensity \cr \code{\link[spatstat.model]{predict.kppm}} \tab Compute fitted intensity \cr \code{\link[spatstat.model]{update.kppm}} \tab Update the model \cr \code{\link[spatstat.model]{improve.kppm}} \tab Refine the estimate of trend \cr \code{\link[spatstat.model]{simulate.kppm}} \tab Generate simulated realisations \cr \code{\link[spatstat.model]{vcov.kppm}} \tab Variance-covariance matrix of coefficients \cr \code{\link[spatstat.model:methods.kppm]{coef.kppm}} \tab Extract trend coefficients \cr \code{\link[spatstat.model:methods.kppm]{formula.kppm}} \tab Extract trend formula \cr \code{\link[spatstat.model]{parameters}} \tab Extract all model parameters \cr \code{\link[spatstat.model]{clusterfield.kppm}} \tab Compute offspring density \cr \code{\link[spatstat.model]{clusterradius.kppm}} \tab Radius of support of offspring density \cr \code{\link[spatstat.model]{Kmodel.kppm}} \tab \eqn{K} function of fitted model \cr \code{\link[spatstat.model]{pcfmodel.kppm}} \tab Pair correlation of fitted model } For model selection, you can also use the generic functions \code{\link[stats]{step}}, \code{\link[stats]{drop1}} and \code{\link[stats]{AIC}} on fitted point process models. For variable selection, see \code{\link[spatstat.explore]{sdr}}. The theoretical models can also be simulated, for any choice of parameter values, using \code{\link[spatstat.random]{rThomas}}, \code{\link[spatstat.random]{rMatClust}}, \code{\link[spatstat.random]{rCauchy}}, \code{\link[spatstat.random]{rVarGamma}}, and \code{\link[spatstat.random]{rLGCP}}. 
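A minimal sketch of this workflow (fit, inspect, simulate), using the \code{redwood} dataset from \pkg{spatstat.data}:
\preformatted{
  fit <- kppm(redwood ~ 1, clusters="Thomas")  # fit a Thomas cluster model
  fit                                          # print the fitted parameters
  Xsim <- simulate(fit, nsim=2)                # two simulated realisations
}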
Lower-level fitting functions include: \tabular{ll}{ \code{\link[spatstat.model]{lgcp.estK}} \tab fit a log-Gaussian Cox process model\cr \code{\link[spatstat.model]{lgcp.estpcf}} \tab fit a log-Gaussian Cox process model\cr \code{\link[spatstat.model]{thomas.estK}} \tab fit the Thomas process model \cr \code{\link[spatstat.model]{thomas.estpcf}} \tab fit the Thomas process model \cr \code{\link[spatstat.model]{matclust.estK}} \tab fit the \Matern Cluster process model \cr \code{\link[spatstat.model]{matclust.estpcf}} \tab fit the \Matern Cluster process model \cr \code{\link[spatstat.model]{cauchy.estK}} \tab fit a Neyman-Scott Cauchy cluster process \cr \code{\link[spatstat.model]{cauchy.estpcf}} \tab fit a Neyman-Scott Cauchy cluster process\cr \code{\link[spatstat.model]{vargamma.estK}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link[spatstat.model]{vargamma.estpcf}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link[spatstat.model]{mincontrast}} \tab low-level algorithm for fitting models \cr \tab by the method of minimum contrast } \bold{Model fitting (Poisson and Gibbs models)} Poisson point processes are the simplest models for point patterns. A Poisson model assumes that the points are stochastically independent. It may allow the points to have a non-uniform spatial density. The special case of a Poisson process with a uniform spatial density is often called Complete Spatial Randomness. Poisson point processes are included in the more general class of Gibbs point process models. In a Gibbs model, there is \emph{interaction} or dependence between points. Many different types of interaction can be specified. For a detailed explanation of how to fit Poisson or Gibbs point process models to point pattern data using \pkg{spatstat}, see Baddeley and Turner (2005b) or Baddeley (2008). \bold{To fit a Poisson or Gibbs point process model:} Model fitting in \pkg{spatstat} is performed mainly by the function \code{\link[spatstat.model]{ppm}}. Its result is an object of class \code{"ppm"}. Here are some examples, where \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{ppm(X)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{ppm(X ~ 1, Strauss(0.1))} \tab Stationary Strauss process \cr \code{ppm(X ~ x, Strauss(0.1))} \tab Strauss process with \cr \tab conditional intensity loglinear in \eqn{x} } It is also possible to fit models that depend on other covariates. 
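For example, if \code{Z} is a spatial covariate (say, a pixel image of class \code{"im"}), a loglinear dependence on \code{Z} can be fitted as in the following sketch, where \code{X} and \code{Z} stand for the user's own data:
\preformatted{
  ## illustrative only: X is a point pattern, Z is a pixel image covariate
  fit <- ppm(X ~ Z, data=list(Z=Z))  # log intensity is linear in Z
}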
\bold{Manipulating the fitted model:} \tabular{ll}{ \code{\link[spatstat.model]{plot.ppm}} \tab Plot the fitted model\cr \code{\link[spatstat.model]{predict.ppm}} \tab Compute the spatial trend and conditional intensity\cr \tab of the fitted point process model \cr \code{\link[spatstat.model]{coef.ppm}} \tab Extract the fitted model coefficients\cr \code{\link[spatstat.model]{parameters}} \tab Extract all model parameters\cr \code{\link[spatstat.model]{formula.ppm}} \tab Extract the trend formula\cr \code{\link[spatstat.model]{intensity.ppm}} \tab Compute fitted intensity \cr \code{\link[spatstat.model]{Kmodel.ppm}} \tab \eqn{K} function of fitted model \cr \code{\link[spatstat.model]{pcfmodel.ppm}} \tab pair correlation of fitted model \cr \code{\link[spatstat.model]{fitted.ppm}} \tab Compute fitted conditional intensity at quadrature points \cr \code{\link[spatstat.model]{residuals.ppm}} \tab Compute point process residuals at quadrature points \cr \code{\link[spatstat.model]{update.ppm}} \tab Update the fit \cr \code{\link[spatstat.model]{vcov.ppm}} \tab Variance-covariance matrix of estimates\cr \code{\link[spatstat.model]{rmh.ppm}} \tab Simulate from fitted model \cr \code{\link[spatstat.model]{simulate.ppm}} \tab Simulate from fitted model \cr \code{\link[spatstat.model]{print.ppm}} \tab Print basic information about a fitted model\cr \code{\link[spatstat.model]{summary.ppm}} \tab Summarise a fitted model\cr \code{\link[spatstat.model]{effectfun}} \tab Compute the fitted effect of one covariate\cr \code{\link[spatstat.model]{logLik.ppm}} \tab log-likelihood or log-pseudolikelihood\cr \code{\link[spatstat.model]{anova.ppm}} \tab Analysis of deviance \cr \code{\link[spatstat.model]{model.frame.ppm}} \tab Extract data frame used to fit model \cr \code{\link[spatstat.model]{model.images}} \tab Extract spatial data used to fit model \cr \code{\link[spatstat.model]{model.depends}} \tab Identify variables in the model \cr \code{\link[spatstat.model]{as.interact}} \tab Interpoint interaction component of model \cr \code{\link[spatstat.model]{fitin}} \tab Extract fitted interpoint interaction \cr \code{\link[spatstat.model]{is.hybrid}} \tab Determine whether the model is a hybrid \cr \code{\link[spatstat.model]{valid.ppm}} \tab Check the model is a valid point process \cr \code{\link[spatstat.model]{project.ppm}} \tab Ensure the model is a valid point process } For model selection, you can also use the generic functions \code{\link[stats]{step}}, \code{\link[stats]{drop1}} and \code{\link[stats]{AIC}} on fitted point process models. For variable selection, see \code{\link[spatstat.explore]{sdr}}. See \code{\link[spatstat.geom]{spatstat.options}} to control plotting of fitted model. \bold{To specify a point process model:} The first order ``trend'' of the model is determined by an \R language formula. The formula specifies the form of the \emph{logarithm} of the trend. \tabular{ll}{ \code{X ~ 1} \tab No trend (stationary) \cr \code{X ~ x} \tab Loglinear trend \eqn{\lambda(x,y) = \exp(\alpha + \beta x)}{lambda(x,y) = exp(alpha + beta * x)} \cr \tab where \eqn{x,y} are Cartesian coordinates \cr \code{X ~ polynom(x,y,3)} \tab Log-cubic polynomial trend \cr \code{X ~ harmonic(x,y,2)} \tab Log-harmonic polynomial trend \cr \code{X ~ Z} \tab Loglinear function of covariate \code{Z} \cr \tab \eqn{\lambda(x,y) = \exp(\alpha + \beta Z(x,y))}{lambda(x,y) = exp(alpha + beta * Z(x,y))} } The higher order (``interaction'') components are described by an object of class \code{"interact"}. 
Such objects are created by: \tabular{ll}{ \code{\link[spatstat.model]{Poisson}()} \tab the Poisson point process\cr \code{\link[spatstat.model]{AreaInter}()} \tab Area-interaction process\cr \code{\link[spatstat.model]{BadGey}()} \tab multiscale Geyer process\cr \code{\link[spatstat.model]{Concom}()} \tab connected component interaction\cr \code{\link[spatstat.model]{DiggleGratton}() } \tab Diggle-Gratton potential \cr \code{\link[spatstat.model]{DiggleGatesStibbard}() } \tab Diggle-Gates-Stibbard potential \cr \code{\link[spatstat.model]{Fiksel}()} \tab Fiksel pairwise interaction process\cr \code{\link[spatstat.model]{Geyer}()} \tab Geyer's saturation process\cr \code{\link[spatstat.model]{Hardcore}()} \tab Hard core process\cr \code{\link[spatstat.model]{HierHard}()} \tab Hierarchical multitype hard core process\cr \code{\link[spatstat.model]{HierStrauss}()} \tab Hierarchical multitype Strauss process\cr \code{\link[spatstat.model]{HierStraussHard}()} \tab Hierarchical multitype Strauss-hard core process\cr \code{\link[spatstat.model]{Hybrid}()} \tab Hybrid of several interactions\cr \code{\link[spatstat.model]{LennardJones}() } \tab Lennard-Jones potential \cr \code{\link[spatstat.model]{MultiHard}()} \tab multitype hard core process \cr \code{\link[spatstat.model]{MultiStrauss}()} \tab multitype Strauss process \cr \code{\link[spatstat.model]{MultiStraussHard}()} \tab multitype Strauss/hard core process \cr \code{\link[spatstat.model]{OrdThresh}()} \tab Ord process, threshold potential\cr \code{\link[spatstat.model]{Ord}()} \tab Ord model, user-supplied potential \cr \code{\link[spatstat.model]{PairPiece}()} \tab pairwise interaction, piecewise constant \cr \code{\link[spatstat.model]{Pairwise}()} \tab pairwise interaction, user-supplied potential\cr \code{\link[spatstat.model]{Penttinen}()} \tab Penttinen pairwise interaction\cr \code{\link[spatstat.model]{SatPiece}()} \tab Saturated pair model, piecewise constant potential\cr \code{\link[spatstat.model]{Saturated}()} \tab Saturated pair model, user-supplied potential\cr \code{\link[spatstat.model]{Softcore}()} \tab pairwise interaction, soft core potential\cr \code{\link[spatstat.model]{Strauss}()} \tab Strauss process \cr \code{\link[spatstat.model]{StraussHard}()} \tab Strauss/hard core point process \cr \code{\link[spatstat.model]{Triplets}()} \tab Geyer triplets process } Note that it is also possible to combine several such interactions using \code{\link[spatstat.model]{Hybrid}}. \bold{Simulation and goodness-of-fit for fitted models:} \tabular{ll}{ \code{\link[spatstat.model]{rmh.ppm}} \tab simulate realisations of a fitted model \cr \code{\link[spatstat.model]{simulate.ppm}} \tab simulate realisations of a fitted model \cr \code{\link[spatstat.model]{envelope}} \tab compute simulation envelopes for a fitted model } \bold{Model fitting (determinantal point process models)} Code for fitting \emph{determinantal point process models} has recently been added to \pkg{spatstat}. For information, see the help file for \code{\link[spatstat.model]{dppm}}. \bold{Model fitting (spatial logistic regression)} Pixel-based spatial logistic regression is an alternative technique for analysing spatial point patterns that is widely used in Geographical Information Systems. It is approximately equivalent to fitting a Poisson point process model.
In pixel-based logistic regression, the spatial domain is divided into small pixels, the presence or absence of a data point in each pixel is recorded, and logistic regression is used to model the presence/absence indicators as a function of any covariates. Facilities for performing spatial logistic regression are provided in \pkg{spatstat} for comparison purposes. \bold{Fitting a spatial logistic regression} Spatial logistic regression is performed by the function \code{\link[spatstat.model]{slrm}}. Its result is an object of class \code{"slrm"}. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{simulate}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. For example, if \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{slrm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{slrm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{slrm(X ~ Z)} \tab Poisson process with \cr \tab intensity loglinear in covariate \code{Z} } \bold{Manipulating a fitted spatial logistic regression} \tabular{ll}{ \code{\link[spatstat.model]{anova.slrm}} \tab Analysis of deviance \cr \code{\link[spatstat.model]{coef.slrm}} \tab Extract fitted coefficients \cr \code{\link[spatstat.model]{vcov.slrm}} \tab Variance-covariance matrix of fitted coefficients \cr \code{\link[spatstat.model]{fitted.slrm}} \tab Compute fitted probabilities or intensity \cr \code{\link[spatstat.model]{logLik.slrm}} \tab Evaluate loglikelihood of fitted model \cr \code{\link[spatstat.model]{plot.slrm}} \tab Plot fitted probabilities or intensity \cr \code{\link[spatstat.model]{predict.slrm}} \tab Compute predicted probabilities or intensity with new data \cr \code{\link[spatstat.model]{simulate.slrm}} \tab Simulate model } There are also several other undocumented methods for this class. Stepwise model selection is possible using \code{step} or \code{stepAIC}. For variable selection, see \code{\link[spatstat.explore]{sdr}}. \bold{Simulation} There are many ways to generate a random point pattern, line segment pattern, pixel image or tessellation in \pkg{spatstat}. \bold{Random point patterns:} Functions for random generation are now contained in the \pkg{spatstat.random} package. See also \code{\link[spatstat.explore]{varblock}} for estimating the variance of a summary statistic by block resampling, and \code{\link[spatstat.explore]{lohboot}} for another bootstrap technique. \bold{Fitted point process models:} If you have fitted a point process model to a point pattern dataset, the fitted model can be simulated. Cluster process models are fitted by the function \code{\link[spatstat.model]{kppm}} yielding an object of class \code{"kppm"}. To generate one or more simulated realisations of this fitted model, use \code{\link[spatstat.model]{simulate.kppm}}. Gibbs point process models are fitted by the function \code{\link[spatstat.model]{ppm}} yielding an object of class \code{"ppm"}. To generate a simulated realisation of this fitted model, use \code{\link[spatstat.model]{rmh.ppm}}. To generate one or more simulated realisations of the fitted model, use \code{\link[spatstat.model]{simulate.ppm}} (see the sketch below). \bold{Other random patterns:} Functions for random generation are now contained in the \pkg{spatstat.random} package.
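To illustrate simulation from fitted cluster and Gibbs models (the sketch promised above), here is a minimal example using the \code{redwood} and \code{cells} datasets from \pkg{spatstat.data}; the object names are arbitrary and the Strauss radius \code{0.07} is chosen purely for illustration. \preformatted{
  # fit a Thomas cluster process and generate two realisations
  fitT <- kppm(redwood ~ 1, "Thomas")
  XT <- simulate(fitT, nsim=2)

  # fit a Strauss model and simulate it by Metropolis-Hastings
  fitS <- ppm(cells ~ 1, Strauss(0.07))
  XS <- simulate(fitS, nsim=2)
}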
\bold{Simulation-based inference} Simulation-based inference including simulation envelopes and hypothesis tests is now supported by the package \pkg{spatstat.explore}. \bold{Sensitivity diagnostics:} Classical measures of model sensitivity such as leverage and influence have been adapted to point process models. \tabular{ll}{ \code{\link[spatstat.model]{leverage.ppm}} \tab Leverage for point process model\cr \code{\link[spatstat.model]{influence.ppm}} \tab Influence for point process model\cr \code{\link[spatstat.model]{dfbetas.ppm}} \tab Parameter influence\cr \code{\link[spatstat.model]{dffit.ppm}} \tab Effect change diagnostic } \bold{Diagnostics for covariate effect:} Classical diagnostics for covariate effects have been adapted to point process models. \tabular{ll}{ \code{\link[spatstat.model]{parres}} \tab Partial residual plot\cr \code{\link[spatstat.model]{addvar}} \tab Added variable plot \cr \code{\link[spatstat.model]{rhohat.ppm}} \tab Kernel estimate of covariate effect\cr \code{\link[spatstat.explore]{rho2hat}} \tab Kernel estimate of covariate effect (bivariate) } \bold{Residual diagnostics:} Residuals for a fitted point process model, and diagnostic plots based on the residuals, were introduced in Baddeley et al (2005) and Baddeley, Rubak and \Moller (2011). Type \code{demo(diagnose)} for a demonstration of the diagnostics features. \tabular{ll}{ \code{\link[spatstat.model]{diagnose.ppm}} \tab diagnostic plots for spatial trend\cr \code{\link[spatstat.model]{qqplot.ppm}} \tab diagnostic Q-Q plot for interpoint interaction\cr \code{\link[spatstat.data]{residualspaper}} \tab examples from Baddeley et al (2005) \cr \code{\link[spatstat.model]{Kcom}} \tab model compensator of \eqn{K} function \cr \code{\link[spatstat.model]{Gcom}} \tab model compensator of \eqn{G} function \cr \code{\link[spatstat.model]{Kres}} \tab score residual of \eqn{K} function \cr \code{\link[spatstat.model]{Gres}} \tab score residual of \eqn{G} function \cr \code{\link[spatstat.model]{psst}} \tab pseudoscore residual of summary function \cr \code{\link[spatstat.model]{psstA}} \tab pseudoscore residual of empty space function \cr \code{\link[spatstat.model]{psstG}} \tab pseudoscore residual of \eqn{G} function \cr \code{\link[spatstat.model]{compareFit}} \tab compare compensators of several fitted models } \bold{Resampling and randomisation procedures} You can build your own tests based on randomisation and resampling using the following capabilities: \tabular{ll}{ \code{\link[spatstat.random]{quadratresample}} \tab block resampling \cr \code{\link[spatstat.random]{rshift}} \tab random shifting of (subsets of) points\cr \code{\link[spatstat.random]{rthin}} \tab random thinning } } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. } \author{ \spatstatAuthors. } \section{Acknowledgements}{ Kasper Klitgaard Berthelsen, Ottmar Cronie, Tilman Davies, Julian Gilbey, Yongtao Guan, Ute Hahn, Kassel Hingee, Abdollah Jalilian, Marie-Colette van Lieshout, Greg McSwiggan, Tuomas Rajala, Suman Rakshit, Dominic Schuhmacher, Rasmus Waagepetersen and Hangsheng Wang made substantial contributions of code. 
For comments, corrections, bug alerts and suggestions, we thank Monsuru Adepeju, Corey Anderson, Ang Qi Wei, Ryan Arellano, Jens \ifelse{latex}{\out{{\AA}str{\" o}m}}{Astrom}, Robert Aue, Marcel Austenfeld, Sandro Azaele, Malissa Baddeley, Guy Bayegnak, Colin Beale, Melanie Bell, Thomas Bendtsen, Ricardo Bernhardt, Andrew Bevan, Brad Biggerstaff, Anders Bilgrau, Leanne Bischof, Christophe Biscio, Roger Bivand, Jose M. Blanco Moreno, Florent Bonneu, Jordan Brown, Ian Buller, Julian Burgos, Simon Byers, Ya-Mei Chang, Jianbao Chen, Igor Chernayavsky, Y.C. Chin, Bjarke Christensen, \ifelse{latex}{\out{Luc\'{\i{}}a}}{Lucia} Cobo Sanchez, Jean-Francois Coeurjolly, Kim Colyvas, Hadrien Commenges, Rochelle Constantine, Robin Corria Ainslie, Richard Cotton, Marcelino de la Cruz, Peter Dalgaard, Mario D'Antuono, Sourav Das, Peter Diggle, Patrick Donnelly, Ian Dryden, Stephen Eglen, Ahmed El-Gabbas, Belarmain Fandohan, Olivier Flores, David Ford, Peter Forbes, Shane Frank, Janet Franklin, Funwi-Gabga Neba, Oscar Garcia, Agnes Gault, Jonas Geldmann, Marc Genton, Shaaban Ghalandarayeshi, Jason Goldstick, Pavel Grabarnik, C. Graf, Ute Hahn, Andrew Hardegen, Martin \Bogsted Hansen, Martin Hazelton, Juha Heikkinen, Mandy Hering, Markus Herrmann, Maximilian Hesselbarth, Paul Hewson, Hamidreza Heydarian, Kurt Hornik, Philipp Hunziker, Jack Hywood, Ross Ihaka, \ifelse{latex}{\out{\u{C}enk I\c{c}\"{o}s}}{Cenk Icos}, Aruna Jammalamadaka, Robert John-Chandran, Devin Johnson, Mahdieh Khanmohammadi, Bob Klaver, Lily Kozmian-Ledward, Peter Kovesi, Mike Kuhn, Jeff Laake, Robert Lamb, \ifelse{latex}{\out{Fr\'{e}d\'{e}ric}}{Frederic} Lavancier, Tom Lawrence, Tomas Lazauskas, Jonathan Lee, George Leser, Angela Li, Li Haitao, George Limitsios, Andrew Lister, Nestor Luambua, Ben Madin, Martin Maechler, Kiran Marchikanti, Jeff Marcus, Robert Mark, Peter McCullagh, Monia Mahling, Jorge Mateu Mahiques, Ulf Mehlig, Frederico Mestre, Sebastian Wastl Meyer, Mi Xiangcheng, Lore De Middeleer, Robin Milne, Enrique Miranda, Jesper \Moller, Annie \ifelse{latex}{\out{Molli{\'e}}}{Mollie}, Ines Moncada, Mehdi Moradi, Virginia Morera Pujol, Erika Mudrak, Gopalan Nair, Nader Najari, Nicoletta Nava, Linda Stougaard Nielsen, Felipe Nunes, Jens Randel Nyengaard, Jens \Oehlschlaegel, Thierry Onkelinx, Sean O'Riordan, Evgeni Parilov, Jeff Picka, Nicolas Picard, Tim Pollington, Mike Porter, Sergiy Protsiv, Adrian Raftery, Ben Ramage, Pablo Ramon, Xavier Raynaud, Nicholas Read, Matt Reiter, Ian Renner, Tom Richardson, Brian Ripley, Ted Rosenbaum, Barry Rowlingson, Jason Rudokas, Tyler Rudolph, John Rudge, Christopher Ryan, Farzaneh Safavimanesh, Aila \Sarkka, Cody Schank, Katja Schladitz, Sebastian Schutte, Bryan Scott, Olivia Semboli, \ifelse{latex}{\out{Fran\c{c}ois S\'{e}m\'{e}curbe}}{Francois Semecurbe}, Vadim Shcherbakov, Shen Guochun, Shi Peijian, Harold-Jeffrey Ship, Tammy L Silva, Ida-Maria Sintorn, Yong Song, Malte Spiess, Mark Stevenson, Kaspar Stucki, Jan Sulavik, Michael Sumner, P. Surovy, Ben Taylor, Thordis Linda Thorarinsdottir, Leigh Torres, Berwin Turlach, Torben Tvedebrink, Kevin Ummer, Medha Uppala, Andrew van Burgel, Tobias Verbeke, Mikko Vihtakari, Alexendre Villers, Fabrice Vinatier, Maximilian Vogtland, Sasha Voss, Sven Wagner, Hao Wang, H. Wendrock, Jan Wild, Carl G. Witthoft, Selene Wong, Maxime Woringer, Luke Yates, Mike Zamboni and Achim Zeileis. 
} \keyword{spatial} \keyword{package} spatstat.model/man/subfits.Rd0000644000176200001440000000524414331173100015733 0ustar liggesusers\name{subfits} \alias{subfits} \alias{subfits.new} \alias{subfits.old} \title{Extract List of Individual Point Process Models} \description{ Takes a Gibbs point process model that has been fitted to several point patterns simultaneously, and produces a list of fitted point process models for the individual point patterns. } \usage{ subfits(object, what="models", verbose=FALSE, new.coef=NULL) subfits.old(object, what="models", verbose=FALSE, new.coef=NULL) subfits.new(object, what="models", verbose=FALSE) } \arguments{ \item{object}{ An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{what}{ What should be returned. Either \code{"models"} to return the fitted models, or \code{"interactions"} to return the fitted interactions only. } \item{verbose}{ Logical flag indicating whether to print progress reports. } \item{new.coef}{ Advanced use only. Numeric vector or matrix of coefficients to replace the fitted coefficients \code{coef(object)}. } } \details{ \code{object} is assumed to have been generated by \code{\link{mppm}}. It represents a point process model that has been fitted to a list of several point patterns, with covariate data. For each of the \emph{individual} point pattern datasets, this function derives the corresponding fitted model for that dataset only (i.e. a point process model for the \eqn{i}th point pattern, that is consistent with \code{object}). If \code{what="models"}, the result is a list of point process models (a list of objects of class \code{"ppm"}), one model for each point pattern dataset in the original fit. If \code{what="interactions"}, the result is a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). Two different algorithms are provided, as \code{subfits.old} and \code{subfits.new}. Currently \code{subfits} is the same as the old algorithm \code{subfits.old} because the newer algorithm is too memory-hungry. } \value{ A list of point process models (a list of objects of class \code{"ppm"}) or a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). } \examples{ H <- hyperframe(Wat=waterstriders) fit <- mppm(Wat~x, data=H) subfits(fit) H$Wat[[3]] <- rthin(H$Wat[[3]], 0.1) fit2 <- mppm(Wat~x, data=H, random=~1|id) subfits(fit2) \testonly{a <- subfits.new(fit)} } \references{ \baddrubaturnbook } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{mppm}}, \code{\link{ppm}} } \keyword{spatial} \keyword{models} spatstat.model/man/residuals.kppm.Rd0000644000176200001440000000213114331173077017222 0ustar liggesusers\name{residuals.kppm} \alias{residuals.kppm} \title{ Residuals for Fitted Cox or Cluster Point Process Model } \description{ Given a Cox or cluster point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{kppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"kppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted.
} \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. } \seealso{ \code{\link{msr}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "Thomas") rr <- residuals(fit) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/is.multitype.ppm.Rd0000644000176200001440000000437114331173076017531 0ustar liggesusers\name{is.multitype.ppm} \alias{is.multitype.ppm} \title{Test Whether A Point Process Model is Multitype} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points that classify the points into several types. } \usage{ \method{is.multitype}{ppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a multitype point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a multitype point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model takes account of the marks, rather than ignoring them). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.multitype(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.multitype(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.multitype(fit3) # FALSE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.model/man/Kres.Rd0000644000176200001440000000563014510474260015171 0ustar liggesusers\name{Kres} \Rdversion{1.1} \alias{Kres} \title{ Residual K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{K} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Kres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Kcom}}. } \item{\dots}{ Arguments passed to \code{\link{Kcom}}.
} } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{K} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Kres} first calls \code{\link{Kcom}} to compute both the nonparametric estimate of the \eqn{K} function and its model compensator. Then \code{Kres} computes the difference between them, which is the residual \eqn{K}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Kcom}}. Then \code{Kres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Kcom}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gres}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ fit0 <- ppm(cells ~1) # uniform Poisson \testonly{ fit0 <- ppm(cells ~1, nd=16)} K0 <- Kres(fit0) K0 plot(K0) # isotropic-correction estimate plot(K0, ires ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells ~1, Strauss(0.08), nd=16)} K1 <- Kres(fit1) if(interactive()) { plot(K1, ires ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Kres(cells, interaction=Strauss(0.12))) } # How to make envelopes \donttest{ E <- envelope(fit1, Kres, model=fit1, nsim=19) plot(E) } # For computational efficiency Kc <- Kcom(fit1) K1 <- Kres(Kc) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/measureWeighted.Rd0000644000176200001440000000244714331173077017415 0ustar liggesusers\name{measureWeighted} \alias{measureWeighted} \title{ Weighted Version of a Measure } \description{ Given a measure \code{m} (object of class \code{"msr"}) and a spatially-varying weight function, construct the weighted version of \code{m}. } \usage{ measureWeighted(m, w) } \arguments{ \item{m}{ A measure (object of class \code{"msr"}). } \item{w}{ A pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving the numeric weight at each spatial location. } } \details{ For any region of space \code{B}, the weighted measure \code{wm} has the value \deqn{ wm(B) = \int_B w(x) dm(x) }{ wm(B) = integral[B] w(x) dm(x) } In any small region of space, the increment of the weighted measure \code{wm} is equal to the increment of the original measure \code{m} multiplied by the weight \code{w} at that location. } \value{ Another measure (object of class \code{"msr"}) on the same spatial domain. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. 
} \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measurePositive}} } \examples{ fit <- ppm(cells ~ x) res <- residuals(fit) measureWeighted(res, function(x,y){x}) } \keyword{spatial} \keyword{math} spatstat.model/man/dppspecden.Rd0000644000176200001440000000110414331173076016404 0ustar liggesusers\name{dppspecden} \alias{dppspecden} \title{Extract Spectral Density from Determinantal Point Process Model Object} \description{ Returns the spectral density of a determinantal point process model as a function of one argument \code{x}. } \usage{dppspecden(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{A function} \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda = 100, alpha=.01, nu=1, d=2) dppspecden(model) } \seealso{ \code{\link{dppspecdenrange}} } \keyword{spatial} \keyword{models} spatstat.model/man/reach.kppm.Rd0000644000176200001440000000220714331173077016315 0ustar liggesusers\name{reach.kppm} \alias{reach.kppm} \title{Range of Interaction for a Cox or Cluster Point Process Model} \description{ Returns the range of interaction for a Cox or cluster point process model. } \usage{ \method{reach}{kppm}(x, \dots, epsilon) } \arguments{ \item{x}{ Fitted point process model of class \code{"kppm"}. } \item{epsilon}{ Optional numerical value. Differences smaller than \code{epsilon} are treated as zero. } \item{\dots}{ Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a fitted point process model of class \code{"kppm"} may be defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. The argument \code{epsilon} specifies the tolerance; there is a sensible default. } \value{Numeric} \author{ \spatstatAuthors. } \examples{ fit <- kppm(redwood ~ 1) reach(fit) } \keyword{spatial} \keyword{models} spatstat.model/man/simulate.ppm.Rd0000644000176200001440000001025114331173077016701 0ustar liggesusers\name{simulate.ppm} \alias{simulate.ppm} \title{Simulate a Fitted Gibbs Point Process Model} \description{ Generates simulated realisations from a fitted Gibbs or Poisson point process model. } \usage{ \method{simulate}{ppm}(object, nsim=1, ..., singlerun = FALSE, start = NULL, control = default.rmhcontrol(object, w=w), w = window, window = NULL, project=TRUE, new.coef=NULL, verbose=FALSE, progress=(nsim > 1), drop=FALSE) } \arguments{ \item{object}{ Fitted point process model. An object of class \code{"ppm"}. } \item{nsim}{ Number of simulated realisations. } \item{singlerun}{ Logical. Whether to generate the simulated realisations from a single long run of the Metropolis-Hastings algorithm (\code{singlerun=TRUE}) or from separate, independent runs of the algorithm (\code{singlerun=FALSE}, the default). } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. Defaults to \code{list(n.start=npoints(data.ppm(object)))} meaning that the initial state of the algorithm has the same number of points as the original dataset. } \item{control}{Data controlling the running of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{w, window}{ Optional. The window in which the model is defined.
An object of class \code{"owin"}. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{verbose}{ Logical flag indicating whether to print progress reports from \code{\link{rmh.ppm}} during the simulation of each point pattern. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"ppm"} of fitted point process models. Simulations are performed by \code{\link{rmh.ppm}}. If \code{singlerun=FALSE} (the default), the simulated patterns are the results of independent runs of the Metropolis-Hastings algorithm. If \code{singlerun=TRUE}, a single long run of the algorithm is performed, and the state of the simulation is saved every \code{nsave} iterations to yield the simulated patterns. In the case of a single run, the behaviour is controlled by the parameters \code{nsave,nburn,nrep}. These are described in \code{\link{rmhcontrol}}. They may be passed in the \code{\dots} arguments or included in \code{control}. It is sufficient to specify two of the three parameters \code{nsave,nburn,nrep}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). It also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ \testonly{op <- spatstat.options(rmh.nrep=10)} fit <- ppm(japanesepines, ~1, Strauss(0.1)) simulate(fit, 2) simulate(fit, 2, singlerun=TRUE, nsave=1e4, nburn=1e4) \testonly{spatstat.options(op)} } \seealso{ \code{\link{ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/dppGauss.Rd0000644000176200001440000000215114331173076016050 0ustar liggesusers\name{dppGauss} \alias{dppGauss} \title{Gaussian Determinantal Point Process Model} \description{ Function generating an instance of the Gaussian determinantal point process model. } \usage{dppGauss(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The Gaussian DPP is defined in (Lavancier, \Moller and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. 
} \examples{ m <- dppGauss(lambda=100, alpha=.05, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat.model/man/dummify.Rd0000644000176200001440000000342714331173076015743 0ustar liggesusers\name{dummify} \alias{dummify} \title{ Convert Data to Numeric Values by Constructing Dummy Variables } \description{ Converts data of any kind to numeric values. A factor is expanded to a set of dummy variables. } \usage{ dummify(x) } \arguments{ \item{x}{ Vector, factor, matrix or data frame to be converted. } } \details{ This function converts data (such as a factor) to numeric values in order that the user may calculate, for example, the mean, variance, covariance and correlation of the data. If \code{x} is a numeric vector or integer vector, it is returned unchanged. If \code{x} is a logical vector, it is converted to a 0-1 matrix with 2 columns. The first column contains a 1 if the logical value is \code{FALSE}, and the second column contains a 1 if the logical value is \code{TRUE}. If \code{x} is a complex vector, it is converted to a matrix with 2 columns, containing the real and imaginary parts. If \code{x} is a factor, the result is a matrix of 0-1 dummy variables. The matrix has one column for each possible level of the factor. The \code{(i,j)} entry is equal to 1 when the \code{i}th factor value equals the \code{j}th level, and is equal to 0 otherwise. If \code{x} is a matrix or data frame, the appropriate conversion is applied to each column of \code{x}. Note that, unlike \code{\link[stats]{model.matrix}}, this command converts a factor into a full set of dummy variables (one column for each level of the factor). } \value{ A numeric matrix. } \author{ \adrian } \examples{ chara <- sample(letters[1:3], 8, replace=TRUE) logi <- (runif(8) < 0.3) comp <- round(4*runif(8) + 3*runif(8) * 1i, 1) nume <- 8:1 + 0.1 df <- data.frame(nume, chara, logi, comp) df dummify(df) } \keyword{math} spatstat.model/man/envelope.Rd0000644000176200001440000007065514510474260016113 0ustar liggesusers\name{envelope.ppm} \alias{envelope.ppm} \alias{envelope.kppm} \alias{envelope.slrm} \title{Simulation Envelopes of Summary Function} \description{ Computes simulation envelopes of a summary function. 
} \usage{ \method{envelope}{ppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{kppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{slrm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. This option is currently not available for \code{envelope.kppm}. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. This option is currently not available for \code{envelope.kppm}. 
} \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{clipdata}{ Logical flag indicating whether the data point pattern should be clipped to the same window as the simulated patterns, before the summary function for the data is computed. This should usually be \code{TRUE} to ensure that the data and simulations are properly comparable. } \item{start,control}{ Optional. These specify the arguments \code{start} and \code{control} of \code{rmh}, giving complete control over the simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{nrep}{ Number of iterations in the Metropolis-Hastings simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. Default is \code{use.theory=TRUE} if \code{Y} is a point pattern, or a point process model equivalent to Complete Spatial Randomness, and \code{use.theory=FALSE} otherwise. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation.
} \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ An object of class \code{"envelope"} and \code{"fv"}, see \code{\link{fv.object}}, which can be printed and plotted directly. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the summary function \code{fun} has been estimated } \item{obs}{ values of the summary function for the data point pattern } \item{lo}{ lower envelope of simulations } \item{hi}{ upper envelope of simulations } and \emph{either} \item{theo}{ theoretical value of the summary function under CSR (Complete Spatial Randomness, a uniform Poisson point process) if the simulations were generated according to CSR } \item{mmean}{ estimated theoretical value of the summary function, computed by averaging simulated values, if the simulations were not generated according to CSR. } Additionally, if \code{savepatterns=TRUE}, the return value has an attribute \code{"simpatterns"} which is a list containing the \code{nsim} simulated patterns. If \code{savefuns=TRUE}, the return value has an attribute \code{"simfuns"} which is an object of class \code{"fv"} containing the summary functions computed for each of the \code{nsim} simulated patterns. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. For the most basic use, if you have a point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, Kest,nsim=39))} to see the \eqn{K} function for \code{X} plotted together with the envelopes of the \eqn{K} function for 39 simulations of CSR. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"}, \code{"kppm"} and \code{"slrm"} described here. 
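For instance, the basic test of Complete Spatial Randomness just described can be carried out as follows (a minimal sketch, assuming the \code{cells} dataset from \pkg{spatstat.data}; with \code{nsim=39} and the default \code{nrank=1}, the two-sided pointwise test has significance level \eqn{2/(39+1) = 0.05}): \preformatted{
  E <- envelope(cells, Kest, nsim=39)
  plot(E)  # K function with pointwise envelopes from 39 CSR simulations
}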
There are also methods for the classes \code{"pp3"}, \code{"lpp"} and \code{"lppm"} which are described separately under \code{\link{envelope.pp3}} and \code{envelope.lpp}. Envelopes can also be computed from other envelopes, using \code{\link{envelope.envelope}}. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{Y} is a point pattern (an object of class \code{"ppp"}) and \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. (If \code{Y} is a multitype point pattern, then the simulated patterns are also given independent random marks; the probability distribution of the random marks is determined by the relative frequencies of marks in \code{Y}.) \item If \code{Y} is a fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"}) and \code{simulate=NULL}, then this routine generates \code{nsim} simulated realisations of that model. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. It may be either \itemize{ \item an expression in the R language, typically containing a call to a random generator. This expression will be evaluated \code{nsim} times to yield \code{nsim} point patterns. For example if \code{simulate=expression(runifpoint(100))} then each simulated pattern consists of exactly 100 independent uniform random points. \item a function in the R language, typically containing a call to a random generator. This function will be applied repeatedly to the original data pattern \code{Y} to yield \code{nsim} point patterns. For example if \code{simulate=\link{rlabel}} then each simulated pattern was generated by evaluating \code{\link{rlabel}(Y)} and consists of a randomly-relabelled version of \code{Y}. \item a list of point patterns. The entries in this list will be taken as the simulated patterns. \item an object of class \code{"envelope"}. This should have been produced by calling \code{envelope} with the argument \code{savepatterns=TRUE}. The simulated point patterns that were saved in this object will be extracted and used as the simulated patterns for the new envelope computation. This makes it possible to plot envelopes for two different summary functions based on exactly the same set of simulated point patterns. } } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf}, \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot}, \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti}, \code{Jmulti} or \code{Kinhom}. It may also be a character string containing the name of one of these functions. The statistic \code{fun} can also be a user-supplied function; if so, then it must have arguments \code{X} and \code{r} like those in the functions listed above, and it must return an object of class \code{"fv"}. Upper and lower critical envelopes are computed in one of the following ways: \describe{ \item{pointwise:}{by default, envelopes are calculated pointwise (i.e. for each value of the distance argument \eqn{r}), by sorting the \code{nsim} simulated values, and taking the \code{m}-th lowest and \code{m}-th highest values, where \code{m = nrank}. 
For example if \code{nrank=1}, the upper and lower envelopes are the pointwise maximum and minimum of the simulated values. The pointwise envelopes are \bold{not} \dQuote{confidence bands} for the true value of the function! Rather, they specify the critical points for a Monte Carlo test (Ripley, 1981). The test is constructed by choosing a \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the observed function value lies outside the envelope \emph{at this value of} \eqn{r}. This test has exact significance level \code{alpha = 2 * nrank/(1 + nsim)}. } \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are determined as follows. First we calculate the theoretical mean value of the summary statistic (if we are testing CSR, the theoretical value is supplied by \code{fun}; otherwise we perform a separate set of \code{nsim2} simulations, compute the average of all these simulated values, and take this average as an estimate of the theoretical mean value). Then, for each simulation, we compare the simulated curve to the theoretical curve, and compute the maximum absolute difference between them (over the interval of \eqn{r} values specified by \code{ginterval}). This gives a deviation value \eqn{d_i}{d[i]} for each of the \code{nsim} simulations. Finally we take the \code{m}-th largest of the deviation values, where \code{m=nrank}, and call this \code{dcrit}. Then the simultaneous envelopes are of the form \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where \code{expected} is either the theoretical mean value \code{theo} (if we are testing CSR) or the estimated theoretical value \code{mmean} (if we are testing another model). The simultaneous critical envelopes have constant width \code{2 * dcrit}. The simultaneous critical envelopes allow us to perform a different Monte Carlo test (Ripley, 1981). The test rejects the null hypothesis if the graph of the observed function lies outside the envelope \bold{at any value of} \eqn{r}. This test has exact significance level \code{alpha = nrank/(1 + nsim)}. This test can also be performed using \code{\link{mad.test}}. } \item{based on sample moments:}{if \code{VARIANCE=TRUE}, the algorithm calculates the (pointwise) sample mean and sample variance of the simulated functions. Then the envelopes are computed as mean plus or minus \code{nSD} standard deviations. These envelopes do not have an exact significance interpretation. They are a naive approximation to the critical points of the Neyman-Pearson test assuming the summary statistic is approximately Normally distributed. } } The return value is an object of class \code{"fv"} containing the summary function for the data point pattern, the upper and lower simulation envelopes, and the theoretical expected value (exact or estimated) of the summary function for the model being tested. It can be plotted using \code{\link{plot.envelope}}. If \code{VARIANCE=TRUE} then the return value also includes the sample mean, sample variance and other quantities. Arguments can be passed to the function \code{fun} through \code{...}. This means that you simply specify these arguments in the call to \code{envelope}, and they will be passed to \code{fun}. In particular, the argument \code{correction} determines the edge correction to be used to calculate the summary statistic. See the section on Edge Corrections, and the Examples. Arguments can also be passed to the function \code{fun} through the list \code{funargs}. 
This mechanism is typically used if an argument of \code{fun} has the same name as an argument of \code{envelope}. The list \code{funargs} should contain entries of the form \code{name=value}, where each \code{name} is the name of an argument of \code{fun}. There is also an option, rarely used, in which different function arguments are used when computing the summary function for the data \code{Y} and for the simulated patterns. If \code{funYargs} is given, it will be used when the summary function for the data \code{Y} is computed, while \code{funargs} will be used when computing the summary function for the simulated patterns. This option is only needed in rare cases: usually the basic principle requires that the data and simulated patterns must be treated equally, so that \code{funargs} and \code{funYargs} should be identical. If \code{Y} is a fitted cluster point process model (object of class \code{"kppm"}), and \code{simulate=NULL}, then the model is simulated directly using \code{\link[spatstat.model]{simulate.kppm}}. If \code{Y} is a fitted Gibbs point process model (object of class \code{"ppm"}), and \code{simulate=NULL}, then the model is simulated by running the Metropolis-Hastings algorithm \code{\link{rmh}}. Complete control over this algorithm is provided by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. For simultaneous critical envelopes (\code{global=TRUE}) the following options are also useful: \describe{ \item{\code{ginterval}}{determines the interval of \eqn{r} values over which the deviation between curves is calculated. It should be a numeric vector of length 2. There is a sensible default (namely, the recommended plotting interval for \code{fun(X)}, or the range of \code{r} values if \code{r} is explicitly specified). } \item{\code{transform}}{specifies a transformation of the summary function \code{fun} that will be carried out before the deviations are computed. Such transforms are useful if \code{global=TRUE} or \code{VARIANCE=TRUE}. The \code{transform} must be an expression object using the symbol \code{.} to represent the function value (and possibly other symbols recognised by \code{\link{with.fv}}). For example, the conventional way to normalise the \eqn{K} function (Ripley, 1981) is to transform it to the \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)} and this is implemented by setting \code{transform=expression(sqrt(./pi))}. } } It is also possible to extract the summary functions for each of the individual simulated point patterns, by setting \code{savefuns=TRUE}. Then the return value also has an attribute \code{"simfuns"} containing all the summary functions for the individual simulated patterns. It is an \code{"fv"} object containing functions named \code{sim1, sim2, ...} representing the \code{nsim} summary functions. It is also possible to save the simulated point patterns themselves, by setting \code{savepatterns=TRUE}. Then the return value also has an attribute \code{"simpatterns"} which is a list of length \code{nsim} containing all the simulated point patterns. See \code{\link{plot.envelope}} and \code{\link{plot.fv}} for information about how to plot the envelopes. Different envelopes can be recomputed from the same data using \code{\link{envelope.envelope}}. Envelopes can be combined using \code{\link{pool.envelope}}.
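As an illustration of the \code{global} and \code{transform} options described above, the following minimal sketch computes simultaneous envelopes for the \eqn{L} function, obtained by transforming the \eqn{K} function on the fly (assuming the \code{cells} dataset from \pkg{spatstat.data}; with \code{nsim=19} the simultaneous test has significance level \eqn{1/(19+1) = 0.05}): \preformatted{
  EL <- envelope(cells, Kest, nsim=19, global=TRUE,
                 transform=expression(sqrt(./pi)))
  plot(EL)
}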
} \section{Errors and warnings}{ An error may be generated if one of the simulations produces a point pattern that is empty, or is otherwise unacceptable to the function \code{fun}. The upper envelope may be \code{NA} (plotted as plus or minus infinity) if some of the function values computed for the simulated point patterns are \code{NA}. Whether this occurs will depend on the function \code{fun}, but it usually happens when the simulated point pattern does not contain enough points to compute a meaningful value. } \section{Confidence intervals}{ Simulation envelopes do \bold{not} compute confidence intervals; they generate significance bands. If you really need a confidence interval for the true summary function of the point process, use \code{\link{lohboot}}. See also \code{\link{varblock}}. } \section{Edge corrections}{ It is common to apply a correction for edge effects when calculating a summary function such as the \eqn{K} function. Typically the user has a choice between several possible edge corrections. In a call to \code{envelope}, the user can specify the edge correction to be applied in \code{fun}, using the argument \code{correction}. See the Examples below. \describe{ \item{Summary functions in \pkg{spatstat}}{ Summary functions that are available in \pkg{spatstat}, such as \code{\link{Kest}}, \code{\link{Gest}} and \code{\link{pcf}}, have a standard argument called \code{correction} which specifies the name of one or more edge corrections. The list of available edge corrections is different for each summary function, and may also depend on the kind of window in which the point pattern is recorded. In the case of \code{Kest} (the default and most frequently used value of \code{fun}) the best edge correction is Ripley's isotropic correction if the window is rectangular or polygonal, and the translation correction if the window is a binary mask. See the help files for the individual functions for more information. All the summary functions in \pkg{spatstat} recognise the option \code{correction="best"} which gives the \dQuote{best} (most accurate) available edge correction for that function. In a call to \code{envelope}, if \code{fun} is one of the summary functions provided in \pkg{spatstat}, then the default is \code{correction="best"}. This means that \emph{by default, the envelope will be computed using the \dQuote{best} available edge correction}. The user can override this default by specifying the argument \code{correction}. For example the computation can be accelerated by choosing another edge correction which is less accurate than the \dQuote{best} one, but faster to compute. } \item{User-written summary functions}{ If \code{fun} is a function written by the user, then \code{envelope} has to guess what to do. If \code{fun} has an argument called \code{correction}, or has \code{\dots} arguments, then \code{envelope} assumes that the function can handle a correction argument. To compute the envelope, \code{fun} will be called with a \code{correction} argument. The default is \code{correction="best"}, unless overridden in the call to \code{envelope}. Otherwise, if \code{fun} does not have an argument called \code{correction} and does not have \code{\dots} arguments, then \code{envelope} assumes that the function \emph{cannot} handle a correction argument. To compute the envelope, \code{fun} is called without a correction argument. } } } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. 
(2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}. Arnold. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}. Cambridge University Press. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics}. John Wiley and Sons. } \seealso{ \code{\link{dclf.test}}, \code{\link{mad.test}} for envelope-based tests. \code{\link{fv.object}}, \code{\link{plot.envelope}}, \code{\link{plot.fv}}, \code{\link{envelope.envelope}}, \code{\link{pool.envelope}} for handling envelopes. There are also methods for \code{print} and \code{summary}. \code{\link{Kest}}, \code{\link{Gest}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{pcf}}, \code{\link{ppp}}, \code{\link[spatstat.model]{ppm}}, \code{\link{default.expand}} } \examples{ X <- simdat online <- interactive() Nsim <- if(online) 19 else 3 # Envelope of K function for simulations from Gibbs model if(online) { fit <- ppm(cells ~1, Strauss(0.05)) plot(envelope(fit)) plot(envelope(fit, global=TRUE)) } else { fit <- ppm(cells ~1, Strauss(0.05), nd=20) E <- envelope(fit, nsim=Nsim, correction="border", nrep=100) E <- envelope(fit, nsim=Nsim, correction="border", global=TRUE, nrep=100) } # Envelope of K function for simulations from cluster model fit <- kppm(redwood ~1, "Thomas") if(online) { plot(envelope(fit, Gest)) plot(envelope(fit, Gest, global=TRUE)) } else { E <- envelope(fit, Gest, correction="rs", nsim=Nsim, global=TRUE, nrep=100) } # Envelope of INHOMOGENEOUS K-function with fitted trend # The following is valid. # Setting lambda=fit means that the fitted model is re-fitted to # each simulated pattern to obtain the intensity estimates for Kinhom. # (lambda=NULL would also be valid) fit <- kppm(redwood ~1, clusters="MatClust") if(online) { plot(envelope(fit, Kinhom, lambda=fit, nsim=19)) } else { envelope(fit, Kinhom, lambda=fit, nsim=Nsim) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.model/man/inforder.family.Rd0000644000176200001440000000200514331173076017350 0ustar liggesusers\name{inforder.family} \alias{inforder.family} \title{Infinite Order Interaction Family} \description{ An object describing the family of all Gibbs point processes with infinite interaction order. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have infinite order of interaction, such as the area-interaction process \code{\link{AreaInter}}. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ \code{\link{AreaInter}} to create the area interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322.
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/response.Rd0000644000176200001440000000411014331173077016116 0ustar liggesusers\name{response} \alias{response} \alias{response.lm} \alias{response.glm} \alias{response.ppm} \alias{response.dppm} \alias{response.kppm} \alias{response.slrm} \alias{response.rppm} \alias{response.mppm} \title{ Extract the Values of the Response from a Fitted Model } \description{ Given a fitted model (of any kind), extract the values of the response variable. For a point process model, the observed point pattern is extracted. } \usage{ response(object) \method{response}{lm}(object) \method{response}{glm}(object) \method{response}{ppm}(object) \method{response}{kppm}(object) \method{response}{dppm}(object) \method{response}{slrm}(object) \method{response}{rppm}(object) \method{response}{mppm}(object) } \arguments{ \item{object}{ A fitted model (object of class \code{"lm"}, \code{"glm"}, \code{"ppm"}, \code{"kppm"}, \code{"dppm"}, \code{"slrm"}, \code{"rppm"} or \code{"mppm"}, or some other class). } } \details{ For fitted linear models of class \code{"lm"} and fitted generalized linear models of class \code{"glm"}, the numerical values of the response variable are extracted if they are available, and otherwise \code{NULL} is returned. For fitted point process models of class \code{"ppm"}, \code{"kppm"}, \code{"dppm"}, \code{"slrm"}, \code{"lppm"} or \code{"rppm"}, the original data point pattern is extracted. For a fitted point process model of class \code{"mppm"}, the list of original data point patterns is extracted. } \value{ For \code{response.lm} and \code{response.glm}, a numeric vector, or \code{NULL}. For \code{response.ppm}, \code{response.kppm}, \code{response.dppm}, \code{response.slrm} and \code{response.rppm}, a two-dimensional spatial point pattern (class \code{"ppp"}). For \code{response.mppm}, a list of two-dimensional spatial point patterns (objects of class \code{"ppp"}). The list also belongs to classes \code{"solist"} and \code{"ppplist"}. } \author{ \adrian. } \examples{ fit <- ppm(cells ~ x) response(fit) } \keyword{manip} \keyword{models} spatstat.model/man/plot.plotppm.Rd0000644000176200001440000000635614331173077016746 0ustar liggesusers\name{plot.plotppm} \alias{plot.plotppm} \title{Plot a plotppm Object Created by plot.ppm} \description{ The function \code{\link{plot.ppm}} produces objects which specify plots of fitted point process models. The function \code{plot.plotppm} carries out the actual plotting of these objects. } \usage{ \method{plot}{plotppm}(x, data = NULL, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how = c("persp", "image", "contour"), \dots, pppargs) } \arguments{ \item{x}{ An object of class \code{plotppm} produced by \code{\link{plot.ppm}()}. } \item{data}{ The point pattern (an object of class \code{ppp}) to which the point process model was fitted (by \code{\link{ppm}}). } \item{trend}{ Logical scalar; should the trend component of the fitted model be plotted? } \item{cif}{ Logical scalar; should the complete conditional intensity of the fitted model be plotted? } \item{se}{ Logical scalar; should the estimated standard error of the fitted intensity be plotted? } \item{pause}{ Logical scalar indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. } \item{how}{ Character string or character vector indicating the style or styles of plots to be performed.
} \item{\dots}{ Extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } \item{pppargs}{ List of extra arguments passed to \code{\link{plot.ppp}} when displaying the original point pattern data. } } \details{ If argument \code{data} is supplied then the point pattern will be superimposed on the image and contour plots. Sometimes a fitted model does not have a trend component, or the trend component may constitute all of the conditional intensity (if the model is Poisson). In such cases the object \code{x} will not contain a trend component, or will contain only a trend component. This will also be the case if one of the arguments \code{trend} and \code{cif} was set equal to \code{FALSE} in the call to \code{plot.ppm()} which produced \code{x}. If this is so then only the item which is present will be plotted. Explicitly setting \code{trend=TRUE}, or \code{cif=TRUE}, respectively, will then give an error. } \value{ None. } \section{Warning}{ Arguments which are passed to \code{persp}, \code{image}, and \code{contour} via the \dots argument get passed to any of the other functions listed in the \code{how} argument, and won't be recognized by them. This leads to a lot of annoying but harmless warning messages. Arguments to \code{persp} may be supplied via \code{\link{spatstat.options}()} which alleviates the warning messages in this instance. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.ppm}()} } \examples{ if(interactive()) { m <- ppm(cells ~ 1, Strauss(0.05)) mpic <- plot(m) # Perspective plot only, with altered parameters: plot(mpic,how="persp", theta=-30,phi=40,d=4) # All plots, with altered parameters for perspective plot: op <- spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) plot(mpic) # Revert spatstat.options(op) } } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat.model/man/auc.Rd0000644000176200001440000000465514331173076015045 0ustar liggesusers\name{auc.ppm} \alias{auc.ppm} \alias{auc.kppm} \alias{auc.slrm} \title{ Area Under ROC Curve } \description{ Compute the AUC (area under the Receiver Operating Characteristic curve) for a fitted point process model. } \usage{ \method{auc}{ppm}(X, \dots) \method{auc}{kppm}(X, \dots) \method{auc}{slrm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"lppm"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } } \details{ This command computes the AUC, the area under the Receiver Operating Characteristic curve. The ROC itself is computed by \code{\link{roc}}. For a fitted point process model \code{X}, the AUC measures the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. Suppose \eqn{\lambda(u)}{lambda(u)} is the intensity function of the model. The AUC is the probability that \eqn{\lambda(x_i) > \lambda(U)}{lambda(x[i]) > lambda(U)}. That is, AUC is the probability that a randomly-selected data point has higher predicted intensity than does a randomly-selected spatial location. The AUC is \bold{not} a measure of the goodness-of-fit of the model (Lobo et al, 2007). (For spatial logistic regression models (class \code{"slrm"}) replace \dQuote{intensity} by \dQuote{probability of presence} in the text above.) } \value{ Numeric.
For \code{auc.ppm}, \code{auc.kppm} and \code{auc.lppm}, the result is a numeric vector of length 2 giving the AUC value and the theoretically expected AUC value for this model. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link{roc}} } \examples{ fit <- ppm(swedishpines ~ x+y) auc(fit) } \keyword{spatial} spatstat.model/man/dppeigen.Rd0000644000176200001440000000124314331173076016056 0ustar liggesusers\name{dppeigen} \alias{dppeigen} \title{Internal function calculating eig and index} \description{This function is mainly for internal package use and is usually not called by the user.} \usage{dppeigen(model, trunc, Wscale, stationary = FALSE)} \arguments{ \item{model}{object of class \code{"detpointprocfamily"} } \item{trunc}{numeric giving the truncation} \item{Wscale}{numeric giving the scale of the window relative to a unit box} \item{stationary}{logical indicating whether the stationarity of the model should be used (only works in dimension 2).} } \value{A list} \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.model/man/psst.Rd0000644000176200001440000001217014510474260015253 0ustar liggesusers\name{psst} \alias{psst} \title{ Pseudoscore Diagnostic For Fitted Model against General Alternative } \description{ Given a point process model fitted to a point pattern dataset, and any choice of functional summary statistic, this function computes the pseudoscore test statistic of goodness-of-fit for the model. } \usage{ psst(object, fun, r = NULL, breaks = NULL, ..., model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef=NULL, hi.res=NULL, funargs = list(correction="best"), verbose=TRUE) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{fun}{ Summary function to be applied to each point pattern. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{S(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. 
If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{funargs}{ List of additional arguments to be passed to \code{fun}. } \item{verbose}{ Logical value determining whether to print progress reports during the computation. } } \details{ Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. Given a functional summary statistic \eqn{S}, consider a family of alternative models obtained by exponential tilting of the null model by \eqn{S}. The pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r) - \int_W \Delta S(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x, r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. This algorithm computes \eqn{V(r)} by direct evaluation of the sum and integral. It is computationally intensive, but it is available for any summary statistic \eqn{S(r)}. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Special cases: \code{\link{psstA}}, \code{\link{psstG}}. Alternative functions: \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ if(live <- interactive()) { fit0 <- ppm(cells ~ 1) } else { fit0 <- ppm(cells ~ 1, nd=8) } G0 <- psst(fit0, Gest) G0 if(live) plot(G0) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/ord.family.Rd0000644000176200001440000000240614331173077016332 0ustar liggesusers\name{ord.family} \alias{ord.family} \title{Ord Interaction Process Family} \description{ An object describing the family of all Ord interaction point processes } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of point process models introduced by Ord (1977). 
If you need to create a specific Ord-type model for use in analysis, use the function \code{\link{OrdThresh}} or \code{\link{Ord}}. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{Ord}}, \code{\link{OrdThresh}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172--212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/lurking.mppm.Rd0000644000176200001440000000671114331173076016713 0ustar liggesusers\name{lurking.mppm} \alias{lurking.mppm} \title{ Lurking Variable Plot for Multiple Point Patterns } \description{ Generate a lurking variable plot of spatial point process residuals against a covariate, for a model fitted to several point patterns. } \usage{ \method{lurking}{mppm}(object, covariate, type="eem", \dots, separate = FALSE, plot.it = TRUE, covname, oldstyle = FALSE, nx = 512, main="") } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{covariate}{ The covariate to be used on the horizontal axis. Either an \code{expression} which can be evaluated in the original data, or a list of pixel images, one image for each point pattern in the original data. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. } \item{\dots}{ Additional arguments passed to \code{\link{lurking.ppm}}, including arguments controlling the plot. } \item{separate}{ Logical value indicating whether to compute a separate lurking variable plot for each of the original point patterns. If \code{FALSE} (the default), a single lurking-variable plot is produced by combining residuals from all patterns. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{main}{ Character string giving a main title for the plot. } } \details{ This function generates a \sQuote{lurking variable} plot for a point process model fitted to several point patterns. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model. The function \code{lurking} is generic. This is the method for the class \code{mppm}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the model-fitting algorithm \code{\link{mppm}}.
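For example, the covariate may be supplied as a list of pixel images, one for each row of the original data. The following is a minimal sketch only; it assumes (as in the Examples below) that the \code{demohyper} dataset has a column \code{Image} of pixel images, and that extracting that column with \code{with} yields a suitable list of images: \preformatted{
  # fit a model to the replicated point patterns in 'demohyper'
  fit <- mppm(Points ~ Image + Group, demohyper)
  # extract the covariate images, one for each point pattern
  imlist <- with(demohyper, Image)
  # lurking variable plot against the image covariate
  lurking(fit, imlist, type="eem", covname="Image")
}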
} \value{ If \code{separate=FALSE} (the default), the return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. See \code{\link{lurking}} for details of the format. If \code{separate=TRUE}, the result is a list of such objects, and also belongs to the class \code{anylist} so that it can be printed and plotted. } \author{ \adrian, with thanks to Nicholas Read. } \seealso{ \code{\link{lurking.ppm}} } \examples{ fit <- mppm(Points ~ Image + Group, demohyper) lurking(fit, expression(Image), type="P") lurking(fit, expression(Image), type="P", separate=TRUE) } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat.model/man/spatstat.model-internal.Rd0000644000176200001440000004170214514520744021044 0ustar liggesusers\name{spatstat.model-internal} \title{Internal spatstat.model functions} \alias{accumulateStatus} \alias{active.interactions} \alias{adaptcoef} \alias{affine.msr} \alias{areadelta2} \alias{as.isf} \alias{augment.msr} \alias{bigvaluerule} \alias{blankcoefnames} \alias{bt.frame} \alias{cannot.update} \alias{check.separable} \alias{coef.summary.kppm} \alias{coef.summary.ppm} \alias{coef.summary.slrm} \alias{coef.vblogit} \alias{condSimCox} \alias{damaged.ppm} \alias{data.mppm} \alias{deltasuffstat} \alias{dfbetas.ppmInfluence} \alias{diagnose.ppm.engine} \alias{dim.msr} \alias{dimnames.msr} \alias{doMultiStraussHard} \alias{dppDpcf} \alias{dppmFixAlgorithm} \alias{dppmFixIntensity} \alias{DPPSaddle} \alias{DPPSaddlePairwise} \alias{enet.engine} \alias{equalpairs} \alias{evalInteraction} \alias{evalInterEngine} \alias{evalPairPotential} \alias{evalPairwiseTerm} \alias{expandDot} \alias{extractAIC.slrm} \alias{fakefii} \alias{family.hackglmmPQL} \alias{family.vblogit} \alias{fill.coefs} \alias{findCovariate} \alias{fii} \alias{flipxy.msr} \alias{forbid.logi} \alias{formula.hackglmmPQL} \alias{getdataname} \alias{getglmdata} \alias{getglmfit} \alias{getglmsubset} \alias{getppmdatasubset} \alias{getppmOriginalCovariates} \alias{geyercounts} \alias{geyerdelta2} \alias{GLMpredict} \alias{hackglmmPQL} \alias{hasglmfit} \alias{hierarchicalordering} \alias{hiermat} \alias{ho.engine} \alias{illegal.iformula} \alias{impliedpresence} \alias{impliedcoefficients} \alias{influence.ppmInfluence} \alias{instantiate.interact} \alias{interactionfamilyname} \alias{intermaker} \alias{is.expandable.ppm} \alias{is.interact} \alias{is.marked.mppm} \alias{is.marked.msr} \alias{is.marked.rppm} \alias{is.marked.slrm} \alias{is.mppm} \alias{is.multitype.mppm} \alias{is.multitype.msr} \alias{is.multitype.rppm} \alias{is.multitype.slrm} \alias{is.poisson.mppm} \alias{is.poisson.rppm} \alias{Kpcf.kppm} \alias{Kmodel.slrm} \alias{killinteraction} \alias{kppmComLik} \alias{kppmMinCon} \alias{kppmPalmLik} \alias{kppmCLadap} \alias{labels.ppm} \alias{leverage.ppmInfluence} \alias{lines.objsurf} \alias{logi.engine} \alias{logLik.vblogit} \alias{LurkEngine} \alias{make.pspace} \alias{mapInterVars} \alias{Mayer} \alias{model.se.image} \alias{modelFrameGam} \alias{mpl.engine} \alias{mpl.get.covariates} \alias{mpl.prepare} \alias{mpl.usable} \alias{newformula} \alias{newstyle.coeff.handling} \alias{nndcumfun} \alias{no.trend.ppm} \alias{objsurfEngine} \alias{optimConverged} \alias{optimStatus} \alias{optimNsteps} \alias{outdated.interact} \alias{oversize.quad} \alias{parameters.detpointprocfamily} \alias{PairPotentialType} \alias{partialModelMatrix} \alias{pcfmodel.slrm} \alias{ploterodewin} \alias{ploterodeimage} \alias{plot.addvar} \alias{plot.lurk} 
\alias{plot.minconfit} \alias{plot.parres} \alias{plot.qqppm} \alias{poisson.fits.better} \alias{PoissonCompareCalc} \alias{PoisSaddle} \alias{PoisSaddleArea} \alias{PoisSaddleGeyer} \alias{PoisSaddlePairwise} \alias{PPMmodelmatrix} \alias{ppm.default} \alias{ppmCovariates} \alias{ppmDerivatives} \alias{ppmInfluenceEngine} \alias{predict.profilepl} \alias{predict.vblogit} \alias{printStatus} \alias{printStatusList} \alias{print.addvar} \alias{print.bt.frame} \alias{print.diagppm} \alias{print.detpointprocfamily} \alias{print.detpointprocfamilyfun} \alias{print.hierarchicalordering} \alias{print.influence.ppm} \alias{print.interact} \alias{print.intermaker} \alias{print.isf} \alias{print.leverage.ppm} \alias{print.lurk} \alias{print.minconfit} \alias{print.mppm} \alias{print.msr} \alias{print.parres} \alias{print.plotppm} \alias{print.profilepl} \alias{print.qqppm} \alias{print.rppm} \alias{print.summary.mppm} \alias{print.summary.slrm} \alias{print.vblogit} \alias{quad.mppm} \alias{quadBlockSizes} \alias{reach.slrm} \alias{reduceformula} \alias{reincarnate.interact} \alias{rescale.msr} \alias{resid4plot} \alias{resid1plot} \alias{resid1panel} \alias{rotate.msr} \alias{SaddleApprox} \alias{safeFiniteValue} \alias{safePositiveValue} \alias{scalardilate.msr} \alias{shift.influence.ppm} \alias{shift.leverage.ppm} \alias{shift.msr} \alias{signalStatus} \alias{simulate.profilepl} \alias{slr.prepare} \alias{slrAssemblePixelData} \alias{slrmInfluence} \alias{spatialCovariateEvidence.ppm} \alias{spatialCovariateEvidence.slrm} \alias{spatialCovariateUnderModel} \alias{spatialCovariateUnderModel.ppm} \alias{spatialCovariateUnderModel.kppm} \alias{spatialCovariateUnderModel.dppm} \alias{spatialCovariateUnderModel.slrm} \alias{spatstatDPPModelInfo} \alias{splitHybridInteraction} \alias{sp.foundclass} \alias{sp.foundclasses} \alias{strausscounts} \alias{stripGLMM} \alias{suffloc} \alias{suffstat.generic} \alias{suffstat.poisson} \alias{summary.mppm} \alias{summary.msr} \alias{summary.profilepl} \alias{summary.vblogit} \alias{terms.rppm} \alias{tweak.coefs} \alias{unitname.msr} \alias{unitname<-.msr} \alias{update.ippm} \alias{update.msr} \alias{updateData.ppm} \alias{updateData.kppm} \alias{updateData.dppm} \alias{updateData.slrm} \alias{varcountEngine} \alias{versionstring.interact} \alias{versionstring.ppm} \alias{windows.mppm} %%%%%%% \description{ Internal spatstat.model functions. 
} \usage{ accumulateStatus(x, stats) active.interactions(object) adaptcoef(new.coef, fitcoef, drop) \method{affine}{msr}(X, mat, vec, \dots) areadelta2(X, r, \dots, sparseOK) as.isf(object) augment.msr(x, \dots, sigma, recompute) blankcoefnames(x) bt.frame(Q, trend, interaction, \dots, covariates, correction, rbord, use.gam, allcovar) bigvaluerule(objfun, objargs, startpar, \dots) cannot.update(\dots) check.separable(dmat, covname, isconstant, fatal) \method{coef}{summary.kppm}(object, \dots) \method{coef}{summary.ppm}(object, \dots) \method{coef}{summary.slrm}(object, \dots) \method{coef}{vblogit}(object, \dots) condSimCox(object, nsim, \dots, window, n.cond, w.cond, giveup, maxchunk, saveLambda, verbose, drop) damaged.ppm(object) data.mppm(x) deltasuffstat(model, \dots, restrict, dataonly, sparseOK, quadsub, force, warn.forced, verbose, use.special) \method{dfbetas}{ppmInfluence}(model, \dots) diagnose.ppm.engine(object, \dots, type, typename, opt, sigma, rbord, compute.sd, compute.cts, envelope, nsim, nrank, rv, oldstyle, splineargs, verbose) \method{dim}{msr}(x) \method{dimnames}{msr}(x) doMultiStraussHard(iradii, hradii, types) dppDpcf(model, \dots) dppmFixIntensity(DPP, lambda, po) dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) DPPSaddle(beta, fi) DPPSaddlePairwise(beta, fi) enet.engine(model, \dots, standardize, lambda, alpha, adaptive) equalpairs(U, X, marked=FALSE) evalInteraction(X,P,E,interaction,correction,splitInf,\dots, precomputed,savecomputed) evalInterEngine(X,P,E,interaction,correction,splitInf,\dots, Reach,precomputed,savecomputed) evalPairPotential(X,P,E,pairpot,potpars,R) evalPairwiseTerm(fint, d) expandDot(f, dotvars) \method{extractAIC}{slrm}(fit, scale = 0, k = 2, \dots) fakefii(model) \method{family}{hackglmmPQL}(object, \dots) \method{family}{vblogit}(object, \dots) fill.coefs(coefs, required) findCovariate(covname, scope, scopename=NULL) fii(interaction, coefs, Vnames, IsOffset, vnameprefix) \method{flipxy}{msr}(X) forbid.logi(object) \method{formula}{hackglmmPQL}(x, \dots) getdataname(defaultvalue, \dots, dataname) getglmdata(object, drop=FALSE) getglmfit(object) getglmsubset(object) getppmdatasubset(object) getppmOriginalCovariates(object) geyercounts(U,X,r,sat,Xcounts,EqualPairs) geyerdelta2(X,r,sat,\dots,sparseOK, correction) GLMpredict(fit, data, coefs, changecoef, type) hackglmmPQL(fixed, random, family, data, correlation, weights, control, niter, verbose, subset, \dots, reltol) hasglmfit(object) hierarchicalordering(i, s) hiermat(x, h) ho.engine(model, \dots, nsim, nrmh, start, control, verb) illegal.iformula(ifmla, itags, dfvarnames) impliedpresence(tags, formula, df, extranames=character(0)) impliedcoefficients(object, tag, new.coef) \method{influence}{ppmInfluence}(model, \dots) instantiate.interact(x, par) interactionfamilyname(object) intermaker(f, blank) \method{is.expandable}{ppm}(x) is.interact(x) \method{is.marked}{mppm}(X, \dots) \method{is.marked}{msr}(X, \dots) \method{is.marked}{rppm}(X, \dots) \method{is.marked}{slrm}(X, \dots) is.mppm(x) \method{is.multitype}{mppm}(X, \dots) \method{is.multitype}{msr}(X, \dots) \method{is.multitype}{rppm}(X, \dots) \method{is.multitype}{slrm}(X, \dots) \method{is.poisson}{mppm}(x) \method{is.poisson}{rppm}(x) Kpcf.kppm(model, what) \method{Kmodel}{slrm}(model, \dots) killinteraction(model) kppmComLik(X, Xname, po, clusters, control, stabilize, weightfun, rmax, algorithm, DPP, \dots, pspace) kppmMinCon(X, Xname, po, clusters, control, stabilize, statistic, statargs, algorithm, DPP, \dots, 
pspace) kppmPalmLik(X, Xname, po, clusters, control, stabilize, weightfun, rmax, algorithm, DPP, \dots, pspace) kppmCLadap(X, Xname, po, clusters, control, weightfun, rmax, epsilon, DPP, algorithm, \dots, startpar, globStrat) \method{labels}{ppm}(object, \dots) \method{leverage}{ppmInfluence}(model, \dots) \method{lines}{objsurf}(x, \dots, directed) logi.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, correction, rbord, covfunargs, allcovar, vnamebase, vnameprefix, justQ, savecomputed, precomputed, VB) \method{logLik}{vblogit}(object, \dots) LurkEngine(object, type, cumulative, plot.sd, quadpoints, wts, Z, subQset, covvalues, resvalues, clip, clipwindow, cov.is.im, covrange, typename, covname, cl, clenv, oldstyle, check, verbose, nx, splineargs, envelope, nsim, nrank, Xsim, internal, checklength) make.pspace(\dots, canonical, adjusted, trace, save, trajectory, nhalfgrid, strict, penalised, penalty, penal.args, tau, clusters, fitmethod, flatness, C0factor, xval, xval.args, debug, transfo) mapInterVars(object, subs, mom) Mayer(fi, exponent) model.se.image(fit, W, \dots, what, new.coef) modelFrameGam(formula, \dots) mpl.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, covfunargs, correction, rbord, use.gam, gcontrol, GLM, GLMfamily, GLMcontrol, famille, forcefit, nd, eps, allcovar, callstring, precomputed, savecomputed, preponly, rename.intercept, justQ, weightfactor) mpl.get.covariates(covariates, locations, type, covfunargs, need.deriv) mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname, callstring, \dots, subsetexpr, covfunargs, allcovar, precomputed, savecomputed, vnamebase, vnameprefix, warn.illegal, warn.unidentifiable, weightfactor, skip.border, clip.interaction, splitInf) mpl.usable(x) newformula(old, change, eold, enew, expandpoly, dotvars) newstyle.coeff.handling(object) nndcumfun(X, \dots, r) no.trend.ppm(x) objsurfEngine(objfun, optpar, objargs, \dots, dotargs, objname, new.objargs, parmap, ngrid, xlim, ylim, ratio, verbose) optimConverged(x) optimStatus(x, call) optimNsteps(x) outdated.interact(object) oversize.quad(Q, \dots, nU, nX, p) PairPotentialType(pairpot) \method{parameters}{detpointprocfamily}(model, \dots) partialModelMatrix(X,D,model,callstring,\dots) \method{pcfmodel}{slrm}(model, \dots) ploterodewin(W1, W2, col.edge, col.inside, do.plot, \dots) ploterodeimage(W, Z, \dots, Wcol, rangeZ, colsZ, do.plot) \method{plot}{addvar}(x, \dots, do.points=FALSE) \method{plot}{lurk}(x, \dots, shade) \method{plot}{minconfit}(x, \dots) \method{plot}{parres}(x, \dots) \method{plot}{qqppm}(x, \dots, limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") poisson.fits.better(object) PoissonCompareCalc(object) PoisSaddle(beta, fi) PoisSaddleArea(beta, fi) PoisSaddleGeyer(beta, fi) PoisSaddlePairwise(beta, fi) PPMmodelmatrix(object, data, \dots, subset, Q, keepNA, irregular, splitInf) \method{ppm}{default}(Q, trend, interaction, \dots, covariates, data, covfunargs, subset, clipwin, correction, rbord, use.gam, method, forcefit, improve.type, improve.args, emend, project, prior.mean, prior.var, nd, eps, gcontrol, nsim, nrmh, start, control, verb, callstring) ppmCovariates(model) ppmDerivatives(fit, what, Dcovfun, loc, covfunargs) ppmInfluenceEngine(fit, what, \dots, iScore, iHessian, iArgs, drop, method, fine, precomputed, sparseOK, fitname, multitypeOK, entrywise, matrix.action, dimyx, eps, rule.eps, geomsmooth) \method{predict}{vblogit}(object, newdata, 
type, se.fit, dispersion, terms, na.action, \dots) \method{predict}{profilepl}(object, \dots) printStatus(x, errors.only) printStatusList(stats) \method{print}{addvar}(x, \dots) \method{print}{bt.frame}(x, \dots) \method{print}{diagppm}(x, \dots) \method{print}{detpointprocfamily}(x, \dots) \method{print}{detpointprocfamilyfun}(x, \dots) \method{print}{hierarchicalordering}(x, \dots) \method{print}{influence.ppm}(x, \dots) \method{print}{interact}(x, \dots, family, brief, banner) \method{print}{intermaker}(x, \dots) \method{print}{isf}(x, \dots) \method{print}{leverage.ppm}(x, \dots) \method{print}{lurk}(x, \dots) \method{print}{minconfit}(x, \dots) \method{print}{mppm}(x, \dots) \method{print}{msr}(x, \dots) \method{print}{parres}(x, \dots) \method{print}{plotppm}(x, \dots) \method{print}{profilepl}(x, \dots) \method{print}{qqppm}(x, \dots) \method{print}{rppm}(x, \dots) \method{print}{summary.mppm}(x, \dots, brief) \method{print}{summary.slrm}(x, \dots) \method{print}{vblogit}(x, \dots) quad.mppm(x) quadBlockSizes(nX, nD, p, nMAX, announce) \method{reach}{slrm}(x, \dots) reduceformula(fmla, deletevar, verbose) reincarnate.interact(object) \method{rescale}{msr}(X, s, unitname) resid4plot(RES, plot.neg, plot.smooth, spacing, outer, srange, monochrome, main, xlab, ylab, rlab, col.neg, col.smooth, \dots) resid1plot(RES, opt, plot.neg, plot.smooth, srange, monochrome, main, add, show.all, do.plot, col.neg, col.smooth, \dots) resid1panel(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab,ylab, \dots, do.plot) \method{rotate}{msr}(X, angle, \dots, centre) SaddleApprox(beta, fi, approx) safeFiniteValue(x, default) safePositiveValue(x, default) \method{scalardilate}{msr}(X, f, \dots) \method{shift}{influence.ppm}(X, \dots) \method{shift}{leverage.ppm}(X, \dots) \method{shift}{msr}(X, \dots) signalStatus(x, errors.only) \method{simulate}{profilepl}(object, \dots) slr.prepare(CallInfo, envir, data, dataAtPoints, splitby, clip) slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, pixelarea) slrmInfluence(model, what, \dots) \method{spatialCovariateEvidence}{ppm}(model, covariate, \dots, lambdatype, dimyx, eps, rule.eps, interpolate, jitter, jitterfactor, modelname, covname, dataname, subset, clip.predict) \method{spatialCovariateEvidence}{slrm}(model, covariate, \dots, lambdatype, jitter, jitterfactor, modelname, covname, dataname, subset, raster.action) spatialCovariateUnderModel(model, covariate, \dots) \method{spatialCovariateUnderModel}{ppm}(model, covariate, \dots) \method{spatialCovariateUnderModel}{kppm}(model, covariate, \dots) \method{spatialCovariateUnderModel}{dppm}(model, covariate, \dots) \method{spatialCovariateUnderModel}{slrm}(model, covariate, \dots) spatstatDPPModelInfo(model) splitHybridInteraction(coeffs, inte) sp.foundclass(cname, inlist, formalname, argsgiven) sp.foundclasses(cnames, inlist, formalname, argsgiven) strausscounts(U,X,r,EqualPairs) stripGLMM(object) suffloc(object) suffstat.generic(model, X, callstring) suffstat.poisson(model, X, callstring) \method{summary}{mppm}(object, \dots, brief=FALSE) \method{summary}{msr}(object, \dots) \method{summary}{profilepl}(object, \dots) \method{summary}{vblogit}(object, \dots) \method{terms}{rppm}(x, \dots) tweak.coefs(model, new.coef) \method{unitname}{msr}(x) \method{unitname}{msr}(x) <- value \method{update}{ippm}(object, \dots, envir) \method{update}{msr}(object, \dots) \method{updateData}{ppm}(model, X, \dots, warn) \method{updateData}{kppm}(model, X, \dots) \method{updateData}{dppm}(model, X, \dots) 
\method{updateData}{slrm}(model, X, \dots) varcountEngine(g, B, lambdaB, f, R, what) versionstring.interact(object) versionstring.ppm(object) windows.mppm(x) } \details{ These internal \pkg{spatstat.model} functions should not be called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat.model} to the next. } \value{ The return values of these functions are not documented, and may change without warning. } \keyword{internal} spatstat.model/man/data.ppm.Rd0000644000176200001440000000221214331173076015764 0ustar liggesusers\name{data.ppm} \alias{data.ppm} \title{Extract Original Data from a Fitted Point Process Model} \description{ Given a fitted point process model, this function extracts the original point pattern dataset to which the model was fitted. } \usage{ data.ppm(object) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}} } \examples{ fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- data.ppm(fit) # 'X' is identical to 'cells' } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.model/man/simulate.mppm.Rd0000644000176200001440000000354114331173077017062 0ustar liggesusers\name{simulate.mppm} \alias{simulate.mppm} \title{Simulate a Point Process Model Fitted to Several Point Patterns} \description{ Generates simulated realisations from a point process model that was fitted to several point patterns. } \usage{ \method{simulate}{mppm}(object, nsim=1, \dots, verbose=TRUE) } \arguments{ \item{object}{ Point process model fitted to several point patterns. An object of class \code{"mppm"}. } \item{nsim}{ Number of simulated realisations (of each original pattern). } \item{\dots}{ Further arguments passed to \code{\link{simulate.ppm}} to control the simulation. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"mppm"} of fitted point process models for replicated point pattern data. The result is a hyperframe with \code{n} rows and \code{nsim} columns, where \code{n} is the number of original point pattern datasets to which the model was fitted. Each column of the hyperframe contains a simulated version of the original data. For each of the original point pattern datasets, the fitted model for this dataset is extracted using \code{\link{subfits}}, then \code{nsim} simulated realisations of this model are generated using \code{\link{simulate.ppm}}, and these are stored in the corresponding row of the output. } \value{ A hyperframe. } \examples{ H <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ id, H) y <- simulate(fit, nsim=2) y plot(y[1,,drop=TRUE], main="Simulations for Waterstriders pattern 1") plot(y[,1,drop=TRUE], main="Simulation 1 for each Waterstriders pattern") } \seealso{ \code{\link{mppm}}, \code{\link{simulate.ppm}}. 
} \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/pairwise.family.Rd0000644000176200001440000000304714331173077017373 0ustar liggesusers\name{pairwise.family} \alias{pairwise.family} \title{Pairwise Interaction Process Family} \description{ An object describing the family of all pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the pairwise interaction family of point process models. If you need to create a specific pairwise interaction model for use in modelling, use the function \code{\link{Pairwise}} or one of the existing functions listed below. Anyway, \code{pairwise.family} is an object of class \code{"isf"} containing a function \code{pairwise.family$eval} for evaluating the sufficient statistics of any pairwise interaction point process model taking an exponential family form. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ Other families: \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Pairwise interactions: \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Fiksel}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}. Other interactions: \code{\link{AreaInter}}, \code{\link{Geyer}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/harmonic.Rd0000644000176200001440000000416514331173076016071 0ustar liggesusers\name{harmonic} \alias{harmonic} \title{Basis for Harmonic Functions} \description{ Evaluates a basis for the harmonic polynomials in \eqn{x} and \eqn{y} of degree less than or equal to \eqn{n}. } \usage{ harmonic(x, y, n) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates } \item{y}{ Vector of \eqn{y} coordinates } \item{n}{ Maximum degree of polynomial } } \value{ A data frame with \code{2 * n} columns giving the values of the basis functions at the coordinates. Each column is labelled by an algebraic expression for the corresponding basis function. } \details{ This function computes a basis for the harmonic polynomials in two variables \eqn{x} and \eqn{y} up to a given degree \eqn{n} and evaluates them at given \eqn{x,y} locations. It can be used in model formulas (for example in the model-fitting functions \code{\link[stats]{lm},\link[stats]{glm},\link[mgcv]{gam}} and \code{\link[spatstat.model]{ppm}}) to specify a linear predictor which is a harmonic function. A function \eqn{f(x,y)} is harmonic if \deqn{\frac{\partial^2}{\partial x^2} f + \frac{\partial^2}{\partial y^2}f = 0.}{ (d/dx)^2 f + (d/dy)^2 f = 0.} The harmonic polynomials of degree less than or equal to \eqn{n} have a basis consisting of \eqn{2 n} functions. This function was implemented on a suggestion of P. McCullagh for fitting nonstationary spatial trend to point process models. 
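As a quick illustration (a sketch only, evaluating the basis at arbitrary locations), the dimensions and labels of the returned data frame can be inspected directly: \preformatted{
  # evaluate the harmonic basis of degree <= 3 at 5 random points
  b <- harmonic(runif(5), runif(5), n=3)
  dim(b)       # 5 rows and 2 * 3 = 6 columns
  colnames(b)  # algebraic expression labelling each basis function
}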
} \seealso{ \code{\link[spatstat.model]{ppm}}, \code{\link[spatstat.model]{polynom}} } \examples{ # inhomogeneous point pattern X <- unmark(longleaf) \testonly{ # smaller dataset X <- X[seq(1,npoints(X), by=50)] } # fit Poisson point process with log-cubic intensity fit.3 <- ppm(X ~ polynom(x,y,3), Poisson()) # fit Poisson process with log-cubic-harmonic intensity fit.h <- ppm(X ~ harmonic(x,y,3), Poisson()) # Likelihood ratio test lrts <- 2 * (logLik(fit.3) - logLik(fit.h)) df <- with(coords(X), ncol(polynom(x,y,3)) - ncol(harmonic(x,y,3))) pval <- 1 - pchisq(lrts, df=df) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/effectfun.Rd0000644000176200001440000000766614331173076016247 0ustar liggesusers\name{effectfun} \alias{effectfun} \title{Compute Fitted Effect of a Spatial Covariate in a Point Process Model} \description{ Compute the trend or intensity of a fitted point process model as a function of one of its covariates. } \usage{ effectfun(model, covname, \dots, se.fit=FALSE, nvalues=256) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"}). } \item{covname}{ The name of the covariate. A character string. (Needed only if the model has more than one covariate.) } \item{\dots}{ The fixed values of other covariates (in the form \code{name=value}) if required. } \item{se.fit}{ Logical. If \code{TRUE}, asymptotic standard errors of the estimates will be computed, together with a 95\% confidence interval. } \item{nvalues}{ Integer. The number of values of the covariate (if it is numeric) for which the effect function should be evaluated. We recommend at least 256. } } \details{ The object \code{model} should be an object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"} representing a point process model fitted to point pattern data. The model's trend formula should involve a spatial covariate named \code{covname}. This could be \code{"x"} or \code{"y"} representing one of the Cartesian coordinates. More commonly the covariate is another, external variable that was supplied when fitting the model. The command \code{effectfun} computes the fitted trend of the point process \code{model} as a function of the covariate named \code{covname}. The return value can be plotted immediately, giving a plot of the fitted trend against the value of the covariate. If the model also involves covariates other than \code{covname}, then these covariates will be held fixed. Values for these other covariates must be provided as arguments to \code{effectfun} in the form \code{name=value}. If \code{se.fit=TRUE}, the algorithm also calculates the asymptotic standard error of the fitted trend, and a (pointwise) asymptotic 95\% confidence interval for the true trend. This command is just a wrapper for the prediction method \code{\link{predict.ppm}}. For more complicated computations about the fitted intensity, use \code{\link{predict.ppm}}. } \section{Trend and intensity}{ For a Poisson point process model, the trend is the same as the intensity of the point process. For a more general Gibbs model, the trend is the first order potential in the model (the first order term in the Gibbs representation). In Poisson or Gibbs models fitted by \code{\link{ppm}}, the trend is the only part of the model that depends on the covariates. 
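For example, for a Poisson model, the values returned by \code{effectfun} are fitted intensities, regarded as a function of the chosen covariate. A minimal sketch: \preformatted{
  # Poisson model: the fitted trend is the fitted intensity
  fit <- ppm(cells ~ x)
  ef <- effectfun(fit, "x")
  head(as.data.frame(ef))  # covariate values and fitted trend
}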
} \section{Determinantal point process models with fixed intensity}{ The function \code{\link{dppm}} which fits a determinantal point process model allows the user to specify the intensity \code{lambda}. In such cases the effect function is undefined, and \code{effectfun} stops with an error message. } \value{ A data frame containing a column of values of the covariate and a column of values of the fitted trend. If \code{se.fit=TRUE}, there are 3 additional columns containing the standard error and the upper and lower limits of a confidence interval. If the covariate named \code{covname} is numeric (rather than a factor or logical variable), the return value is also of class \code{"fv"} so that it can be plotted immediately. } \seealso{ \code{\link{ppm}}, \code{\link{predict.ppm}}, \code{\link{fv.object}} } \examples{ X <- copper$SouthPoints D <- distfun(copper$SouthLines) fit <- ppm(X ~ polynom(D, 5)) effectfun(fit) plot(effectfun(fit, se.fit=TRUE)) fitx <- ppm(X ~ x + polynom(D, 5)) plot(effectfun(fitx, "D", x=20)) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.model/man/residualMeasure.Rd0000644000176200001440000000357414331173077017427 0ustar liggesusers\name{residualMeasure} \alias{residualMeasure} \title{ Residual Measure for an Observed Point Pattern and a Fitted Intensity } \description{ Given a point pattern and an estimate of its intensity function obtained in any fashion, compute the residual measure. } \usage{ residualMeasure(Q, lambda, type = c("raw", "inverse", "Pearson", "pearson"), ...) } \arguments{ \item{Q}{ A point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{lambda}{ Predicted intensity. An image (object of class \code{"im"}) or a list of images. } \item{type}{ Character string (partially matched) specifying the type of residuals. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{quadscheme}} if \code{Q} is a point pattern. } } \details{ This command constructs the residual measure for the model in which \code{Q} is the observed point pattern or quadrature scheme, and \code{lambda} is the estimated intensity function obtained in some fashion. } \value{ A measure (object of class \code{"msr"}). } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \author{ \adrian. } \seealso{ \code{\link{residuals.ppm}} } \examples{ ## nonparametric regression estimate of intensity ## as a function of terrain elevation f <- rhohat(bei, bei.extra$elev) ## predicted intensity as a function of location lam <- predict(f) ## residuals res <- residualMeasure(bei, lam) res plot(res) } \keyword{spatial} \keyword{models} spatstat.model/man/Extract.leverage.ppm.Rd0000644000176200001440000000451614510474260020265 0ustar liggesusers\name{Extract.leverage.ppm} \alias{[.leverage.ppm} \title{Extract Subset of Leverage Object} \description{ Extract a subset of a leverage map, or extract the leverage values at specified locations. } \usage{ \method{[}{leverage.ppm}(x, i, \dots, update=TRUE) } \arguments{ \item{x}{ A leverage object (of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.im}}). 
Either a spatial window (object of class \code{"owin"}) or a spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{ Further arguments passed to \code{\link{[.im}}, especially the argument \code{drop}. } \item{update}{ Logical value indicating whether to update the internally-stored value of the mean leverage, by averaging over the specified subset. } } \value{ Another object of class \code{"leverage.ppm"}, or a vector of numeric values of leverage. } \details{ An object of class \code{"leverage.ppm"} contains the values of the leverage function for a point process model, computed by \code{\link{leverage.ppm}}. This function extracts a designated subset of the leverage values, either as another leverage object, or as a vector of numeric values. The function \code{[.leverage.ppm} is a method for \code{\link{[}} for the class \code{"leverage.ppm"}. The argument \code{i} should be either \itemize{ \item a spatial window (object of class \code{"owin"}) determining a region where the leverage map is required. The result will typically be another leverage map (object of class \code{leverage.ppm}). \item a spatial point pattern (object of class \code{"ppp"}) specifying locations at which the leverage values are required. The result will be a numeric vector. } The subset operator for images, \code{\link{[.im}}, is applied to the leverage map. If this yields a pixel image, then the result of \code{\link{[.leverage.ppm}} is another leverage object. Otherwise, a vector containing the numeric values of leverage is returned. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ fit <- ppm(cells ~x) lev <- leverage(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) lev[b] lev[cells] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} \concept{Model diagnostics} spatstat.model/man/dppCauchy.Rd0000644000176200001440000000245114331173076016205 0ustar liggesusers\name{dppCauchy} \alias{dppCauchy} \title{Generalized Cauchy Determinantal Point Process Model} \description{ Function generating an instance of the (generalized) Cauchy determinantal point process model. } \usage{dppCauchy(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The (generalized) Cauchy DPP is defined by Lavancier, \Moller and Rubak (2015). The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppCauchy(lambda=100, alpha=.05, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat.model/man/zgibbsmodel.Rd0000644000176200001440000000170014374302015016556 0ustar liggesusers\name{zgibbsmodel} \alias{zgibbsmodel} \title{ Gibbs Model } \description{ Experimental code. Creates an object representing a Gibbs point process model. Typically used for theoretical calculations about such a model.
} \usage{ zgibbsmodel(beta = 1, interaction = NULL, icoef = NULL) } \arguments{ \item{beta}{ First order trend term. A numeric value, numeric vector, pixel image, function, or a list of such objects. } \item{interaction}{ Object of class \code{"interact"} specifying the interpoint interaction structure, or \code{NULL} representing the Poisson process. } \item{icoef}{ Numeric vector of coefficients for the interpoint interaction. } } \details{ Experimental. } \value{ Object belonging to the experimental class \code{zgibbsmodel}. } \author{ \adrian. } \seealso{ \code{\link{methods.zgibbsmodel}} } \examples{ m <- zgibbsmodel(10, Strauss(0.1), -0.5) } \keyword{spatial} \keyword{models} spatstat.model/man/measureVariation.Rd0000644000176200001440000000423014331173077017601 0ustar liggesusers\name{measureVariation} \alias{measureVariation} \alias{measurePositive} \alias{measureNegative} \alias{totalVariation} \title{ Positive and Negative Parts, and Variation, of a Measure } \description{ Given a measure \code{A} (object of class \code{"msr"}) these functions find the positive part, negative part and variation of \code{A}. } \usage{ measurePositive(x) measureNegative(x) measureVariation(x) totalVariation(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measurePositive} and \code{measureNegative} return the positive and negative parts of the measure, and \code{measureVariation} returns the variation (sum of positive and negative parts). The function \code{totalVariation} returns the total variation norm. If \eqn{\mu} is a signed measure, it can be represented as \deqn{\mu = \mu_{+} - \mu_{-}}{mu = mu[+] - mu[-]} where \eqn{\mu_{+}}{mu[+]} and \eqn{\mu_{-}}{mu[-]} are \emph{nonnegative} measures called the positive and negative parts of \eqn{\mu}. In a nutshell, the positive part of \eqn{\mu} consists of all positive contributions or increments, and the negative part consists of all negative contributions multiplied by \code{-1}. The variation \eqn{|\mu|}{|mu|} is defined by \deqn{|\mu| = \mu_{+} + \mu_{-}}{|mu| = mu[+] + mu[-]} and is also a nonnegative measure. The total variation norm is the integral of the variation. } \value{ The result of \code{measurePositive}, \code{measureNegative} and \code{measureVariation} is another measure (object of class \code{"msr"}) on the same spatial domain. The result of \code{totalVariation} is a non-negative number. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. } \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measureDiscrete}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") measurePositive(rp) measureNegative(rp) measureVariation(rp) # total variation norm totalVariation(rp) } \keyword{spatial} \keyword{math} spatstat.model/man/msr.Rd0000644000176200001440000001552514331173077015073 0ustar liggesusers\name{msr} \alias{msr} \title{ Signed or Vector-Valued Measure } \description{ Defines an object representing a signed measure or vector-valued measure on a spatial domain. } \usage{ msr(qscheme, discrete, density, check=TRUE) } \arguments{ \item{qscheme}{ A quadrature scheme (object of class \code{"quad"} usually extracted from a fitted point process model). } \item{discrete}{ Vector or matrix containing the values (masses) of the discrete component of the measure, for each of the data points in \code{qscheme}.
} \item{density}{ Vector or matrix containing values of the density of the diffuse component of the measure, for each of the quadrature points in \code{qscheme}. } \item{check}{ Logical. Whether to check validity of the arguments. } } \details{ This function creates an object that represents a signed or vector valued \emph{measure} on the two-dimensional plane. It is not normally called directly by the user. A signed measure is a classical mathematical object (Diestel and Uhl, 1977) which can be visualised as a collection of electric charges, positive and/or negative, spread over the plane. Electric charges may be concentrated at specific points (atoms), or spread diffusely over a region. An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. Spatial residuals for point process models (Baddeley et al, 2005, 2008) take the form of a real-valued or vector-valued measure. The function \code{\link{residuals.ppm}} returns an object of class \code{"msr"} representing the residual measure. Various other diagnostic tools such as \code{\link{dfbetas.ppm}} and \code{\link{dffit.ppm}} also return an object of class \code{"msr"}. The function \code{msr} would not normally be called directly by the user. It is the low-level creator function that makes an object of class \code{"msr"} from raw data. The first argument \code{qscheme} is a quadrature scheme (object of class \code{"quad"}). It is typically created by \code{\link{quadscheme}} or extracted from a fitted point process model using \code{\link{quad.ppm}}. A quadrature scheme contains both data points and dummy points. The data points of \code{qscheme} are used as the locations of the atoms of the measure. All quadrature points (i.e. both data points and dummy points) of \code{qscheme} are used as sampling points for the density of the continuous component of the measure. The argument \code{discrete} gives the values of the atomic component of the measure for each \emph{data point} in \code{qscheme}. It should be either a numeric vector with one entry for each data point, or a numeric matrix with one row for each data point. The argument \code{density} gives the values of the \emph{density} of the diffuse component of the measure, at each \emph{quadrature point} in \code{qscheme}. It should be either a numeric vector with one entry for each quadrature point, or a numeric matrix with one row for each quadrature point. If both \code{discrete} and \code{density} are vectors (or one-column matrices) then the result is a signed (real-valued) measure. Otherwise, the result is a vector-valued measure, with the dimension of the vector space being determined by the number of columns in the matrices \code{discrete} and/or \code{density}. (If one of these is a \eqn{k}-column matrix and the other is a 1-column matrix, then the latter is replicated to \eqn{k} columns). The class \code{"msr"} has methods for \code{print}, \code{plot} and \code{[}. There is also a function \code{\link{Smooth.msr}} for smoothing a measure. } \value{ An object of class \code{"msr"}. } \section{Guide to using measures}{ Objects of class \code{"msr"}, representing measures, are returned by the functions \code{\link{residuals.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{dffit.ppm}} and possibly by other functions. There are methods for printing and plotting a measure, along with many other operations, which can be listed by typing \code{methods(class="msr")}. 
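As a quick orientation, the following minimal sketch applies several of the operations described below to a residual measure (the window \code{B} is arbitrary, chosen here for illustration): \preformatted{
  fit <- ppm(cells ~ x)
  m <- residuals(fit)         # residual measure (class "msr")
  B <- owin(c(0, 0.5), c(0, 0.5))
  m[B]                        # restriction to a subregion
  integral(m, domain=B)       # value of the measure on B
  plot(Smooth(m))             # smoothed version, as a pixel image
}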
The \code{print} and \code{summary} methods report basic information about a measure, such as the total value of the measure, and the spatial domain on which it is defined. The \code{plot} method displays the measure. It is documented separately in \code{\link{plot.msr}}. A measure can be smoothed using \code{\link{Smooth.msr}}, yielding a pixel image which is sometimes easier to interpret than the plot of the measure itself. The subset operator \code{[} can be used to restrict the measure to a subregion of space, or to extract one of the scalar components of a vector-valued measure. It is documented separately in \code{\link{[.msr}}. The total value of a measure, or the value on a subregion, can be obtained using \code{\link{integral.msr}}. The value of a measure \code{m} on a subregion \code{B} can be obtained by \code{integral(m, domain=B)} or \code{integral(m[B])}. The values of a measure \code{m} on each tile of a tessellation \code{A} can be obtained by \code{integral(m, domain=A)}. Some mathematical operations on measures are supported, such as multiplying a measure by a single number, or adding two measures. Measures can be separated into components in different ways using \code{\link{as.layered.msr}}, \code{\link{unstack.msr}} and \code{\link{split.msr}}. Internal components of the data structure of an \code{"msr"} object can be extracted using \code{\link{with.msr}}. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Diestel, J. and Uhl, J.J. Jr (1977) \emph{Vector measures}. Providence, RI, USA: American Mathematical Society. Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian } \seealso{ \code{\link{plot.msr}}, \code{\link{Smooth.msr}}, \code{\link{[.msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{Ops.msr}}, \code{\link{measureVariation}}, \code{\link{measureWeighted}}, \code{\link{measureContinuous}}. } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp rs <- residuals(fit, type="score") rs colnames(rs) # An equivalent way to construct the Pearson residual measure by hand Q <- quad.ppm(fit) lambda <- fitted(fit) slam <- sqrt(lambda) Z <- is.data(Q) m <- msr(Q, discrete=1/slam[Z], density = -slam) m } \keyword{spatial} \keyword{models} spatstat.model/man/thomas.estpcf.Rd0000644000176200001440000001470314331173077017047 0ustar liggesusers\name{thomas.estpcf} \alias{thomas.estpcf} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ thomas.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. 
} \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function \code{\link{pcf}}. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical pair correlation function of the Thomas process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of starting values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical pair correlation function of the Thomas process is \deqn{ g(r) = 1 + \frac 1 {4\pi \kappa \sigma^2} \exp(-\frac{r^2}{4\sigma^2}). }{ g(r) = 1 + exp(-r^2/(4 * sigma^2))/(4 * pi * kappa * sigma^2). } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}.
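For illustration, the theoretical pair correlation function displayed above is easily evaluated directly in \R. This is only a sketch (the function name \code{gThomas} and the parameter values are arbitrary choices, not part of the package): \preformatted{
  gThomas <- function(r, kappa, sigma) {
    # theoretical pcf of the Thomas process
    1 + exp(-r^2/(4 * sigma^2))/(4 * pi * kappa * sigma^2)
  }
  gThomas(r=0.05, kappa=10, sigma=0.1)
}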
The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003) \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{thomas.estK}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ u <- thomas.estpcf(redwood, c(kappa=10, scale=0.1)) u plot(u, legendpos="topright") u2 <- thomas.estpcf(redwood, c(kappa=10, scale=0.1), pcfargs=list(stoyan=0.12)) } \keyword{spatial} \keyword{models} spatstat.model/man/integral.msr.Rd0000644000176200001440000000516314331173076016675 0ustar liggesusers\name{integral.msr} \alias{integral.msr} \title{ Integral of a Measure } \description{ Computes the integral (total value) of a measure over its domain. } \usage{ \method{integral}{msr}(f, domain=NULL, weight=NULL, \dots) } \arguments{ \item{f}{ A signed measure or vector-valued measure (object of class \code{"msr"}). } \item{domain}{ Optional window specifying the domain of integration. Alternatively a tessellation. } \item{weight}{ Optional. A pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving a numerical weight to be applied to the integration. } \item{\dots}{ Ignored. } } \details{ The integral (total value) of the measure over its domain is calculated. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. For a multitype measure \code{m}, use \code{\link{split.msr}} to separate the contributions for each type of point, as shown in the Examples. If \code{weight} is given, it should be a pixel image or a function of coordinates \eqn{x} and \eqn{y} returning numerical values. Then each increment of the measure will be multiplied by the corresponding value of \code{weight}. Effectively, \code{weight} becomes the integrand, and the result is the integral of \code{weight} with respect to the measure \code{f}. } \value{ A numeric value, vector, or matrix.
\code{integral(f)} returns a numeric value (for a signed measure) or a vector of values (for a vector-valued measure). If \code{domain} is a tessellation then \code{integral(f, domain)} returns a numeric vector with one entry for each tile (if \code{f} is a signed measure) or a numeric matrix with one row for each tile (if \code{f} is a vector-valued measure). } \seealso{ \code{\link{msr}}, \code{\link{integral}} } \examples{ fit <- ppm(cells ~ x) rr <- residuals(fit) integral(rr) # vector-valued measure rs <- residuals(fit, type="score") integral(rs) # multitype fitA <- ppm(amacrine ~ x) rrA <- residuals(fitA) sapply(split(rrA), integral) # multitype and vector-valued rsA <- residuals(fitA, type="score") sapply(split(rsA), integral) ## integral over a subregion integral(rr, domain=square(0.5)) ## integrals over the tiles of a tessellation integral(rr, domain=quadrats(cells, 2)) ## weighted integral integral(rr, weight=function(x,y){y}) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.model/man/Triplets.Rd0000644000176200001440000000631614331173075016076 0ustar liggesusers\name{Triplets} \alias{Triplets} \title{The Triplet Point Process Model} \description{ Creates an instance of Geyer's triplet interaction point process model which can then be fitted to point pattern data. } \usage{ Triplets(r) } \arguments{ \item{r}{The interaction radius of the Triplets process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Triplets process with interaction radius \eqn{r}. } \details{ The (stationary) Geyer triplet process (Geyer, 1999) with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each triplet of close points contributes a factor \eqn{\gamma}{gamma} to the density. A triplet of close points is a group of 3 points, each pair of which is closer than \eqn{r} units apart. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of unordered triples of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Triplets process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Triplets process pairwise interaction is yielded by the function \code{Triplets()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Triplets()}. 
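Since the canonical parameters are estimated rather than fixed, they can be inspected after fitting; a minimal sketch (using the \code{cells} dataset, as in the Examples): \preformatted{
  fit <- ppm(cells ~ 1, Triplets(r=0.2))
  coef(fit)         # log(beta) and log(gamma)
  exp(coef(fit))    # beta and gamma on the original scale
}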
} \seealso{ \code{\link{ppm}}, \code{\link{triplet.family}}, \code{\link{ppm.object}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \examples{ Triplets(r=0.1) # prints a sensible description of itself ppm(cells ~1, Triplets(r=0.2)) # fit the stationary Triplets process to `cells' \donttest{ ppm(cells ~polynom(x,y,3), Triplets(r=0.2)) # fit a nonstationary Triplets process with log-cubic polynomial trend } } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/hybrid.family.Rd0000644000176200001440000000145214331173076017026 0ustar liggesusers\name{hybrid.family} \alias{hybrid.family} \title{ Hybrid Interaction Family } \description{ An object describing the family of all hybrid interactions. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of all hybrid point process models. If you need to create a specific hybrid interaction model for use in modelling, use the function \code{\link{Hybrid}}. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ Use \code{\link{Hybrid}} to make hybrid interactions. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.model/man/update.ppm.Rd0000644000176200001440000001426414331173100016333 0ustar liggesusers\name{update.ppm} \alias{update.ppm} \title{Update a Fitted Point Process Model} \description{ \code{update} method for class \code{"ppm"}. } \usage{ \method{update}{ppm}(object, \dots, fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) } \arguments{ \item{object}{ An existing fitted point process model, typically produced by \code{\link{ppm}}. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{ppm}}. } \item{fixdummy}{ Logical flag indicating whether the quadrature scheme for the call to \code{\link{ppm}} should use the same set of dummy points as that in the original call. } \item{use.internal}{ Optional. Logical flag indicating whether the model should be refitted using the internally saved data (\code{use.internal=TRUE}) or by re-evaluating these data in the current frame (\code{use.internal=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{ppm}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}} for details of this class. \code{update.ppm} will modify the point process model specified by \code{object} according to the new arguments given, then re-fit it. The actual re-fitting is performed by the model-fitting function \code{\link{ppm}}. If you are comparing several model fits to the same data, or fits of the same model to different data, it is strongly advisable to use \code{update.ppm} rather than trying to fit them by hand. This is because \code{update.ppm} re-fits the model in a way which is comparable to the original fit. The arguments \code{...} are matched to the formal arguments of \code{\link{ppm}} as follows.
First, all the \emph{named} arguments in \code{...} are matched with the formal arguments of \code{\link{ppm}}. Use \code{name=NULL} to remove the argument \code{name} from the call. Second, any \emph{unnamed} arguments in \code{...} are matched with formal arguments of \code{\link{ppm}} if the matching is obvious from the class of the object. Thus \code{...} may contain \itemize{ \item exactly one argument of class \code{"ppp"} or \code{"quad"}, which will be interpreted as the named argument \code{Q}; \item exactly one argument of class \code{"formula"}, which will be interpreted as the named argument \code{trend} (or as specifying a change to the trend formula); \item exactly one argument of class \code{"interact"}, which will be interpreted as the named argument \code{interaction}; \item exactly one argument of class \code{"data.frame"}, which will be interpreted as the named argument \code{covariates}. } The \code{trend} argument can be a formula that specifies a \emph{change} to the current trend formula. For example, the formula \code{~ . + Z} specifies that the additional covariate \code{Z} will be added to the right hand side of the trend formula in the existing \code{object}. The argument \code{fixdummy=TRUE} ensures comparability of the objects before and after updating. When \code{fixdummy=FALSE}, calling \code{update.ppm} is exactly the same as calling \code{ppm} with the updated arguments. However, the original and updated models are not strictly comparable (for example, their pseudolikelihoods are not strictly comparable) unless they used the same set of dummy points for the quadrature scheme. Setting \code{fixdummy=TRUE} ensures that the re-fitting will be performed using the same set of dummy points. This is highly recommended. The value of \code{use.internal} determines where to find data to re-evaluate the model (data for the arguments mentioned in the original call to \code{ppm} that are not overwritten by arguments to \code{update.ppm}). If \code{use.internal=FALSE}, then arguments to \code{ppm} are \emph{re-evaluated} in the frame where you call \code{update.ppm}. This is like the behaviour of the other methods for \code{\link{update}}. This means that if you have changed any of the objects referred to in the call, these changes will be taken into account. Also if the original call to \code{ppm} included any calls to random number generators, these calls will be recomputed, so that you will get a different outcome of the random numbers. If \code{use.internal=TRUE}, then arguments to \code{ppm} are extracted from internal data stored inside the current fitted model \code{object}. This is useful if you don't want to re-evaluate anything. It is also necessary if \code{object} has been restored from a dump file using \code{\link{load}} or \code{\link{source}}. In such cases, we have lost the environment in which \code{object} was fitted, and data cannot be re-evaluated. By default, if \code{use.internal} is missing, \code{update.ppm} will re-evaluate the arguments if this is possible, and use internal data if not. } \value{ Another fitted point process model (object of class \code{"ppm"}). } \examples{ data(nztrees) # fit the stationary Poisson process fit <- ppm(nztrees, ~ 1) # fit a nonstationary Poisson process fitP <- update(fit, trend=~x) fitP <- update(fit, ~x) # change the trend formula: add another term to the trend fitPxy <- update(fitP, ~ . + y) # change the trend formula: remove the x variable fitPy <- update(fitPxy, ~ . 
- x) # fit a stationary Strauss process fitS <- update(fit, interaction=Strauss(13)) fitS <- update(fit, Strauss(13)) # refit using a different edge correction fitS <- update(fitS, correction="isotropic") # re-fit the model to a subset # of the original point pattern nzw <- owin(c(0,148),c(0,95)) nzsub <- nztrees[,nzw] fut <- update(fitS, Q=nzsub) fut <- update(fitS, nzsub) # WARNING: the point pattern argument is called 'Q' ranfit <- ppm(rpoispp(42), ~1, Poisson()) ranfit # different random data! update(ranfit) # the original data update(ranfit, use.internal=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/closepaircounts.Rd0000644000176200001440000000473714331173076017505 0ustar liggesusers\name{closepaircounts} \alias{closepaircounts} \alias{crosspaircounts} \title{ Count Close Pairs of Points } \description{ Low-level functions to count the number of close pairs of points. } \usage{ closepaircounts(X, r) crosspaircounts(X, Y, r) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{r}{ Maximum distance between pairs of points to be counted as close pairs. } } \details{ These are the efficient low-level functions used by \pkg{spatstat} to count close pairs of points in a point pattern or between two point patterns. \code{closepaircounts(X,r)} counts the number of neighbours for each point in the pattern \code{X}. That is, for each point \code{X[i]}, it counts the number of other points \code{X[j]} with \code{j != i} such that \code{d(X[i],X[j]) <= r} where \code{d} denotes Euclidean distance. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]}. \code{crosspaircounts(X,Y,r)} counts, for each point in the pattern \code{X}, the number of neighbours in the pattern \code{Y}. That is, for each point \code{X[i]}, it counts the number of points \code{Y[j]} such that \code{d(X[i],Y[j]) <= r}. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]} in the pattern \code{Y}. } \section{Warning about accuracy}{ The results of these functions may not agree exactly with the correct answer (as calculated by a human) and may not be consistent between different computers and different installations of \R. The discrepancies arise in marginal cases where the interpoint distance is equal to, or very close to, the threshold \code{r}. Floating-point numbers in a computer are not mathematical Real Numbers: they are approximations using finite-precision binary arithmetic. The approximation is accurate to a tolerance of about \code{.Machine$double.eps}. If the true interpoint distance \eqn{d} and the threshold \code{r} are equal, or if their difference is no more than \code{.Machine$double.eps}, the result may be incorrect. } \value{ An integer vector of length equal to the number of points in \code{X}. } \author{ \adrian and \rolf } \seealso{ \code{\link{closepairs}} to identify all close pairs of points. } \examples{ a <- closepaircounts(cells, 0.1) sum(a) Y <- split(amacrine) b <- crosspaircounts(Y$on, Y$off, 0.1) } \keyword{spatial} \keyword{math} spatstat.model/man/update.rppm.Rd0000644000176200001440000000206414372605454016531 0ustar liggesusers\name{update.rppm} \alias{update.rppm} \title{Update a Recursively Partitioned Point Process Model} \description{ \code{update} method for class \code{"rppm"}.
} \usage{ \method{update}{rppm}(object, \dots, envir=environment(terms(object))) } \arguments{ \item{object}{ Fitted recursively partitioned point process model. An object of class \code{"rppm"}, obtained from \code{\link{rppm}}. } \item{\dots}{ Arguments passed to \code{\link{rppm}}. } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{rppm}}. } } \details{ \code{object} should be a fitted recursively partitioned point process model, obtained from the model-fitting function \code{\link{rppm}}. The model will be updated according to the new arguments provided. } \value{ Another fitted recursively partitioned point process model (object of class \code{"rppm"}). } \seealso{ \code{\link{rppm}}. } \examples{ fit <- rppm(nztrees ~ x) newfit <- update(fit, . ~ x + y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/parameters.Rd0000644000176200001440000000301614420376374016433 0ustar liggesusers\name{parameters} \alias{parameters} \alias{parameters.dppm} \alias{parameters.kppm} \alias{parameters.slrm} \alias{parameters.ppm} \alias{parameters.profilepl} \alias{parameters.interact} \alias{parameters.fii} \title{ Extract Model Parameters in Understandable Form } \description{ Given a fitted model of some kind, this function extracts all the parameters needed to specify the model, and returns them as a list. } \usage{ parameters(model, \dots) \method{parameters}{dppm}(model, \dots) \method{parameters}{kppm}(model, \dots) \method{parameters}{slrm}(model, \dots) \method{parameters}{ppm}(model, \dots) \method{parameters}{profilepl}(model, \dots) \method{parameters}{fii}(model, \dots) \method{parameters}{interact}(model, \dots) } \arguments{ \item{model}{ A fitted model of some kind. } \item{\dots}{ Arguments passed to methods. } } \details{ The argument \code{model} should be a fitted model of some kind. This function extracts all the parameters that would be needed to specify the model, and returns them as a list. The function \code{parameters} is generic, with methods for the classes \code{"ppm"}, \code{"kppm"}, \code{"dppm"}, \code{"profilepl"} and other classes. } \value{ A named list, whose format depends on the fitted model. } \author{ \spatstatAuthors } \seealso{ \code{\link{coef}} } \examples{ parameters(Strauss(0.1)) fit1 <- ppm(cells ~ x, Strauss(0.1)) parameters(fit1) fit2 <- kppm(redwood ~ x, "Thomas") parameters(fit2) } \keyword{spatial} \keyword{models} spatstat.model/man/coef.slrm.Rd0000644000176200001440000000173114331173076016155 0ustar liggesusers\name{coef.slrm} \Rdversion{1.1} \alias{coef.slrm} \title{ Coefficients of Fitted Spatial Logistic Regression Model } \description{ Extracts the coefficients (parameters) from a fitted Spatial Logistic Regression model. } \usage{ \method{coef}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for \code{\link{coef}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It extracts the fitted canonical parameters, i.e.\ the coefficients in the linear predictor of the spatial logistic regression. } \value{ Numeric vector of coefficients.
} \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) coef(fit) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/vcov.mppm.Rd0000644000176200001440000000541314331173100016203 0ustar liggesusers\name{vcov.mppm} \alias{vcov.mppm} \title{Calculate Variance-Covariance Matrix for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model, calculate the variance-covariance matrix of the parameter estimates. } \usage{ \method{vcov}{mppm}(object, ..., what="vcov", err="fatal") } \arguments{ \item{object}{ A multiple point process model (object of class \code{"mppm"}). } \item{\dots}{ Arguments recognised by \code{\link{vcov.ppm}}. } \item{what}{ Character string indicating which quantity should be calculated. Options include \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } \item{err}{ Character string indicating what action to take if an error occurs. Either \code{"fatal"}, \code{"warn"} or \code{"null"}. } } \details{ This is a method for the generic function \code{\link{vcov}}. The argument \code{object} should be a fitted multiple point process model (object of class \code{"mppm"}) generated by \code{\link{mppm}}. The variance-covariance matrix of the parameter estimates is computed using asymptotic theory for maximum likelihood (for Poisson processes) or estimating equations (for other Gibbs models). If \code{what="vcov"} (the default), the variance-covariance matrix is returned. If \code{what="corr"}, the variance-covariance matrix is normalised to yield a correlation matrix, and this is returned. If \code{what="fisher"}, the Fisher information matrix is returned instead. In all three cases, the rows and columns of the matrix correspond to the parameters (coefficients) in the same order as in \code{coef(model)}. If errors or numerical problems occur, the argument \code{err} determines what will happen. If \code{err="fatal"} an error will occur. If \code{err="warn"} a warning will be issued and \code{NA} will be returned. If \code{err="null"}, no warning is issued, but \code{NULL} is returned. } \value{ A numeric matrix (or \code{NA} or \code{NULL}). } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{vcov}}, \code{\link{vcov.ppm}}, \code{\link{mppm}} } \examples{ fit <- mppm(Wat ~x, data=hyperframe(Wat=waterstriders)) vcov(fit) } \references{ \baddrubaturnbook } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/vcov.ppm.Rd0000644000176200001440000002303614331173100016025 0ustar liggesusers\name{vcov.ppm} \alias{vcov.ppm} \title{Variance-Covariance Matrix for a Fitted Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted point process model.
} \usage{ \method{vcov}{ppm}(object, \dots, what = "vcov", verbose = TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), hessian=FALSE) } \arguments{ \item{object}{A fitted point process model (an object of class \code{"ppm"}).} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } \item{fine}{ Logical value indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}). } \item{verbose}{Logical. If \code{TRUE}, a message will be printed if various minor problems are encountered. } \item{gam.action}{String indicating what to do if \code{object} was fitted by \code{gam}. } \item{matrix.action}{String indicating what to do if the matrix is ill-conditioned (so that its inverse cannot be calculated). } \item{logi.action}{String indicating what to do if \code{object} was fitted via the logistic regression approximation using a non-standard dummy point process. } \item{nacoef.action}{ String indicating what to do if some of the fitted coefficients are \code{NA} (so that variance cannot be calculated). } \item{hessian}{ Logical. Use the negative Hessian matrix of the log pseudolikelihood instead of the Fisher information. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"ppm"}, typically produced by \code{\link{ppm}}. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.ppm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.ppm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.ppm}(object)}. For models fitted by the Berman-Turner approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the maximum pseudolikelihood (using the default \code{method="mpl"} in the call to \code{\link{ppm}}), the implementation works as follows. \itemize{ \item If the fitted model \code{object} is a Poisson process, the calculations are based on standard asymptotic theory for the maximum likelihood estimator (Kutoyants, 1998). The observed Fisher information matrix of the fitted model \code{object} is first computed, by summing over the Berman-Turner quadrature points in the fitted model. The asymptotic variance-covariance matrix is calculated as the inverse of the observed Fisher information. The correlation matrix is then obtained by normalising. \item If the fitted model is not a Poisson process (i.e.
it is some other Gibbs point process) then the calculations are based on Coeurjolly and Rubak (2013). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. } For models fitted by the Huang-Ogata method (\code{method="ho"} in the call to \code{\link{ppm}}), the implementation uses the Monte Carlo estimate of the Fisher information matrix that was computed when the original model was fitted. For models fitted by the logistic regression approximation to the maximum pseudolikelihood (\code{method="logi"} in the call to \code{\link{ppm}}), \itemize{ \item Calculations are based on Baddeley \emph{et al.} (2014). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. \item The calculations depend on the type of dummy pattern used when the model was fitted: \itemize{ \item currently only the dummy types \code{"stratrand"} (the default), \code{"binomial"} and \code{"poisson"} as generated by \code{\link{quadscheme.logi}} are supported. \item For other dummy types the behaviour depends on the argument \code{logi.action}. If \code{logi.action="fatal"} an error is produced. Otherwise, for dummy types \code{"grid"} and \code{"transgrid"} the formulas for \code{"stratrand"} are used which in many cases should be conservative. For an arbitrary, user-specified dummy pattern (type \code{"given"}), the formulas for \code{"poisson"} are used which in many cases should be conservative. If \code{logi.action="warn"} a warning is issued, otherwise the calculation proceeds without a warning. } \item The result of the calculation is \bold{random} (i.e. not deterministic) when dummy type is \code{"stratrand"} (the default) because some of the variance terms are estimated by random sampling. This can be avoided by specifying \code{dummytype='poisson'} or \code{dummytype='binomial'} in the call to \code{\link{ppm}} when the model is fitted. } The argument \code{verbose} makes it possible to suppress some diagnostic messages. The asymptotic theory is not correct if the model was fitted using \code{gam} (by calling \code{\link{ppm}} with \code{use.gam=TRUE}). The argument \code{gam.action} determines what to do in this case. If \code{gam.action="fatal"}, an error is generated. If \code{gam.action="warn"}, a warning is issued and the calculation proceeds using the incorrect theory for the parametric case, which is probably a reasonable approximation in many applications. If \code{gam.action="silent"}, the calculation proceeds without a warning. If \code{hessian=TRUE} then the negative Hessian (second derivative) matrix of the log pseudolikelihood, and its inverse, will be computed. For non-Poisson models, this is not a valid estimate of variance, but is useful for other calculations. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. If this message occurs, try repeating the calculation using \code{fine=TRUE}.
Singularity can occur because of numerical overflow or collinearity in the covariates. To check this, rescale the coordinates of the data points and refit the model. See the Examples. In a Gibbs model, a singular matrix may also occur if the fitted model is a hard core process: this is a feature of the variance estimator. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- ppm(X ~ x + y) vcov(fit) vcov(fit, what="Fish") # example of singular system m <- ppm(demopat ~polynom(x,y,2)) \donttest{ try(v <- vcov(m)) } # rescale x, y coordinates to range [0,1] x [0,1] approximately demopatScale <- rescale(demopat, 10000) m <- ppm(demopatScale ~ polynom(x,y,2)) v <- vcov(m) # Gibbs example fitS <- ppm(swedishpines ~1, Strauss(9)) coef(fitS) sqrt(diag(vcov(fitS))) } \author{ Original code for Poisson point process was written by \adrian and \rolf. New code for stationary Gibbs point processes was generously contributed by \ege and Jean-\Francois Coeurjolly. New code for generic Gibbs process written by \adrian. New code for logistic method written by \ege. } \seealso{ \code{\link{vcov}} for the generic, \code{\link{ppm}} for information about fitted models, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Coeurjolly, J.-F. and Rubak, E. (2013) Fast covariance estimation for innovations computed from a spatial Gibbs point process. \emph{Scandinavian Journal of Statistics} \bold{40} 669--684. Kutoyants, Y.A. (1998) \emph{Statistical Inference for Spatial Poisson Processes}, Lecture Notes in Statistics 134. New York: Springer. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/dppparbounds.Rd0000644000176200001440000000145014331173076016764 0ustar liggesusers\name{dppparbounds} \alias{dppparbounds} \title{Parameter Bound for a Determinantal Point Process Model} \description{ Returns the lower and upper bound for a specific parameter of a determinantal point process model when all other parameters are fixed. } \usage{dppparbounds(model, name, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{name}{name of the parameter for which the bound should be computed.} \item{\dots}{ Additional arguments passed to the \code{parbounds} function of the given model } } \value{A \code{data.frame} containing lower and upper bounds.} \author{ \adrian, \rolf and \ege } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) dppparbounds(model, "lambda") } \keyword{spatial} \keyword{models} spatstat.model/man/LambertW.Rd0000644000176200001440000000252614331173075016004 0ustar liggesusers\name{LambertW} \alias{LambertW} \title{ Lambert's W Function } \description{ Computes Lambert's W-function. } \usage{ LambertW(x) } \arguments{ \item{x}{ Vector of nonnegative numbers. } } \details{ Lambert's W-function is the inverse function of \eqn{f(y) = y e^y}{f(y) = y * exp(y)}. That is, \eqn{W} is the function such that \deqn{ W(x) e^{W(x)} = x }{ W(x) * exp(W(x)) = x } This command \code{LambertW} computes \eqn{W(x)} for each entry in the argument \code{x}. If the library \pkg{gsl} has been installed, then the function \code{lambert_W0} in that library is invoked. Otherwise, values of the W-function are computed by root-finding, using the function \code{\link[stats]{uniroot}}. Computation using \pkg{gsl} is about 100 times faster.
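Whichever computation method is used, the defining identity provides a quick numerical check of the result; a small sketch: \preformatted{
  x <- c(0.5, 1, 2, 10)
  w <- LambertW(x)
  w * exp(w)     # should recover x, up to numerical tolerance
}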
If any entries of \code{x} are infinite or \code{NA}, the corresponding results are \code{NA}. } \value{ Numeric vector. } \references{ Corless, R, Gonnet, G, Hare, D, Jeffrey, D and Knuth, D (1996), On the Lambert W function. \emph{Advances in Computational Mathematics}, \bold{5}, 325--359. Roy, R and Olver, F (2010), Lambert W function. In Olver, F, Lozier, D and Boisvert, R (eds.), \emph{{NIST} Handbook of Mathematical Functions}, Cambridge University Press. } \author{\adrian and \rolf } \examples{ LambertW(exp(1)) } \keyword{math} spatstat.model/man/addvar.Rd0000644000176200001440000001512714510474260015530 0ustar liggesusers\name{addvar} \alias{addvar} \title{ Added Variable Plot for Point Process Model } \description{ Computes the coordinates for an Added Variable Plot for a fitted point process model. } \usage{ addvar(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate to be added to the model. Either a pixel image, a \code{function(x,y)}, or a character string giving the name of a covariate that was supplied when the model was fitted. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{crosscheck}{ For developers only. Logical value indicating whether to perform cross-checks on the validity of the calculation. } } \details{ This command generates the plot coordinates for an Added Variable Plot for a spatial point process model. Added Variable Plots (Cox, 1958, sec 4.5; Wang, 1985) are commonly used in linear models and generalized linear models, to decide whether a model with response \eqn{y} and predictors \eqn{x} would be improved by including another predictor \eqn{z}. In a (generalised) linear model with response \eqn{y} and predictors \eqn{x}, the Added Variable Plot for a new covariate \eqn{z} is a plot of the smoothed Pearson residuals from the original model against the scaled residuals from a weighted linear regression of \eqn{z} on \eqn{x}. If this plot has nonzero slope, then the new covariate \eqn{z} is needed. For general advice see Cook and Weisberg (1999); Harrell (2001). Essentially the same technique can be used for a spatial point process model (Baddeley et al, 2013). The argument \code{model} should be a fitted spatial point process model (object of class \code{"ppm"}). The argument \code{covariate} identifies the covariate that is to be considered for addition to the model.
It should be either a pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving the values of the covariate at any spatial location. Alternatively \code{covariate} may be a character string, giving the name of a covariate that was supplied (in the \code{covariates} argument to \code{\link{ppm}}) when the model was fitted, but was not used in the model. The result of \code{addvar(model, covariate)} is an object belonging to the classes \code{"addvar"} and \code{"fv"}. Plot this object to generate the added variable plot. Note that the plot method shows the pointwise significance bands for a test of the \emph{null} model, i.e. the null hypothesis that the new covariate has no effect. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{addvar}. } \value{ An object of class \code{"addvar"} containing the coordinates for the added variable plot. There is a \code{plot} method. } \section{Internal data}{ The return value has an attribute \code{"spatial"} which contains the internal data: the computed values of the residuals, and of all relevant covariates, at each quadrature point of the model. It is an object of class \code{"ppp"} with a data frame of marks. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. Cook, R.D. and Weisberg, S. (1999) \emph{Applied regression, including computing and graphics}. New York: Wiley. Cox, D.R. (1958) \emph{Planning of Experiments}. New York: Wiley. Harrell, F. (2001) \emph{Regression Modeling Strategies}. New York: Springer. Wang, P. (1985) Adding a variable in generalized linear models. \emph{Technometrics} \bold{27}, 273--276. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{parres}}, \code{\link{rhohat}}, \code{\link{rho2hat}}. 
} \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X, ~y) adv <- addvar(model, "x") plot(adv) adv <- addvar(model, "x", subregion=square(0.5)) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/clusterfield.kppm.Rd0000644000176200001440000000333114374302015017710 0ustar liggesusers\name{clusterfield.kppm} \alias{clusterfield.kppm} \title{Field of clusters} \description{ Calculate the superposition of cluster kernels at the location of a point pattern. } \usage{ \method{clusterfield}{kppm}(model, locations = NULL, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster model (object of class \code{"kppm"}), a character string specifying the type of cluster model, or a function defining the cluster kernel. See Details. } \item{locations}{ A point pattern giving the locations of the kernels. Defaults to the centroid of the observation window for the \code{"kppm"} method and to the center of a unit square otherwise. } \item{\dots}{ Additional arguments passed to \code{\link[spatstat.explore]{density.ppp}} or the cluster kernel. See Details. } } \details{ The function \code{clusterfield} is generic, with a method for \code{"kppm"} (described here) and methods for \code{"character"} and \code{"function"}. The method \code{clusterfield.kppm} extracts the relevant information from the fitted model and calls \code{\link[spatstat.random]{clusterfield.function}}. The calculations are performed by \code{\link[spatstat.explore]{density.ppp}} and \code{\dots} arguments are passed thereto for control over the pixel resolution etc. (These arguments are then passed on to \code{\link[spatstat.geom]{pixellate.ppp}} and \code{\link[spatstat.geom]{as.mask}}.) } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{kppm}}, \code{\link[spatstat.random]{clusterfield}}. } \examples{ fit <- kppm(redwood~1, "Thomas") Z <- clusterfield(fit, eps = 0.01) } \author{ \spatstatAuthors. } \keyword{spatial} spatstat.model/man/vcov.slrm.Rd0000644000176200001440000000661414331173100016207 0ustar liggesusers\name{vcov.slrm} \alias{vcov.slrm} \title{Variance-Covariance Matrix for a Fitted Spatial Logistic Regression} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a point process model that was fitted by spatial logistic regression. } \usage{ \method{vcov}{slrm}(object, \dots, what=c("vcov", "corr", "fisher", "Fisher")) } \arguments{ \item{object}{A fitted point process model of class \code{"slrm"}.} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"slrm"}, typically produced by \code{\link{slrm}}. It represents a Poisson point process model fitted by spatial logistic regression. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.slrm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. 
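For example, approximate standard errors for the coefficients are the square roots of the diagonal entries of the variance-covariance matrix; a minimal sketch (with simulated data, as in the Examples): \preformatted{
  X <- rpoispp(42)
  fit <- slrm(X ~ x + y)
  sqrt(diag(vcov(fit)))    # standard errors of the fitted coefficients
}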
The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.slrm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.slrm}(object)}. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. Standard errors for the fitted intensity can be obtained using \code{\link{predict.slrm}}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. This can occur because of numerical overflow or collinearity in the covariates. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x + y) vcov(fit) vcov(fit, what="corr") vcov(fit, what="f") } \author{ \adrian and \rolf . } \seealso{ \code{\link{vcov}} for the generic, \code{\link{slrm}} for information about fitted models, \code{\link{predict.slrm}} for other kinds of calculation about the model, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. \code{DOI: 10.1214/10-EJS581} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/Kmodel.Rd0000644000176200001440000000326414331173075015502 0ustar liggesusers\name{Kmodel} \alias{Kmodel} \alias{pcfmodel} \title{K Function or Pair Correlation Function of a Point Process Model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a point process model. } \usage{ Kmodel(model, \dots) pcfmodel(model, \dots) } \arguments{ \item{model}{ A fitted point process model of some kind. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. The functions \code{Kmodel} and \code{pcfmodel} give the theoretical \eqn{K}-function and the theoretical pair correlation function for a point process model that has been fitted to data. The functions \code{Kmodel} and \code{pcfmodel} are generic, with methods for the classes \code{"kppm"} (cluster processes and Cox processes) and \code{"ppm"} (Gibbs processes). The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{Kmodel.kppm}} for the method for cluster processes and Cox processes. 
\code{\link{Kmodel.ppm}} for the method for Gibbs processes. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/MultiStrauss.Rd0000644000176200001440000000763414331173075016753 0ustar liggesusers\name{MultiStrauss} \alias{MultiStrauss} \title{The Multitype Strauss Point Process Model} \description{ Creates an instance of the multitype Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStrauss(radii, types=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ The (stationary) multitype Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is the pairwise interaction point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density. The nonstationary multitype Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the multitype Strauss process pairwise interaction is yielded by the function \code{MultiStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{MultiStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStrauss()}. 
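To illustrate the treatment of \code{NA} entries, the following sketch (the radius value \code{0.05} is arbitrary) specifies a model in which points of unlike types interact but points of the same type do not: \preformatted{
  r <- matrix(c(NA, 0.05, 0.05, NA), nrow=2, ncol=2)
  MultiStrauss(r)    # interaction between unlike types only
}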
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{MultiHard}} } \examples{ r <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStrauss(r) # prints a sensible description of itself r <- 0.03 * matrix(c(1,2,2,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[ owin(c(0, 0.8), c(0, 1)) ] } ppm(X ~1, MultiStrauss(r)) # fit the stationary multitype Strauss process to `amacrine' \donttest{ ppm(X ~polynom(x,y,3), MultiStrauss(r, c("off","on"))) # fit a nonstationary multitype Strauss process with log-cubic trend } } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStrauss(types=NULL, radii)}. The new code attempts to handle the old syntax as well. } \author{\spatstatAuthors.} \keyword{spatial} \keyword{models} spatstat.model/man/fitted.ppm.Rd0000644000176200001440000001177114331173076016344 0ustar liggesusers\name{fitted.ppm} \alias{fitted.ppm} \title{ Fitted Conditional Intensity for Point Process Model } \description{ Given a point process model fitted to a point pattern, compute the fitted conditional intensity or fitted trend of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. } \usage{ \method{fitted}{ppm}(object, \dots, type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, ignore.hardcore=FALSE, dropcoef=FALSE) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } \item{type}{ String (partially matched) indicating whether the fitted value is the conditional intensity (\code{"lambda"} or \code{"cif"}) or the first order trend (\code{"trend"}) or the logarithm of conditional intensity (\code{"link"}). } \item{dataonly}{ Logical. If \code{TRUE}, then values will only be computed at the points of the data point pattern. If \code{FALSE}, then values will be computed at all the points of the quadrature scheme used to fit the model, including the points of the data point pattern. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{ignore.hardcore}{ Advanced use only. Logical value specifying whether to compute only the finite part of the interaction potential (effectively removing any hard core interaction terms). } \item{dropcoef}{ Internal use only. 
} } \value{ A vector containing the values of the fitted conditional intensity, fitted spatial trend, or logarithm of the fitted conditional intensity. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \details{ The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the model-fitting algorithm \code{\link{ppm}}. This function evaluates the conditional intensity \eqn{\hat\lambda(u, x)}{lambdahat(u,x)} or spatial trend \eqn{\hat b(u)}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, where \code{x} is the original point pattern dataset to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}.) The argument \code{drop} is explained in \code{\link{quad.ppm}}. Use \code{\link{predict.ppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005). Residual analysis for spatial point processes (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{predict.ppm}} } \examples{ str <- ppm(cells ~x, Strauss(r=0.1)) lambda <- fitted(str) # extract quadrature points in corresponding order quadpoints <- union.quad(quad.ppm(str)) # plot conditional intensity values # as circles centred on the quadrature points quadmarked <- setmarks(quadpoints, lambda) plot(quadmarked) if(!interactive()) str <- ppm(cells ~ x) lambdaX <- fitted(str, leaveoneout=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/logLik.ppm.Rd0000644000176200001440000001241314331173076016300 0ustar liggesusers\name{logLik.ppm} \alias{logLik.ppm} \alias{deviance.ppm} \alias{AIC.ppm} \alias{extractAIC.ppm} \alias{nobs.ppm} \title{Log Likelihood and AIC for Point Process Model} \description{ Extracts the log likelihood, deviance, and AIC of a fitted Poisson point process model, or analogous quantities based on the pseudolikelihood or logistic likelihood for a fitted Gibbs point process model. } \usage{ \method{logLik}{ppm}(object, \dots, new.coef=NULL, warn=TRUE, absolute=FALSE) \method{deviance}{ppm}(object, \dots) \method{AIC}{ppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{ppm}(fit, scale=0, k=2, \dots, takeuchi=TRUE) \method{nobs}{ppm}(object, \dots) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"ppm"}.
} \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood or logistic likelihood is returned instead of the likelihood. } \item{absolute}{ Logical value indicating whether to include constant terms in the log likelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{extractAIC}} and \code{\link[stats]{nobs}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{logLik.ppm} computes the maximised value of the log likelihood for the fitted model \code{object} (as approximated by quadrature using the Berman-Turner approximation). If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning (if \code{warn=TRUE}). The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. The method \code{extractAIC.ppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood or logistic likelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and Hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. The method \code{nobs.ppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link[stats]{step}} uses these methods. } \section{Model comparison}{ The values of \code{logLik} and \code{AIC} returned by these functions are based on the \emph{pseudolikelihood} of the Gibbs point process model. If the model is a Poisson process, then the pseudolikelihood is the same as the likelihood, but for other Gibbs models, the pseudolikelihood is different from the likelihood (and the likelihood of a Gibbs model is hard to compute). For model comparison and model selection, it is valid to compare the \code{logLik} values, or to compare the \code{AIC} values, but only when all the models are of class \code{"ppm"}. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value.
\code{nobs} returns an integer value. } \references{ Varin, C. and Vidoni, P. (2005) A note on composite likelihood inference and model selection. \emph{Biometrika} \bold{92}, 519--528. } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{anova.ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{formula.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{terms.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \author{ \spatstatAuthors. } \examples{ fit <- ppm(cells, ~x) nobs(fit) logLik(fit) deviance(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat.model/man/parres.Rd0000644000176200001440000001766414510474260015573 0ustar liggesusers\name{parres} \alias{parres} \title{ Partial Residuals for Point Process Model } \description{ Computes the smoothed partial residuals, a diagnostic for transformation of a covariate in a Poisson point process model. } \usage{ parres(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw = "nrd0", adjust=1, from = NULL, to = NULL, n = 512, bw.input = c("points", "quad"), bw.restrict=FALSE, covname) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate of interest. Either a character string matching the name of one of the canonical covariates in the model, or one of the names \code{"x"} or \code{"y"} referring to the Cartesian coordinates, or one of the names of the covariates given when \code{model} was fitted, or a pixel image (object of class \code{"im"}) or \code{function(x,y)} supplying the values of a covariate at any location. If the \code{model} depends on only one covariate, then this covariate is the default; otherwise a covariate must be specified. } \item{smooth.effect}{ Logical. Determines the choice of algorithm. See Details. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } } \details{ This command computes the smoothed partial residual diagnostic (Baddeley, Chang, Song and Turner, 2012) for the transformation of a covariate in a Poisson point process model. The argument \code{model} must be a fitted Poisson point process model. The diagnostic works in two different ways: \describe{ \item{Canonical covariate:}{ The argument \code{covariate} may be a character string which is the name of one of the \emph{canonical covariates} in the model. 
The canonical covariates are the functions \eqn{Z_j}{Z[j]} that appear in the expression for the Poisson point process intensity \deqn{ \lambda(u) = \exp(\beta_1 Z_1(u) + \ldots + \beta_p Z_p(u)) }{ lambda(u) = exp(beta[1] * Z[1](u) + \ldots + beta[p] * Z[p](u)) } at spatial location \eqn{u}. Type \code{names(coef(model))} to see the names of the canonical covariates in \code{model}. If the selected covariate is \eqn{Z_j}{Z[j]}, then the diagnostic plot concerns the model term \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)}. The plot shows a smooth estimate of a function \eqn{h(z)} that should replace this linear term, that is, \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)} should be replaced by \eqn{h(Z_j(u))}{h(Z[j](u))}. The linear function is also plotted as a dotted line. } \item{New covariate:}{ If the argument \code{covariate} is a pixel image (object of class \code{"im"}) or a \code{function(x,y)}, it is assumed to provide the values of a covariate that is not present in the model. Alternatively \code{covariate} can be the name of a covariate that was supplied when the model was fitted (i.e. in the call to \code{\link{ppm}}) but which does not feature in the model formula. In either case we speak of a new covariate \eqn{Z(u)}. If the fitted model intensity is \eqn{\lambda(u)}{lambda(u)} then we consider modifying this to \eqn{\lambda(u) \exp(h(Z(u)))}{lambda(u) * exp(h(Z(u)))} where \eqn{h(z)} is some function. The diagnostic plot shows an estimate of \eqn{h(z)}. \bold{Warning: in this case the diagnostic is not theoretically justified. This option is provided for research purposes.} } } Alternatively \code{covariate} can be one of the character strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. The behaviour here depends on whether the coordinate was one of the canonical covariates in the model. If there is more than one canonical covariate in the model that depends on the specified \code{covariate}, then the covariate effect is computed using all these canonical covariates. For example, in a log-quadratic model which includes the terms \code{x} and \code{I(x^2)}, the quadratic effect involving both these terms will be computed. There are two choices for the algorithm. If \code{smooth.effect=TRUE}, the fitted covariate effect (according to \code{model}) is added to the point process residuals, then smoothing is applied to these values. If \code{smooth.effect=FALSE}, the point process residuals are smoothed first, and then the fitted covariate effect is added to the result. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}.
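For example, the following minimal sketch illustrates these bandwidth controls (the value \code{bw=0.1} is an arbitrary illustrative choice):
\preformatted{
  X <- rpoispp(function(x,y) { exp(3 + x + 2 * x^2) })
  model <- ppm(X ~ x + y)
  # fixed numeric bandwidth, scaled by the adjustment factor
  p1 <- parres(model, "x", bw=0.1, adjust=2)
  # bandwidth rule applied to covariate values at the quadrature points
  p2 <- parres(model, "x", bw="nrd0", bw.input="quad")
}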
} \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{parres}. } \value{ A function value table (object of class \code{"fv"}) containing the values of the smoothed partial residual, the estimated variance, and the fitted effect of the covariate. Also belongs to the class \code{"parres"} which has methods for \code{print} and \code{plot}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{addvar}}, \code{\link{rhohat}}, \code{\link{rho2hat}} } \examples{ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) tra <- parres(model, "x") plot(tra) tra plot(parres(model, "x", subregion=square(0.5))) model2 <- ppm(X ~x+I(x^2)+y) plot(parres(model2, "x")) Z <- setcov(owin()) plot(parres(model2, Z)) #' when the model involves only one covariate modelb <- ppm(bei ~ elev + I(elev^2), data=bei.extra) plot(parres(modelb)) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/berman.test.Rd0000644000176200001440000001435314510474260016511 0ustar liggesusers\name{berman.test.ppm} \alias{berman.test.ppm} \title{Berman's Tests for Point Process Model} \description{ Tests the goodness-of-fit of a Poisson point process model using methods of Berman (1986). } \usage{ \method{berman.test}{ppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ The spatial covariate on which the test will be based. An image (object of class \code{"im"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. } \item{\dots}{ Additional arguments controlling the pixel resolution (arguments \code{dimyx}, \code{eps} and \code{rule.eps} passed to \code{\link{as.mask}}) or other undocumented features. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The \eqn{Z_1}{Z[1]} test is also known as the Lawson-Waller test. The function \code{berman.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{berman.test(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. 
\item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{berman.test(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model. Thus, you must nominate a spatial covariate for this test. The argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) containing the values of a spatial function. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. Next the values of the \code{covariate} at all locations in the observation window are evaluated. The point process intensity of the fitted model is also evaluated at all locations in the window. \itemize{ \item If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is computed as follows. The sum \eqn{S} of the covariate values at all data points is evaluated. The predicted mean \eqn{\mu}{mu} and variance \eqn{\sigma^2}{sigma^2} of \eqn{S} are computed from the values of the covariate at all locations in the window. Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1] = (S-mu)/sigma}. Closely related tests were proposed independently by Waller et al (1992) and Lawson (1993) so this test is often termed the Lawson-Waller test in the epidemiological literature. \item If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is computed as follows. The values of the \code{covariate} at all locations in the observation window, weighted by the point process intensity, are compiled into a cumulative distribution function \eqn{F}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The standardised sample mean of these numbers is the statistic \eqn{Z_2}{Z[2]}. } In both cases the null distribution of the test statistic is the standard normal distribution, approximately. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. } \value{ An object of class \code{"htest"} (hypothesis test) and also of class \code{"bermantest"}, containing the results of the test. The return value can be plotted (by \code{\link{plot.bermantest}}) or printed to give an informative summary of the test. } \section{Warning}{ The meaning of a one-sided test must be carefully scrutinised: see the printed output. } \author{ \spatstatAuthors. } \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link[spatstat.model]{ppm}} } \references{ Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. Lawson, A.B.
(1993) On the analysis of mortality events around a prespecified fixed point. \emph{Journal of the Royal Statistical Society, Series A} \bold{156} (3) 363--377. Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992) Chronic Disease Surveillance and testing of clustering of disease and exposure: Application to leukaemia incidence and TCE-contaminated dumpsites in upstate New York. \emph{Environmetrics} \bold{3}, 281--300. } \examples{ # Berman's data X <- copper$SouthPoints L <- copper$SouthLines D <- distmap(L, eps=1) # test of fitted model fit <- ppm(X ~ x+y) berman.test(fit, D) } \keyword{htest} \keyword{spatial} \concept{Goodness-of-fit} spatstat.model/man/methods.fii.Rd0000644000176200001440000000540014331173077016474 0ustar liggesusers\name{methods.fii} \alias{methods.fii} %DoNotExport \Rdversion{1.1} \alias{print.fii} \alias{plot.fii} \alias{coef.fii} \alias{coef<-.fii} \alias{summary.fii} \alias{print.summary.fii} \alias{coef.summary.fii} \title{ Methods for Fitted Interactions } \description{ These are methods specifically for the class \code{"fii"} of fitted interpoint interactions. } \usage{ \method{print}{fii}(x, \dots) \method{coef}{fii}(object, \dots) \method{coef}{fii}(object, \dots) <- value \method{plot}{fii}(x, \dots) \method{summary}{fii}(object,\dots) \method{print}{summary.fii}(x, ...) \method{coef}{summary.fii}(object, ...) } \arguments{ \item{x,object}{ An object of class \code{"fii"} representing a fitted interpoint interaction. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ Numeric vector containing new values for the fitted interaction coefficients. } } \details{ These are methods for the class \code{"fii"}. An object of class \code{"fii"} represents a fitted interpoint interaction. It is usually obtained by using the command \code{\link{fitin}} to extract the fitted interaction part of a fitted point process model. See \code{\link{fitin}} for further explanation of this class. The commands listed here are methods for the generic functions \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[base]{plot}}, \code{\link[stats]{coef}} and \code{\link[nlme]{coef<-}} for objects of the class \code{"fii"}. Following the usual convention, \code{summary.fii} returns an object of class \code{summary.fii}, for which there is a print method. The effect is that, when the user types \code{summary(x)}, the summary is printed, but when the user types \code{y <- summary(x)}, the summary information is saved. The method \code{coef.fii} extracts the canonical coefficients of the fitted interaction, and returns them as a numeric vector. The method \code{coef.summary.fii} transforms these values into quantities that are more easily interpretable, in a format that depends on the particular model. There are also methods for the generic commands \code{\link{reach}} and \code{\link{as.interact}}, described elsewhere. } \value{ The \code{print} and \code{plot} methods return \code{NULL}. The \code{summary} method returns an object of class \code{summary.fii}. \code{coef.fii} returns a numeric vector. \code{coef.summary.fii} returns data whose structure depends on the model. 
} \author{ \adrian } \seealso{ \code{\link{fitin}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}} } \examples{ mod <- ppm(cells, ~1, Strauss(0.1)) f <- fitin(mod) f summary(f) plot(f) coef(f) coef(summary(f)) } \keyword{spatial} \keyword{methods} spatstat.model/man/slrm.Rd0000644000176200001440000001627514331173077015254 0ustar liggesusers\name{slrm} \alias{slrm} \title{Spatial Logistic Regression} \description{ Fits a spatial logistic regression model to a spatial point pattern. } \usage{ slrm(formula, ..., data = NULL, offset = TRUE, link = "logit", dataAtPoints=NULL, splitby=NULL) } \arguments{ \item{formula}{The model formula. See Details.} \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} determining the pixel resolution for the discretisation of the point pattern. } \item{data}{ Optional. A list containing data required in the formula. The names of entries in the list should correspond to variable names in the formula. The entries should be point patterns, pixel images or windows. } \item{offset}{ Logical flag indicating whether the model formula should be augmented by an offset equal to the logarithm of the pixel area. } \item{link}{The link function for the regression model. A character string, specifying a link function for binary regression. } \item{dataAtPoints}{Optional. Exact values of the covariates at the data points. A data frame, with column names corresponding to variables in the \code{formula}, with one row for each point in the point pattern dataset. } \item{splitby}{ Optional. Character string identifying a window. The window will be used to split pixels into sub-pixels. } } \details{ This function fits a Spatial Logistic Regression model (Tukey, 1972; Agterberg, 1974) to a spatial point pattern dataset. The logistic function may be replaced by another link function. The \code{formula} specifies the form of the model to be fitted, and the data to which it should be fitted. The \code{formula} must be an \R formula with a left and right hand side. The left hand side of the \code{formula} is the name of the point pattern dataset, an object of class \code{"ppp"}. The right hand side of the \code{formula} is an expression, in the usual \R formula syntax, representing the functional form of the linear predictor for the model. Each variable name that appears in the formula may be \itemize{ \item one of the reserved names \code{x} and \code{y}, referring to the Cartesian coordinates; \item the name of an entry in the list \code{data}, if this argument is given; \item the name of an object in the parent environment, that is, in the environment where the call to \code{slrm} was issued. } Each object appearing on the right hand side of the formula may be \itemize{ \item a pixel image (object of class \code{"im"}) containing the values of a covariate; \item a window (object of class \code{"owin"}), which will be interpreted as a logical covariate which is \code{TRUE} inside the window and \code{FALSE} outside it; \item a \code{function} in the \R language, with arguments \code{x,y}, which can be evaluated at any location to obtain the values of a covariate. } See the Examples below. The fitting algorithm discretises the point pattern onto a pixel grid. The value in each pixel is 1 if there are any points of the point pattern in the pixel, and 0 if there are no points in the pixel. 
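As a rough sketch of this binary response (an illustration of the idea only, not the internal code of \code{slrm}):
\preformatted{
  X <- copper$SouthPoints
  # indicator image: TRUE where a pixel contains at least one point
  presence <- pixellate(X, dimyx=32) > 0
  table(as.vector(as.matrix(presence)))
}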
The dimensions of the pixel grid will be determined as follows: \itemize{ \item The pixel grid will be determined by the extra arguments \code{\dots} if they are specified (for example the argument \code{dimyx} can be used to specify the number of pixels). \item Otherwise, if the right hand side of the \code{formula} includes the names of any pixel images containing covariate values, these images will determine the pixel grid for the discretisation. The covariate image with the finest grid (the smallest pixels) will be used. \item Otherwise, the default pixel grid size is given by \code{spatstat.options("npixel")}. } The covariates are evaluated at the centre of each pixel. If \code{dataAtPoints} is given, then the covariate values at the corresponding pixels are overwritten by the entries of \code{dataAtPoints} (and the spatial coordinates are overwritten by the exact spatial coordinates of the data points). If \code{link="logit"} (the default), the algorithm fits a Spatial Logistic Regression model. This model states that the probability \eqn{p} that a given pixel contains a data point, is related to the covariates through \deqn{\log\frac{p}{1-p} = \eta}{log(p/(1-p)) = eta} where \eqn{\eta}{eta} is the linear predictor of the model (a linear combination of the covariates, whose form is specified by the \code{formula}). If \code{link="cloglog"} then the algorithm fits a model stating that \deqn{\log(-\log(1-p)) = \eta}{log(-log(1-p)) = eta}. If \code{offset=TRUE} (the default), the model formula will be augmented by adding an offset term equal to the logarithm of the pixel area. This ensures that the fitted parameters are approximately independent of pixel size. If \code{offset=FALSE}, the offset is not included, and the traditional form of Spatial Logistic Regression is fitted. } \value{ An object of class \code{"slrm"} representing the fitted model. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. Automated stepwise model selection is possible using \code{\link{step}}. Confidence intervals for the parameters can be computed using \code{\link[stats]{confint}}. } \seealso{ \code{\link{anova.slrm}}, \code{\link{coef.slrm}}, \code{\link{fitted.slrm}}, \code{\link{logLik.slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{vcov.slrm}} } \references{ Agterberg, F.P. (1974) Automatic contouring of geological maps to detect target areas for mineral exploration. \emph{Journal of the International Association for Mathematical Geology} \bold{6}, 373--395. Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. \code{DOI: 10.1214/10-EJS581} Tukey, J.W. (1972) Discussion of paper by F.P. Agterberg and S.C. Robinson. \emph{Bulletin of the International Statistical Institute} \bold{44} (1) p. 596. Proceedings, 38th Congress, International Statistical Institute. 
} \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32) X <- copper$SouthPoints slrm(X ~ 1) slrm(X ~ x+y) slrm(X ~ x+y, link="cloglog") # specify a grid of 2-km-square pixels slrm(X ~ 1, eps=2) Y <- copper$SouthLines Z <- distmap(Y) slrm(X ~ Z) slrm(X ~ Z, dataAtPoints=list(Z=nncross(X,Y,what="dist"))) mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) slrm(gold ~ dfault, data=mur) slrm(gold ~ dfault + greenstone, data=mur) slrm(gold ~ dfault, data=mur, splitby="greenstone") if(offline) spatstat.options(op) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat.model/man/residuals.rppm.Rd0000644000176200001440000000336414331173077017242 0ustar liggesusers\name{residuals.rppm} \alias{residuals.rppm} \title{ Residuals for Recursively Partitioned Point Process Model } \description{ Given a point process model that was fitted to a point pattern by recursive partitioning (regression tree) methods, compute the residual measure. } \usage{ \method{residuals}{rppm}(object, type=c("raw", "inverse", "Pearson"), \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"rppm"}) for which residuals should be calculated. } \item{type}{ String (partially matched) indicating the type of residuals to be calculated. } \item{\dots}{ Ignored. } } \value{ A measure (object of class \code{"msr"}). } \details{ This function computes the residual measure for a point process model that was fitted to point pattern data by recursive partitioning of the covariates. The argument \code{object} must be a fitted model object of class \code{"rppm"}. Such objects are created by the fitting algorithm \code{\link{rppm}}. The type of residual is chosen by the argument \code{type}. } \seealso{ \code{\link{residuals.ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \examples{ fit <- rppm(bei ~ elev + grad, data=bei.extra) res <- residuals(fit) plot(res) } \author{ \adrian. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/pseudoR2.Rd0000644000176200001440000000400414331173077015765 0ustar liggesusers\name{pseudoR2} \alias{pseudoR2} \alias{pseudoR2.ppm} \alias{pseudoR2.slrm} \title{ Calculate Pseudo-R-Squared for Point Process Model } \description{ Given a fitted point process model, calculate the pseudo-R-squared value, which measures the fraction of variation in the data that is explained by the model. } \usage{ pseudoR2(object, \dots) \method{pseudoR2}{ppm}(object, \dots, keepoffset=TRUE) \method{pseudoR2}{slrm}(object, \dots, keepoffset=TRUE) } \arguments{ \item{object}{ Fitted point process model. An object of class \code{"ppm"} or \code{"slrm"}. } \item{keepoffset}{ Logical value indicating whether to retain offset terms in the model when computing the deviance difference. See Details. } \item{\dots}{ Additional arguments passed to \code{\link{deviance.ppm}} or \code{\link{deviance.slrm}}. } } \details{ The function \code{pseudoR2} is generic, with methods for fitted point process models of class \code{"ppm"} and \code{"slrm"}.
This function computes McFadden's pseudo-R-squared \deqn{ R^2 = 1 - \frac{D}{D_0} }{ R^2 = 1 - D/D0 } where \eqn{D} is the deviance of the fitted model \code{object}, and \eqn{D_0}{D0} is the deviance of the null model. Deviance is defined as twice the negative log-likelihood or log-pseudolikelihood. The null model is usually obtained by re-fitting the model using the trend formula \code{~1}. However, if the original model formula included \code{offset} terms, and if \code{keepoffset=TRUE} (the default), then the null model formula consists of these offset terms. This ensures that the \code{pseudoR2} value is non-negative. } \value{ A single numeric value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{deviance.ppm}}, \code{\link{deviance.slrm}}. } \examples{ fit <- ppm(swedishpines ~ x+y) pseudoR2(fit) xcoord <- as.im(function(x,y) x, Window(swedishpines)) fut <- ppm(swedishpines ~ offset(xcoord/200) + y) pseudoR2(fut) } \keyword{spatial} \keyword{models} spatstat.model/man/interactionorder.Rd0000644000176200001440000000352414374302015017634 0ustar liggesusers\name{interactionorder} \alias{interactionorder} \alias{interactionorder.isf} \alias{interactionorder.interact} \alias{interactionorder.fii} \alias{interactionorder.ppm} \title{ Determine the Order of Interpoint Interaction in a Model } \description{ Given a point process model, report the order of interpoint interaction. } \usage{ interactionorder(object) \method{interactionorder}{ppm}(object) \method{interactionorder}{interact}(object) \method{interactionorder}{isf}(object) \method{interactionorder}{fii}(object) } \arguments{ \item{object}{ A point process model (class \code{"ppm"}) or similar information. } } \details{ This function determines the order of interpoint interaction in a Gibbs point process model (or a related object). The interaction order is defined as the largest number \code{k} such that the probability density of the model contains terms involving \code{k} points at a time. For example, in a pairwise interaction process such as the Strauss process, the probability density contains interaction terms between each pair of points, but does not contain any terms that involve three points at a time, so the interaction order is 2. Poisson point processes have interaction order 1. Pairwise-interaction processes have interaction order 2. Point processes with the triplet interaction \code{\link{Triplets}} have interaction order 3. The Geyer saturation model \code{\link{Geyer}} and the area-interaction model \code{\link{AreaInter}} have infinite order of interaction. } \value{ A positive integer, or \code{Inf}. } \author{ \spatstatAuthors. } \examples{ interactionorder(ppm(cells ~ 1)) interactionorder(Strauss(0.1)) interactionorder(Triplets(0.1)) interactionorder(Geyer(0.1, 2)) interactionorder(Hybrid(Strauss(0.1), Triplets(0.2))) } \keyword{spatial} \keyword{models} spatstat.model/man/model.matrix.mppm.Rd0000644000176200001440000000401614331173077017640 0ustar liggesusers\name{model.matrix.mppm} \alias{model.matrix.mppm} \title{Extract Design Matrix of Point Process Model for Several Point Patterns} \description{ Given a point process model fitted to a list of point patterns, this function extracts the design matrix. } \usage{ \method{model.matrix}{mppm}(object, ..., keepNA=TRUE, separate=FALSE) } \arguments{ \item{object}{ A point process model fitted to several point patterns. An object of class \code{"mppm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}.
} \item{keepNA}{ Logical. Determines whether rows containing \code{NA} values will be deleted or retained. } \item{separate}{ Logical value indicating whether to split the model matrix into sub-matrices corresponding to each of the original point patterns. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a point process model fitted to several point patterns. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. The result is a matrix with one column for every constructed covariate in the model, and one row for every quadrature point. If \code{separate=TRUE} this matrix will be split into sub-matrices corresponding to the original point patterns, and the result will be a list containing these matrices. } \value{ A matrix (or list of matrices). Columns of the matrix are canonical covariates in the model. } \author{ \spatstatAuthors. } \seealso{ \code{\link{model.matrix}}, \code{\link{mppm}}. } \examples{ fit <- mppm(Points ~ Image + x, demohyper) head(model.matrix(fit)) # matrix with three columns: '(Intercept)', 'x' and 'Image' } \keyword{spatial} \keyword{models} spatstat.model/man/emend.ppm.Rd0000644000176200001440000000776414331173076016160 0ustar liggesusers\name{emend.ppm} \alias{emend.ppm} \alias{project.ppm} \title{ Force Point Process Model to be Valid } \description{ Ensures that a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ project.ppm(object, \dots, fatal=FALSE, trace=FALSE) \method{emend}{ppm}(object, \dots, fatal=FALSE, trace=FALSE) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{Ignored.} \item{fatal}{ Logical value indicating whether to generate an error if the model cannot be projected to a valid model. } \item{trace}{ Logical value indicating whether to print a trace of the decision process. } } \details{ The functions \code{emend.ppm} and \code{project.ppm} are identical: \code{emend.ppm} is a method for the generic \code{\link{emend}}, while \code{project.ppm} is an older name for the same function. The purpose of the function is to ensure that a fitted model is valid. The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, the fitted model returned by \code{\link{ppm}} may not actually exist as a point process. First, some of the fitted coefficients of the model may be \code{NA} or infinite values. This usually occurs when the data are insufficient to estimate all the parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. Second, unlike a regression model, which is well-defined for any finite values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated).
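For example, in the following minimal sketch, the interaction parameter fitted to the clustered \code{redwood} pattern typically violates this constraint:
\preformatted{
  fit <- ppm(redwood ~ 1, Strauss(0.1))
  valid.ppm(fit)       # typically FALSE: fitted gamma exceeds 1
  fit2 <- emend(fit)   # project to a valid submodel
  valid.ppm(fit2)      # TRUE
}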
By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{emend.ppm} or \code{project.ppm} modifies the model \code{object} so that the model is valid. It identifies the terms in the model \code{object} that are associated with illegal parameter values (i.e. parameter values which are either \code{NA}, infinite, or outside their permitted range). It considers all possible sub-models of \code{object} obtained by deleting one or more of these terms. It identifies which of these submodels are valid, and chooses the valid submodel with the largest pseudolikelihood. The result of \code{emend.ppm} or \code{project.ppm} is the true maximum pseudolikelihood fit to the data. For large datasets or complex models, the algorithm used in \code{emend.ppm} or \code{project.ppm} may be time-consuming, because it takes time to compute all the sub-models. A faster, approximate algorithm can be applied by setting \code{spatstat.options(project.fast=TRUE)}. This produces a valid submodel, which may not be the maximum pseudolikelihood submodel. Use the function \code{\link{valid.ppm}} to check whether a fitted model object specifies a well-defined point process. Use the expression \code{all(is.finite(coef(object)))} to determine whether all parameters are identifiable. } \value{ Another point process model (object of class \code{"ppm"}). } \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{valid.ppm}}, \code{\link{emend}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(redwood ~1, Strauss(0.1)) coef(fit) fit2 <- emend(fit) coef(fit2) } \keyword{spatial} \keyword{models} spatstat.model/man/is.ppm.Rd0000644000176200001440000000145014331173076015471 0ustar liggesusers\name{is.ppm} \alias{is.ppm} \alias{is.lppm} \alias{is.kppm} \alias{is.slrm} \title{Test Whether An Object Is A Fitted Point Process Model} \description{ Checks whether its argument is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \usage{ is.ppm(x) is.kppm(x) is.lppm(x) is.slrm(x) } \arguments{ \item{x}{Any object.} } \details{ These functions test whether the object \code{x} is a fitted point process model object of the specified class. The result of \code{is.ppm(x)} is \code{TRUE} if \code{x} has \code{"ppm"} amongst its classes, and otherwise \code{FALSE}. Similarly for the other functions. } \value{ A single logical value. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.model/man/logLik.dppm.Rd0000644000176200001440000000554514331173076016454 0ustar liggesusers\name{logLik.dppm} \alias{logLik.dppm} \alias{AIC.dppm} \alias{extractAIC.dppm} \alias{nobs.dppm} \title{Log Likelihood and AIC for Fitted Determinantal Point Process Model} \description{ Extracts the log Palm likelihood, deviance, and AIC of a fitted determinantal point process model. } \usage{ \method{logLik}{dppm}(object, ...) \method{AIC}{dppm}(object, \dots, k=2) \method{extractAIC}{dppm}(fit, scale=0, k=2, \dots) \method{nobs}{dppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"dppm"}. 
} \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted determinantal point process model. It is obtained from the model-fitting function \code{\link{dppm}}. These methods apply only when the model was fitted by maximising the Palm likelihood (Tanaka et al, 2008) by calling \code{\link{dppm}} with the argument \code{method="palm"}. The method \code{logLik.dppm} computes the maximised value of the log Palm likelihood for the fitted model \code{object}. The methods \code{AIC.dppm} and \code{extractAIC.dppm} compute the Akaike Information Criterion AIC for the fitted model based on the Palm likelihood (Tanaka et al, 2008) \deqn{ AIC = -2 \log(PL) + k \times \mbox{edf} }{ AIC = -2 * log(PL) + k * edf } where \eqn{PL} is the maximised Palm likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.dppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods, but it does not work for determinantal models yet due to a missing implementation of \code{update.dppm}. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. } \seealso{ \code{\link{dppm}}, \code{\link{logLik.ppm}} } \author{\adrian, \rolf and \ege } \examples{ fit <- dppm(swedishpines ~ x, dppGauss(), method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) } \keyword{spatial} \keyword{models} spatstat.model/man/residuals.ppm.Rd0000644000176200001440000002006614510474261017054 0ustar liggesusers\name{residuals.ppm} \alias{residuals.ppm} \title{ Residuals for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{ppm}(object, type="raw", \dots, check=TRUE, drop=FALSE, fittedvalues=NULL, new.coef=NULL, dropcoef=FALSE, quad=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which residuals should be calculated. } \item{type}{ String indicating the type of residuals to be calculated. Current options are \code{"raw"}, \code{"inverse"}, \code{"pearson"} and \code{"score"}. A partial match is adequate. } \item{\dots}{ Ignored. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. See \code{\link{quad.ppm}} for explanation.
} \item{fittedvalues}{ Vector of fitted values for the conditional intensity at the quadrature points, from which the residuals will be computed. For expert use only. } \item{new.coef}{ Optional. Numeric vector of coefficients for the model, replacing \code{coef(object)}. See the section on Modified Residuals below. } \item{dropcoef}{ Internal use only. } \item{quad}{ Optional. Data specifying how to re-fit the model. A list of arguments passed to \code{\link{quadscheme}}. See the section on Modified Residuals below. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function computes several kinds of residuals for the fit of a point process model to a spatial point pattern dataset (Baddeley et al, 2005). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This fitted model object contains complete information about the original data pattern. Residuals are attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals over all (data and dummy) points in a spatial region \eqn{B} has mean zero. For further explanation, see Baddeley et al (2005). The type of residual is chosen by the argument \code{type}. Current options are \describe{ \item{\code{"raw"}:}{ the raw residuals \deqn{ r_j = z_j - w_j \lambda_j }{ r[j] = z[j] - w[j] lambda[j] } at the quadrature points \eqn{u_j}{u[j]}, where \eqn{z_j}{z[j]} is the indicator equal to 1 if \eqn{u_j}{u[j]} is a data point and 0 if \eqn{u_j}{u[j]} is a dummy point; \eqn{w_j}{w[j]} is the quadrature weight attached to \eqn{u_j}{u[j]}; and \deqn{\lambda_j = \hat\lambda(u_j,x)}{lambda[j] = lambda(u[j],x)} is the conditional intensity of the fitted model at \eqn{u_j}{u[j]}. These are the spatial analogue of the martingale residuals of a one-dimensional counting process. } \item{\code{"inverse"}:}{ the `inverse-lambda' residuals (Baddeley et al, 2005) \deqn{ r^{(I)}_j = \frac{r_j}{\lambda_j} = \frac{z_j}{\lambda_j} - w_j }{ rI[j] = r[j]/lambda[j] = z[j]/lambda[j] - w[j] } obtained by dividing the raw residuals by the fitted conditional intensity. These are a counterpart of the exponential energy marks (see \code{\link{eem}}). } \item{\code{"pearson"}:}{ the Pearson residuals (Baddeley et al, 2005) \deqn{ r^{(P)}_j = \frac{r_j}{\sqrt{\lambda_j}} = \frac{z_j}{\sqrt{\lambda_j}} - w_j \sqrt{\lambda_j} }{ rP[j] = r[j]/sqrt(lambda[j]) = z[j]/sqrt(lambda[j]) - w[j] sqrt(lambda[j]) } obtained by dividing the raw residuals by the square root of the fitted conditional intensity. The Pearson residuals are standardised, in the sense that if the model (true and fitted) is Poisson, then the sum of the Pearson residuals in a spatial region \eqn{B} has variance equal to the area of \eqn{B}. } \item{\code{"score"}:}{ the score residuals (Baddeley et al, 2005) \deqn{ r_j = (z_j - w_j \lambda_j) x_j }{ r[j] = (z[j] - w[j] lambda[j]) * x[j,] } obtained by multiplying the raw residuals \eqn{r_j}{r[j]} by the covariates \eqn{x_j}{x[j,]} for quadrature point \eqn{j}. The score residuals always sum to zero. 
} } The result of \code{residuals.ppm} is a measure (object of class \code{"msr"}). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. Use \code{\link{integral.msr}} to compute the total residual. By default, the window of the measure is the same as the original window of the data. If \code{drop=TRUE} then the window is the domain of integration of the pseudolikelihood or composite likelihood. This only matters when the model \code{object} was fitted using the border correction: in that case, if \code{drop=TRUE} the window of the residuals is the erosion of the original data window by the border correction distance \code{rbord}. } \section{Modified Residuals}{ Sometimes we want to modify the calculation of residuals by using different values for the model parameters. This capability is provided by the arguments \code{new.coef} and \code{quad}. If \code{new.coef} is given, then the residuals will be computed by taking the model parameters to be \code{new.coef}. This should be a numeric vector of the same length as the vector of fitted model parameters \code{coef(object)}. If \code{new.coef} is missing and \code{quad} is given, then the model parameters will be determined by re-fitting the model using a new quadrature scheme specified by \code{quad}. Residuals will be computed for the original model \code{object} using these new parameter values. The argument \code{quad} should normally be a list of arguments in \code{name=value} format that will be passed to \code{\link{quadscheme}} (together with the original data points) to determine the new quadrature scheme. It may also be a quadrature scheme (object of class \code{"quad"}) to which the model should be fitted, or a point pattern (object of class \code{"ppp"}) specifying the \emph{dummy points} in a new quadrature scheme. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \seealso{ \code{\link{msr}}, \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells, ~x, Strauss(r=0.15)) # Pearson residuals rp <- residuals(fit, type="pe") rp # simulated data X <- rStrauss(100,0.7,0.05) # fit Strauss model fit <- ppm(X, ~1, Strauss(0.05)) res.fit <- residuals(fit) # check that total residual is 0 integral.msr(residuals(fit, drop=TRUE)) # true model parameters truecoef <- c(log(100), log(0.7)) res.true <- residuals(fit, new.coef=truecoef) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/matclust.estK.Rd0000644000176200001440000001435114331173076017030 0ustar liggesusers\name{matclust.estK} \alias{matclust.estK} \title{Fit the Matern Cluster Point Process by Minimum Contrast} \description{ Fits the \Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast. } \usage{ matclust.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the \Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the \Matern Cluster process. 
} \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the \Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the \Matern Cluster point process to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical \eqn{K} function of the \Matern Cluster process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The \Matern Cluster point process is described in \Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R}{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical \eqn{K}-function of the \Matern Cluster process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa h(\frac{r}{2R}) }{ K(r) = pi r^2 + h(r/(2*R))/kappa } where the radius R is the parameter \code{scale} and \deqn{ h(z) = 2 + \frac 1 \pi [ ( 8 z^2 - 4 ) \mbox{arccos}(z) - 2 \mbox{arcsin}(z) + 4 z \sqrt{(1 - z^2)^3} - 6 z \sqrt{1 - z^2} ] }{ h(z) = 2 + (1/pi) * ((8 * z^2 - 4) * arccos(z) - 2 * arcsin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 1} for \eqn{z > 1}. The theoretical intensity of the \Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
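When \code{X} is a summary statistic, the intensity estimate can be supplied explicitly so that \eqn{\mu}{mu} can still be recovered. A minimal sketch (the choice of the \code{redwood} data, these starting values, and the use of \code{intensity} to estimate \eqn{\lambda}{lambda} are illustrative assumptions only):
\preformatted{
  K <- Kest(redwood)
  # supply an intensity estimate; otherwise mu would be returned as NA
  fit <- matclust.estK(K, startpar=c(kappa=10, scale=0.1),
                       lambda=intensity(redwood))
  fit$par
}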
The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The \Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous \Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \rasmus. Adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ u <- matclust.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat.model/man/logLik.kppm.Rd0000644000176200001440000000725214331173076016460 0ustar liggesusers\name{logLik.kppm} \alias{logLik.kppm} \alias{AIC.kppm} \alias{extractAIC.kppm} \alias{nobs.kppm} \title{Log Likelihood and AIC for Fitted Cox or Cluster Point Process Model} \description{ Extracts the log composite likelihood, deviance, and AIC of a fitted Cox or cluster point process model. } \usage{ \method{logLik}{kppm}(object, ...) \method{AIC}{kppm}(object, \dots, k=2) \method{extractAIC}{kppm}(fit, scale=0, k=2, \dots) \method{nobs}{kppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"kppm"}. } \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted Cox or cluster point process model. It is obtained from the model-fitting function \code{\link{kppm}}. These methods apply only when the model was fitted by maximising a composite likelihood: either the Palm likelihood (Tanaka et al, 2008) or the second order composite likelihood (Guan, 2006), by calling \code{\link{kppm}} with the argument \code{method="palm"} or \code{method="clik2"} respectively. The method \code{logLik.kppm} computes the maximised value of the log composite likelihood for the fitted model \code{object}. 
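For example, a minimal sketch (the choice of the \code{redwood} data and the Thomas model is purely illustrative):
\preformatted{
  fit <- kppm(redwood ~ 1, "Thomas", method="palm")
  logLik(fit)   # maximised log Palm likelihood
}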
The methods \code{AIC.kppm} and \code{extractAIC.kppm} compute the Akaike Information Criterion AIC for the fitted model based on the composite likelihood \deqn{ AIC = -2 \log(CL) + k \times \mbox{edf} }{ AIC = -2 * log(CL) + k * edf } where \eqn{CL} is the maximised composite likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.kppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods. } \section{Model comparison}{ The values of log-likelihood and AIC returned by these functions are based on the \emph{composite likelihood} of the cluster process or Cox process model. They are available only when the model was fitted using \code{method="palm"} or \code{method="clik2"}. For model comparison and model selection, it is valid to compare the \code{logLik} values, or to compare the \code{AIC} values, but only when all the models are of class \code{"kppm"} and were fitted using the same \code{method}. For \code{method="palm"} some theoretical justification was provided by Tanaka et al (2008). } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. } \seealso{ \code{\link{kppm}}, \code{\link{logLik.ppm}} } \author{ \spatstatAuthors. } \examples{ fit <- kppm(redwood ~ x, "Thomas", method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat.model/man/bc.ppm.Rd0000644000176200001440000000363514331173076015451 0ustar liggesusers\name{bc.ppm} \alias{bc} \alias{bc.ppm} \title{ Bias Correction for Fitted Model } \description{ Applies a first-order bias correction to a fitted model. } \usage{ bc(fit, \dots) \method{bc}{ppm}(fit, \dots, nfine = 256) } \arguments{ \item{fit}{ A fitted point process model (object of class \code{"ppm"}) or a model of some other class. } \item{\dots}{ Additional arguments are currently ignored. } \item{nfine}{ Grid dimensions for fine grid of locations. An integer, or a pair of integers. See Details. } } \details{ This command applies the first order Newton-Raphson bias correction method of Baddeley and Turner (2014, sec 4.2) to a fitted model. The function \code{bc} is generic, with a method for fitted point process models of class \code{"ppm"}. A fine grid of locations, of dimensions \code{nfine * nfine} or \code{nfine[2] * nfine[1]}, is created over the original window of the data, and the intensity or conditional intensity of the fitted model is calculated on this grid. The result is used to update the fitted model parameters once by a Newton-Raphson update. This is only useful if the quadrature points used to fit the original model \code{fit} are coarser than the grid of points specified by \code{nfine}.
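For example, the corrected coefficients can be compared with the original estimates and, as a sketch only (whether this is appropriate depends on the application), used for prediction via the \code{new.coef} argument of \code{\link{predict.ppm}}:
\preformatted{
  fit <- ppm(cells ~ x, Strauss(0.07))
  beta.bc <- bc(fit, nfine=128)    # bias-corrected coefficients
  rbind(original=coef(fit), corrected=beta.bc)
  lam.bc <- predict(fit, new.coef=beta.bc)
}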
} \value{ A numeric vector, of the same length as \code{coef(fit)}, giving updated values for the fitted model coefficients. } \references{ Baddeley, A. and Turner, R. (2014) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{84}, 1621--1643. DOI: 10.1080/00949655.2012.755976 } \author{ \adrian and \rolf. } \seealso{ \code{\link{rex}} } \examples{ fit <- ppm(cells ~ x, Strauss(0.07)) coef(fit) if(!interactive()) { bc(fit, nfine=64) } else { bc(fit) } } \keyword{spatial} \keyword{models} \keyword{math} \keyword{optimize} spatstat.model/man/emend.slrm.Rd0000644000176200001440000000526314331173076016335 0ustar liggesusers\name{emend.slrm} \alias{emend.slrm} \title{ Force Spatial Logistic Regression Model to be Valid } \description{ Ensures that a fitted spatial logistic regression specifies a well-defined model. } \usage{ \method{emend}{slrm}(object, \dots, fatal=FALSE, trace=FALSE) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"slrm"}). } \item{\dots}{Ignored.} \item{fatal}{ Logical value indicating whether to generate an error if the model cannot be projected to a valid model. } \item{trace}{ Logical value indicating whether to print a trace of the decision process. } } \details{ \code{emend.slrm} is a method for the generic \code{\link{emend}}. The purpose of the function is to ensure that a fitted model is valid. The model-fitting function \code{\link{slrm}} fits spatial logistic regression models to point pattern data. In some circumstances, the fitted model returned by \code{\link{slrm}} may not specify a well-defined model, because some of the fitted coefficients of the model may be \code{NA} or infinite values. This usually occurs when the data are insufficient to estimate all the parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. The function \code{emend.slrm} modifies the model \code{object} so that the model is valid. It identifies the terms in the model \code{object} that are associated with illegal parameter values (i.e. parameter values which are either \code{NA}, infinite, or outside their permitted range). It considers all possible sub-models of \code{object} obtained by deleting one or more of these terms. It identifies which of these submodels are valid, and chooses the valid submodel with the largest likelihood. The result of \code{emend.slrm} or \code{project.slrm} is the true maximum likelihood fit to the data. For large datasets or complex models, the algorithm used in \code{emend.slrm} may be time-consuming, because it takes time to compute all the sub-models. A faster, approximate algorithm can be applied by setting \code{spatstat.options(project.fast=TRUE)}. This produces a valid submodel, which may not be the maximum likelihood submodel. Use the function \code{\link{valid.slrm}} to check whether a fitted model object specifies a well-defined model. } \value{ Another point process model (object of class \code{"slrm"}). } \author{ \spatstatAuthors.
} \seealso{ \code{\link{slrm}}, \code{\link{valid.slrm}}, \code{\link{emend}}, \code{\link{spatstat.options}} } \examples{ fit <- slrm(redwood ~ x + I(x)) coef(fit) fit2 <- emend(fit) coef(fit2) } \keyword{spatial} \keyword{models} spatstat.model/man/as.layered.msr.Rd0000644000176200001440000000162114331173076017112 0ustar liggesusers\name{as.layered.msr} \alias{as.layered.msr} \title{Convert Measure To Layered Object} \description{ Converts a measure into a layered object. } \usage{ \method{as.layered}{msr}(X) } \arguments{ \item{X}{ A measure (object of class \code{"msr"}). } } \value{ An object of class \code{"layered"} (see \code{\link{layered}}). } \details{ This function converts the object \code{X} into an object of class \code{"layered"}. It is a method for the generic \code{\link{as.layered}} for the class of measures. If \code{X} is a vector-valued measure, then \code{as.layered(X)} consists of several layers, each containing a scalar-valued measure. } \seealso{ \code{\link{as.layered}}, \code{\link{msr}}. } \examples{ P <- rpoispp(100) fit <- ppm(P ~ x+y) rs <- residuals(fit, type="score") as.layered(rs) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.model/man/rmh.ppm.Rd0000644000176200001440000002116614331173077015653 0ustar liggesusers\name{rmh.ppm} \alias{rmh.ppm} \title{Simulate from a Fitted Point Process Model} \description{ Given a point process model fitted to data, generate a random simulation of the model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{ppm}(model, start=NULL, control=default.rmhcontrol(model, w=w), \dots, w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, new.coef=NULL) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}) which it is desired to simulate. This fitted model is usually the result of a call to \code{\link{ppm}}. See \bold{Details} below. } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. Defaults to \code{list(x.start=data.ppm(model))} } \item{control}{Data controlling the iterative behaviour of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{w}{ Optional. Window in which the simulations should be generated. Default is the window of the original data. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{nsim}{ Number of simulated point patterns that should be generated. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } \item{saveinfo}{ Logical value indicating whether to save auxiliary information. } \item{verbose}{ Logical flag indicating whether to print progress reports. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ A point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}}) or a list of point patterns. 
} \details{ This function generates simulated realisations from a point process model that has been fitted to point pattern data. It is a method for the generic function \code{\link{rmh}} for the class \code{"ppm"} of fitted point process models. To simulate other kinds of point process models, see \code{\link{rmh}} or \code{\link{rmh.default}}. The argument \code{model} describes the fitted model. It must be an object of class \code{"ppm"} (see \code{\link{ppm.object}}), and will typically be the result of a call to the point process model fitting function \code{\link{ppm}}. The current implementation enables simulation from any fitted model involving the interactions \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Softcore}}, including nonstationary models. See the examples. It is also possible to simulate \emph{hybrids} of several such models. See \code{\link{Hybrid}} and the examples. It is possible that the fitted coefficients of a point process model may be ``illegal'', i.e. that there may not exist a mathematically well-defined point process with the given parameter values. For example, a Strauss process with interaction parameter \eqn{\gamma > 1}{gamma > 1} does not exist, but the model-fitting procedure used in \code{\link{ppm}} will sometimes produce values of \eqn{\gamma}{gamma} greater than 1. In such cases, if \code{project=FALSE} then an error will occur, while if \code{project=TRUE} then \code{rmh.ppm} will find the nearest legal model and simulate this model instead. (The nearest legal model is obtained by projecting the vector of coefficients onto the set of valid coefficient vectors. The result is usually the Poisson process with the same fitted intensity.) The arguments \code{start} and \code{control} are lists of parameters determining the initial state and the iterative behaviour, respectively, of the Metropolis-Hastings algorithm. The argument \code{start} is passed directly to \code{\link{rmhstart}}. See \code{\link{rmhstart}} for details of the parameters of the initial state, and their default values. The argument \code{control} is first passed to \code{\link{rmhcontrol}}. Then if any additional arguments \code{\dots} are given, \code{\link{update.rmhcontrol}} is called to update the parameter values. See \code{\link{rmhcontrol}} for details of the iterative behaviour parameters, and \code{\link{default.rmhcontrol}} for their default values. Note that if you specify expansion of the simulation window using the parameter \code{expand} (so that the model will be simulated on a window larger than the original data window) then the model must be capable of extrapolation to this larger window. This is usually not possible for models which depend on external covariates, because the domain of a covariate image is usually the same as the domain of the fitted model. After extracting the relevant information from the fitted model object \code{model}, \code{rmh.ppm} invokes the default \code{rmh} algorithm \code{\link{rmh.default}}, unless the model is Poisson. If the model is Poisson then the Metropolis-Hastings algorithm is not needed, and the model is simulated directly, using one of \code{\link{rpoispp}}, \code{\link{rmpoispp}}, \code{\link{rpoint}} or \code{\link{rmpoint}}. 
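For example, several independent realisations can be generated in a single call; a minimal sketch (using the \code{swedishpines} data and the interaction radius from the Examples below):
\preformatted{
  fit <- ppm(swedishpines ~ 1, Strauss(r=7))
  # three simulated patterns, returned as a list because drop=FALSE
  Xlist <- rmh(fit, nsim=3, drop=FALSE, verbose=FALSE)
}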
See \code{\link{rmh.default}} for further information about the implementation, or about the Metropolis-Hastings algorithm. } \section{Warnings}{ See Warnings in \code{\link{rmh.default}}. } \seealso{ \code{\link{simulate.ppm}}, \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhcontrol}}, \code{\link{default.rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{rmh.default}}, \code{\link{ppp.object}}, \code{\link{ppm}}, Interactions: \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}} } \examples{ live <- interactive() op <- spatstat.options() spatstat.options(rmh.nrep=1e5) Nrep <- 1e5 X <- swedishpines if(live) plot(X, main="Swedish Pines data") # Poisson process fit <- ppm(X, ~1, Poisson()) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Poisson model") # Strauss process fit <- ppm(X, ~1, Strauss(r=7)) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Strauss model") if(live) { # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) Xsim <- rmh(fit, nrep=Nrep, expand=2, periodic=TRUE) } if(live) { X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) } # marked point pattern Y <- amacrine if(live) { # marked Poisson models fit <- ppm(Y) fit <- ppm(Y,~marks) fit <- ppm(Y,~polynom(x,2)) fit <- ppm(Y,~marks+polynom(x,2)) fit <- ppm(Y,~marks*polynom(x,y,2)) Ysim <- rmh(fit) } # multitype Strauss models MS <- MultiStrauss(radii=matrix(0.07, ncol=2, nrow=2), types = levels(Y$marks)) if(live) { fit <- ppm(Y ~marks, MS) Ysim <- rmh(fit) } fit <- ppm(Y ~ marks*polynom(x,y,2), MS) Ysim <- rmh(fit) if(live) plot(Ysim, main="simulation from fitted inhomogeneous Multitype Strauss") spatstat.options(op) if(live) { # Hybrid model fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) Y <- rmh(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{datagen} spatstat.model/man/summary.ppm.Rd0000644000176200001440000000506114331173077016556 0ustar liggesusers\name{summary.ppm} \alias{summary.ppm} \alias{print.summary.ppm} \title{Summarizing a Fitted Point Process Model} \description{ \code{summary} method for class \code{"ppm"}. } \usage{ \method{summary}{ppm}(object, \dots, quick=FALSE, fine=FALSE) \method{print}{summary.ppm}(x, \dots) } \arguments{ \item{object}{A fitted point process model.} \item{\dots}{Ignored.} \item{quick}{Logical flag controlling the scope of the summary.} \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} determining whether to compute the quick, coarse estimate of variance (\code{fine=FALSE}, the default) or the slower, finer estimate (\code{fine=TRUE}). } \item{x}{Object of class \code{"summary.ppm"} as returned by \code{summary.ppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}} for details of this class.
\code{summary.ppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. (If \code{quick=TRUE} then only the information about the type of model is extracted.) \code{print.summary.ppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.ppm} is invoked implicitly when the user calls \code{summary.ppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.ppm} returns an object of class \code{"summary.ppm"}, while \code{print.summary.ppm} returns \code{NULL}. } \examples{ # invent some data X <- rpoispp(42) # fit a model to it fit <- ppm(X ~ x, Strauss(r=0.1)) # summarize the fitted model summary(fit) # `quick' option summary(fit, quick=TRUE) # coefficients with standard errors and CI coef(summary(fit)) coef(summary(fit, fine=TRUE)) # save the full summary s <- summary(fit) # print it print(s) s # extract stuff names(s) coef(s) s$args$correction s$name s$trend$value \donttest{ # multitype pattern fit <- ppm(demopat ~marks, Poisson()) summary(fit) } # model with external covariates fitX <- ppm(X, ~Z, covariates=list(Z=function(x,y){x+y})) summary(fitX) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/ippm.Rd0000644000176200001440000001340114331173076015227 0ustar liggesusers\name{ippm} \alias{ippm} \title{ Fit Point Process Model Involving Irregular Trend Parameters } \description{ Experimental extension to \code{ppm} which finds optimal values of the irregular trend parameters in a point process model. } \usage{ ippm(Q, \dots, iScore=NULL, start=list(), covfunargs=start, nlm.args=list(stepmax=1/2), silent=FALSE, warn.unused=TRUE) } \arguments{ \item{Q,\dots}{ Arguments passed to \code{\link{ppm}} to fit the point process model. } \item{iScore}{ Optional. A named list of \R functions that compute the partial derivatives of the logarithm of the trend, with respect to each irregular parameter. See Details. } \item{start}{ Named list containing initial values of the irregular parameters over which to optimise. } \item{covfunargs}{ Argument passed to \code{\link{ppm}}. A named list containing values for \emph{all} irregular parameters required by the covariates in the model. Must include all the parameters named in \code{start}. } \item{nlm.args}{ Optional list of arguments passed to \code{\link[stats]{nlm}} to control the optimization algorithm. } \item{silent}{ Logical. Whether to print warnings if the optimization algorithm fails to converge. } \item{warn.unused}{ Logical. Whether to print a warning if some of the parameters in \code{start} are not used in the model. } } \details{ This function is an experimental extension to the point process model fitting command \code{\link{ppm}}. The extension allows the trend of the model to include irregular parameters, which will be maximised by a Newton-type iterative method, using \code{\link[stats]{nlm}}. For the sake of explanation, consider a Poisson point process with intensity function \eqn{\lambda(u)}{lambda(u)} at location \eqn{u}.
Assume that \deqn{ \lambda(u) = \exp(\alpha + \beta Z(u)) \, f(u, \gamma) }{ lambda(u) = exp(alpha + beta * Z(u)) * f(u, gamma) } where \eqn{\alpha,\beta,\gamma} are parameters to be estimated, \eqn{Z(u)} is a spatial covariate function, and \eqn{f} is some known function. Then the parameters \eqn{\alpha,\beta} are called \emph{regular} because they appear in a loglinear form; the parameter \eqn{\gamma} is called \emph{irregular}. To fit this model using \code{ippm}, we specify the intensity using the \code{trend} formula in the same way as usual for \code{\link{ppm}}. The trend formula is a representation of the log intensity. In the above example the log intensity is \deqn{ \log\lambda(u) = \alpha + \beta Z(u) + \log f(u, \gamma) }{ log(lambda(u)) = alpha + beta * Z(u) + log(f(u, gamma)) } So the model above would be encoded with the trend formula \code{~Z + offset(log(f))}. Note that the irregular part of the model is an \emph{offset} term, which means that it is included in the log trend as it is, without being multiplied by another regular parameter. The optimisation runs faster if we specify the derivative of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to \eqn{\gamma}. We call this the \emph{irregular score}. To specify this, the user must write an \R function that computes the irregular score for any value of \eqn{\gamma} at any location \code{(x,y)}. Thus, to code such a problem, \enumerate{ \item The argument \code{trend} should define the log intensity, with the irregular part as an offset; \item The argument \code{start} should be a list containing initial values of each of the irregular parameters; \item The argument \code{iScore}, if provided, must be a list (with one entry for each entry of \code{start}) of functions with arguments \code{x,y,\dots}, that evaluate the partial derivatives of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to each irregular parameter. } The coded example below illustrates the model with two irregular parameters \eqn{\gamma,\delta}{gamma,delta} and irregular term \deqn{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta x^3) }{ f((x,y), (gamma, delta)) = 1 + exp(gamma - delta * x^3) } Arguments \code{\dots} passed to \code{\link{ppm}} may also include \code{interaction}. In this case the model is not a Poisson point process but a more general Gibbs point process; the trend formula \code{trend} determines the first-order trend of the model (the first order component of the conditional intensity), not the intensity. } \value{ A fitted point process model (object of class \code{"ppm"}) which also belongs to the special class \code{"ippm"}.
} \author{\spatstatAuthors.} \seealso{ \code{\link{ppm}}, \code{\link{profilepl}} } \examples{ nd <- 32 \testonly{nd <- 10} gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW); topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW); - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nlm.args=list(stepmax=1), nd=nd) } \keyword{spatial} \keyword{models} spatstat.model/man/update.dppm.Rd0000644000176200001440000000462214366424525016512 0ustar liggesusers\name{update.dppm} \alias{update.dppm} \title{Update a Fitted Determinantal Point Process Model} \description{ \code{update} method for class \code{"dppm"}. } \usage{ \method{update}{dppm}(object, \dots, evaluate=TRUE, envir=environment(terms(object))) } \arguments{ \item{object}{ Fitted determinantal point process model. An object of class \code{"dppm"}, obtained from \code{\link{dppm}}. } \item{\dots}{ Arguments passed to \code{\link{dppm}}. } \item{evaluate}{ Logical value indicating whether to return the updated fitted model (\code{evaluate=TRUE}, the default) or just the updated call to \code{dppm} (\code{evaluate=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{dppm}}. } } \details{ \code{object} should be a fitted determinantal point process model, obtained from the model-fitting function \code{\link{dppm}}. The model will be updated according to the new arguments provided. If the argument \code{trend} is provided, it determines the intensity in the updated model. It should be an \R formula (with or without a left hand side). It may include the symbols \code{+} or \code{-} to specify addition or deletion of terms in the current model formula, as shown in the Examples below. The symbol \code{.} refers to the current contents of the formula. The intensity in the updated model is determined by the argument \code{trend} if it is provided, or otherwise by any unnamed argument that is a formula, or otherwise by the formula of the original model, \code{formula(object)}. The spatial point pattern data to which the new model is fitted is determined by the left hand side of the updated model formula, if this is present. Otherwise it is determined by the argument \code{X} if it is provided, or otherwise by any unnamed argument that is a point pattern or a quadrature scheme. The model is refitted using \code{\link{dppm}}. } \value{ Another fitted determinantal point process model (object of class \code{"dppm"}). } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{methods.dppm}}. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss, method="c") fitx <- update(fit, ~x) fit2 <- update(fit, flipxy(swedishpines)) } \author{ \spatstatAuthors.
} \keyword{spatial} \keyword{models} spatstat.model/man/Ops.msr.Rd0000644000176200001440000000300514331173075015621 0ustar liggesusers\name{Ops.msr} \alias{Ops.msr} \title{Arithmetic Operations on Measures} \description{ These group generic methods for the class \code{"msr"} allow the arithmetic operators \code{+}, \code{-}, \code{*} and \code{/} to be applied directly to measures. } \usage{ ## S3 methods for group generics have prototypes: \special{Ops(e1, e2)} %NAMESPACE S3method("Ops", "msr") } \arguments{ \item{e1, e2}{objects of class \code{"msr"}.} } \details{ Arithmetic operators on a measure \code{A} are only defined in some cases. The arithmetic operator is effectively applied to the value of \code{A(W)} for every spatial domain \code{W}. If the result is a measure, then this operation is valid. If \code{A} is a measure (object of class \code{"msr"}) then the operations \code{-A} and \code{+A} are defined. If \code{A} and \code{B} are measures with the same dimension (i.e. both are scalar-valued, or both are \code{k}-dimensional vector-valued) then \code{A + B} and \code{A - B} are defined. If \code{A} is a measure and \code{z} is a numeric value, then \code{A * z} and \code{A / z} are defined, and \code{z * A} is defined. } \value{ Another measure (object of class \code{"msr"}). } \seealso{ \code{\link{with.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp -rp 2 * rp rp / 2 rp - rp rr <- residuals(fit, type="raw") rp - rr } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat.model/man/clusterfit.Rd0000644000176200001440000001320014331173076016443 0ustar liggesusers\name{clusterfit} \alias{clusterfit} \title{Fit Cluster or Cox Point Process Model via Minimum Contrast} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern by the Method of Minimum Contrast. } \usage{ clusterfit(X, clusters, lambda = NULL, startpar = NULL, \dots, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), statistic = NULL, statargs = NULL, algorithm="Nelder-Mead", verbose=FALSE, pspace=NULL) } \arguments{ \item{X}{ Data to which the cluster or Cox model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{clusters}{ Character string determining the cluster or Cox model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. Either a single numeric specifying a constant intensity, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{startpar}{ Vector of initial values of the parameters of the point process model. If \code{X} is a point pattern, sensible defaults are used. Otherwise rather arbitrary values are used. } \item{q,p}{ Optional. Exponents for the contrast criterion. See \code{\link{mincontrast}}. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. See \code{\link{mincontrast}}. } \item{ctrl}{ Optional. Named list containing values of the parameters \code{q,p,rmin,rmax}. } \item{\dots}{ Additional arguments passed to \code{\link{mincontrast}}. } \item{statistic}{ Optional.
Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. See the argument \code{method} of \code{\link[stats]{optim}}. } \item{verbose}{ Logical value indicating whether to print detailed progress reports for debugging purposes. } \item{pspace}{For internal use by package code only.} } \details{ This function fits the clustering parameters of a cluster or Cox point process model by the Method of Minimum Contrast, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. If \code{statistic="pcf"} (or \code{X} appears to be an estimated pair correlation function) then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function. If \code{X} is a point pattern of class \code{"ppp"}, an estimate of the summary statistic specified by \code{statistic} (defaults to \code{"K"}) is first computed before minimum contrast estimation is carried out as described above. In this case the argument \code{statargs} can be used for controlling the summary statistic estimation. The precise algorithm for computing the summary statistic depends on whether the intensity specification (\code{lambda}) is: \describe{ \item{homogeneous:}{ If \code{lambda} is \code{NULL} or a single numeric value, the pattern is considered homogeneous and either \code{\link{Kest}} or \code{\link{pcf}} is invoked. In this case \code{lambda} is \bold{not} used for anything when estimating the summary statistic. } \item{inhomogeneous:}{ If \code{lambda} is a pixel image (object of class \code{"im"}), a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)}, the pattern is considered inhomogeneous. In this case either \code{\link{Kinhom}} or \code{\link{pcfinhom}} is invoked with \code{lambda} as an argument. } } After the clustering parameters of the model have been estimated by minimum contrast, \code{lambda} (if non-null) is used to compute the additional model parameter \eqn{\mu}{mu}. The algorithm parameters \code{q,p,rmax,rmin} are described in the help for \code{\link{mincontrast}}. They may be provided either as individually-named arguments, or as entries in the list \code{ctrl}. The individually-named arguments \code{q,p,rmax,rmin} override the entries in the list \code{ctrl}. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. See \code{\link{mincontrast}}. } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193--212. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007). An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \spatstatAuthors.
} \seealso{ \code{\link{kppm}} } \examples{ fit <- clusterfit(redwood, "Thomas") fit if(interactive()){ plot(fit) } K <- Kest(redwood) fit2 <- clusterfit(K, "MatClust") } \keyword{spatial} \keyword{models} spatstat.model/man/rmhmodel.ppm.Rd0000644000176200001440000001102014331173077016660 0ustar liggesusers\name{rmhmodel.ppm} \alias{rmhmodel.ppm} \title{Interpret Fitted Model for Metropolis-Hastings Simulation.} \description{ Converts a fitted point process model into a format that can be used to simulate the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{ppm}(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{w}{ Optional. Window in which the simulations should be generated. } \item{\dots}{Ignored.} \item{verbose}{ Logical flag indicating whether to print progress reports while the model is being converted. } \item{project}{Logical flag indicating what to do if the fitted model does not correspond to a valid point process. See Details.} \item{control}{ Parameters determining the iterative behaviour of the simulation algorithm. Passed to \code{\link{rmhcontrol}}. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.ppm} is the method for the class \code{"ppm"} of fitted point process models. The argument \code{model} should be a fitted point process model (object of class \code{"ppm"}) typically obtained from the model-fitting function \code{\link{ppm}}. This will be converted into an object of class \code{"rmhmodel"}. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. Not all fitted point process models obtained from \code{\link{ppm}} can be simulated. We have not yet implemented simulation code for the \code{\link{LennardJones}} and \code{\link{OrdThresh}} models. It is also possible that a fitted point process model obtained from \code{\link{ppm}} may not correspond to a valid point process. For example a fitted model with the \code{\link{Strauss}} interpoint interaction may have any value of the interaction parameter \eqn{\gamma}{gamma}; however the Strauss process is not well-defined for \eqn{\gamma > 1}{gamma > 1} (Kelly and Ripley, 1976). The argument \code{project} determines what to do in such cases. If \code{project=FALSE}, a fatal error will occur. If \code{project=TRUE}, the fitted model parameters will be adjusted to the nearest values which do correspond to a valid point process. For example a Strauss process with \eqn{\gamma > 1}{gamma > 1} will be projected to a Strauss process with \eqn{\gamma = 1}{gamma = 1}, equivalent to a Poisson process. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. 
and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. } \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.list}}, \code{\link{rmhmodel.default}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \rmhInteractionsList. } \examples{ fit1 <- ppm(cells ~1, Strauss(0.07)) mod1 <- rmhmodel(fit1) fit2 <- ppm(cells ~x, Geyer(0.07, 2)) mod2 <- rmhmodel(fit2) fit3 <- ppm(cells ~x, Hardcore(0.07)) mod3 <- rmhmodel(fit3) # Then rmh(mod1), etc } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat.model/man/Poisson.Rd0000644000176200001440000000346514331173075015724 0ustar liggesusers\name{Poisson} \alias{Poisson} \title{Poisson Point Process Model} \description{ Creates an instance of the Poisson point process model which can then be fitted to point pattern data. } \usage{ Poisson() } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Poisson point process (namely, there are no interactions). } \details{ The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument \code{interaction} of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Poisson process is provided by the value of the function \code{Poisson}. This works for all types of Poisson processes including multitype and nonstationary Poisson processes. } \seealso{ \code{\link{ppm}}, \code{\link{Strauss}} } \examples{ ppm(nztrees ~1, Poisson()) # fit the stationary Poisson process to 'nztrees' # no edge correction needed lon <- longleaf \testonly{ lon <- lon[seq(1, npoints(lon), by=50)] } longadult <- unmark(subset(lon, marks >= 30)) ppm(longadult ~ x, Poisson()) # fit the nonstationary Poisson process # with intensity lambda(x,y) = exp( a + bx) # trees marked by species lans <- lansing \testonly{ lans <- lans[seq(1, npoints(lans), by=30)] } ppm(lans ~ marks, Poisson()) # fit stationary marked Poisson process # with different intensity for each species \donttest{ ppm(lansing ~ marks * polynom(x,y,3), Poisson()) # fit nonstationary marked Poisson process # with different log-cubic trend for each species } \testonly{ # equivalent functionality - smaller dataset ppm(amacrine ~ marks * polynom(x,y,2), Poisson()) } } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/is.poissonclusterprocess.Rd0000644000176200001440000000316314334117440021367 0ustar liggesusers\name{is.poissonclusterprocess} \alias{is.poissonclusterprocess} \alias{is.poissonclusterprocess.kppm} \alias{is.poissonclusterprocess.zclustermodel} \alias{is.poissonclusterprocess.default} \title{ Recognise Poisson Cluster Process Models } \description{ Given a point process model (either a model that has been fitted to data, or a model specified by its parameters), determine whether the model is a Poisson cluster process. 
} \usage{ is.poissonclusterprocess(model) \method{is.poissonclusterprocess}{kppm}(model) \method{is.poissonclusterprocess}{zclustermodel}(model) \method{is.poissonclusterprocess}{default}(model) } \arguments{ \item{model}{ Any kind of object representing a spatial point process model, either a model fitted to data, or a specification of a point process model. } } \details{ The argument \code{model} represents a fitted spatial point process model (such as an object of class \code{"ppm"}, \code{"kppm"} or similar) or a specification of a point process model (such as an object of class \code{"zclustermodel"}). This function returns \code{TRUE} if the \code{model} is a Poisson cluster process, and \code{FALSE} otherwise. The function \code{is.poissonclusterprocess} is generic, with methods for classes \code{kppm} and \code{zclustermodel}, and a default method. } \value{ A logical value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{kppm}}, \code{\link{zclustermodel}}. } \examples{ fut <- kppm(redwood ~ 1, "Thomas") is.poissonclusterprocess(fut) fot <- slrm(cells ~ x, dimyx=16) is.poissonclusterprocess(fot) } \keyword{spatial} \keyword{models} spatstat.model/man/predict.kppm.Rd0000644000176200001440000000274014331173077016667 0ustar liggesusers\name{predict.kppm} \alias{predict.kppm} \alias{fitted.kppm} \title{Prediction from a Fitted Cluster Point Process Model} \description{ Given a fitted cluster point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{kppm}(object, ...) \method{predict}{kppm}(object, ...) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. The argument \code{object} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.kppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.kppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{vcov.kppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ fit <- kppm(redwood ~ x, "Thomas") predict(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/cdf.test.Rd0000644000176200001440000002456314510474260016005 0ustar liggesusers\name{cdf.test.ppm} \alias{cdf.test.ppm} \alias{cdf.test.slrm} \title{Spatial Distribution Test for Point Pattern or Point Process Model} \description{ Performs a test of goodness-of-fit of a point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov test, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or Anderson-Darling test. For non-Poisson models, a Monte Carlo test is used. 
} \usage{ \method{cdf.test}{ppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) \method{cdf.test}{slrm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, modelname=NULL, covname=NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) or fitted spatial logistic regression (object of class \code{"slrm"}). } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link[stats]{ks.test}} (from the \pkg{stats} package) or \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package) to control the test; and arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution. } \item{interpolate}{ Logical flag indicating whether to interpolate pixel images. If \code{interpolate=TRUE}, the value of the covariate at each point of \code{X} will be approximated by interpolating the nearby pixel values. If \code{interpolate=FALSE}, the nearest pixel value will be used. } \item{jitter}{ Logical flag. If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } \item{modelname,covname}{ Character strings giving alternative names for \code{model} and \code{covariate} to be used in labelling plot axes. } \item{nsim}{ Number of simulated realisations from the \code{model} to be used for the Monte Carlo test, when \code{model} is not a Poisson process. } \item{verbose}{ Logical value indicating whether to print progress reports when performing a Monte Carlo test. } } \details{ These functions perform a goodness-of-fit test of a Poisson or Gibbs point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test, the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is performed using these test statistics. The function \code{cdf.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"}), then \code{cdf.test(X, \dots)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. \item If \code{model} is a fitted spatial logistic regression (object of class \code{"slrm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. 
} The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using a classical goodness-of-fit test. Thus, you must nominate a spatial covariate for this test. If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{stats::\link[stats]{ks.test}}, \code{goftest::\link[goftest]{cvm.test}} or \code{goftest::\link[goftest]{ad.test}}. This test was apparently first described (in the context of spatial data, and using Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson process, then a Monte Carlo test is performed, by generating \code{nsim} point patterns which are simulated realisations of the \code{model}, re-fitting the model to each simulated point pattern, and calculating the test statistic for each fitted model. The Monte Carlo \eqn{p} value is determined by comparing the simulated values of the test statistic with the value for the original data. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. The return value also belongs to the class \code{"cdftest"} for which there is a plot method \code{\link{plot.cdftest}}. The plot method displays the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, plotted against the value of the covariate. The argument \code{jitter} controls whether covariate values are randomly perturbed, in order to avoid ties. If the original data contains any ties in the covariate (i.e.
points with equal values of the covariate), and if \code{jitter=FALSE}, then the Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}} will issue a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if \code{jitter=TRUE} each value of the covariate will be perturbed by adding a small random value. The perturbations are normally distributed with standard deviation equal to one hundredth of the range of values of the covariate. This prevents ties, and the \eqn{p}-value is still correct. There is a very slight loss of power. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link[stats]{ks.test}} for details. The return value can be printed to give an informative summary of the test. The value also belongs to the class \code{"cdftest"} for which there is a plot method. } \section{Warning}{ The outcome of the test involves a small amount of random variability, because (by default) the covariate values are randomly perturbed to avoid ties. Hence, if \code{cdf.test} is executed twice, the \eqn{p}-values will not be exactly the same. To avoid this behaviour, set \code{jitter=FALSE}. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.cdftest}}, \code{\link{quadrat.test}}, \code{\link{berman.test}}, \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}}, \code{\link[goftest]{ad.test}}, \code{\link[spatstat.model]{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ op <- options(useFancyQuotes=FALSE) # fit inhomogeneous Poisson model and test model <- ppm(nztrees ~x) cdf.test(model, "x") if(interactive()) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X ~1) # fit correct nonuniform Poisson process fit1 <- ppm(X ~x) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") } # multitype point pattern yimage <- as.im(function(x,y){y}, W=Window(amacrine)) cdf.test(ppm(amacrine ~marks+y), yimage) options(op) } \keyword{htest} \keyword{spatial} \concept{Goodness-of-fit} spatstat.model/man/thomas.estK.Rd0000644000176200001440000001417314331173077016472 0ustar liggesusers\name{thomas.estK} \alias{thomas.estK} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the K function. } \usage{ thomas.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details.
} } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical \eqn{K} function of the Thomas process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of starting values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical \eqn{K}-function of the Thomas process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa (1 - \exp(-\frac{r^2}{4\sigma^2})). }{ K(r) = pi r^2 + (1 - exp(-r^2/(4 sigma^2)))/kappa. } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}.
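A minimal sketch of passing optimisation settings through these additional arguments (the values shown are purely illustrative, not recommended defaults): \preformatted{
  # extra arguments are handed on to optim()
  u <- thomas.estK(redwood, startpar=c(kappa=10, scale=0.1),
                   control=list(maxit=500))
}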
For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \rasmus. Adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ u <- thomas.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat.model/man/Penttinen.Rd0000644000176200001440000000520014331173075016223 0ustar liggesusers\name{Penttinen} \alias{Penttinen} \title{Penttinen Interaction} \description{ Creates an instance of the Penttinen pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Penttinen(r) } \arguments{ \item{r}{circle radius} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance \eqn{d}. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{r} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. The scale of interaction is controlled by the disc radius \eqn{r}: two points interact if they are closer than \eqn{2 r}{2 * r} apart. The strength of interaction is controlled by the canonical parameter \eqn{\theta}{theta}, which must be less than or equal to zero, or equivalently by the parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)}, which must lie between 0 and 1. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. For \eqn{\gamma=0}{gamma=0} the model is a hard core process with hard core diameter \eqn{2 r}{2 * r}. For \eqn{\gamma=1}{gamma=1} the model is a Poisson process. The irregular parameter \eqn{r} must be given in the call to \code{Penttinen}, while the regular parameter \eqn{\theta}{theta} will be estimated. This model can be considered as a pairwise approximation to the area-interaction model \code{\link{AreaInter}}.
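As a hedged illustration of interpreting the fitted parameters (this assumes the canonical coefficient is labelled \code{"Interaction"}, following the usual convention for pairwise interactions): \preformatted{
  fit <- ppm(cells ~ 1, Penttinen(0.07))
  theta <- coef(fit)[["Interaction"]]  # canonical parameter, theta <= 0
  exp(theta)                           # gamma, which should lie in (0,1]
}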
} \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}}, \code{\link{AreaInter}}. } \examples{ fit <- ppm(cells ~ 1, Penttinen(0.07)) fit reach(fit) # interaction range is circle DIAMETER } \references{ Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla} Studies in Computer Science, Economics and Statistics \bold{7}, University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/dfbetas.ppm.Rd0000644000176200001440000000735214331173076016475 0ustar liggesusers\name{dfbetas.ppm} \alias{dfbetas.ppm} \title{ Parameter Influence Measure } \description{ Computes the deletion influence measure for each parameter in a fitted point process model. } \usage{ \method{dfbetas}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored, except for the arguments \code{dimyx} and \code{eps} which are passed to \code{\link{as.mask}} to control the spatial resolution of the image of the density component. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process \code{model}, this function computes the influence measure for each parameter, as described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). This is a method for the generic function \code{\link[stats]{dfbetas}}. The influence measure for each parameter \eqn{\theta}{theta} is a signed measure in two-dimensional space. It consists of a discrete mass on each data point (i.e. each point in the point pattern to which the \code{model} was originally fitted) and a continuous density at all locations. The mass at a data point represents the change in the fitted value of the parameter \eqn{\theta}{theta} that would occur if this data point were to be deleted. The density at other non-data locations represents the effect (on the fitted value of \eqn{\theta}{theta}) of deleting these locations (and their associated covariate values) from the input to the fitting procedure. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter.
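For instance, a hedged sketch (the trend and its parameter are invented purely for illustration): if the log trend contains an irregular parameter \code{gamma} through a term \eqn{\log(1 + \gamma x)}{log(1 + gamma * x)}, its partial derivative with respect to \code{gamma} is \eqn{x/(1 + \gamma x)}{x/(1 + gamma * x)}, so a suitable list would be \preformatted{
  iSc <- list(gamma = function(x, y, gamma) { x / (1 + gamma * x) })
}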
The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ An object of class \code{"msr"} representing a signed or vector-valued measure. This object can be printed and plotted. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{ppmInfluence}}. See \code{\link{msr}} for information on how to use a measure. } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) \testonly{fit <- ppm(X ~x+y, nd=16)} plot(dfbetas(fit)) plot(Smooth(dfbetas(fit))) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat.model/man/repul.Rd0000644000176200001440000000340014331173077015410 0ustar liggesusers\name{repul.dppm} \alias{repul} \alias{repul.dppm} \title{ Repulsiveness Index of a Determinantal Point Process Model } \description{ Computes a measure of the degree of repulsion between points in a determinantal point process model. } \usage{ repul(model, \dots) \method{repul}{dppm}(model, \dots) } \arguments{ \item{model}{ A fitted point process model of determinantal type (object of class \code{"dppm"}). } \item{\dots}{ Ignored. } } \details{ The repulsiveness index \eqn{\mu}{mu} of a determinantal point process model was defined by Lavancier, \Moller and Rubak (2015) as \deqn{ \mu = \lambda \int (1- g(x)) \, dx }{ mu = lambda * integral[(1-g(x))] } where \eqn{\lambda}{lambda} is the intensity of the model and \eqn{g(x)} is the pair correlation function, and the integral is taken over all two-dimensional vectors \eqn{x}. Values of \eqn{\mu}{mu} are dimensionless. Larger values of \eqn{\mu}{mu} indicate stronger repulsion between points. If the model is stationary, the result is a single number. If the model is not stationary, the result is a pixel image (obtained by multiplying the spatially-varying intensity by the integral defined above). } \value{ A numeric value or a pixel image. } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015), Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, \bold{77}, 853--877. } \author{ \adrian. } \seealso{ \code{\link{dppm}} } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } fit <- dppm(jpines ~ 1, dppGauss) repul(fit) } \keyword{spatial} \keyword{models} spatstat.model/man/harmonise.msr.Rd0000644000176200001440000000173314331173076017054 0ustar liggesusers\name{harmonise.msr} \alias{harmonise.msr} \title{Make Measures Compatible} \description{ Convert several measures to a common quadrature scheme } \usage{ \method{harmonise}{msr}(\dots) } \arguments{ \item{\dots}{ Any number of measures (objects of class \code{"msr"}). } } \details{ This function makes any number of measures compatible, by converting them all to a common quadrature scheme. The command \code{\link{harmonise}} is generic.
This is the method for objects of class \code{"msr"}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are measures. } \author{ \spatstatAuthors. } \examples{ fit1 <- ppm(cells ~ x) fit2 <- ppm(rpoispp(ex=cells) ~ x) m1 <- residuals(fit1) m2 <- residuals(fit2) harmonise(m1, m2) s1 <- residuals(fit1, type="score") s2 <- residuals(fit2, type="score") harmonise(s1, s2) } \seealso{ \code{\link{harmonise}}, \code{\link{msr}} } \keyword{spatial} \keyword{manip} spatstat.model/man/Gres.Rd0000644000176200001440000000537314510474260015171 0ustar liggesusers\name{Gres} \Rdversion{1.1} \alias{Gres} \title{ Residual G Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{G} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Gres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Gcom}}. } \item{\dots}{ Arguments passed to \code{\link{Gcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{G} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Gres} first calls \code{\link{Gcom}} to compute both the nonparametric estimate of the \eqn{G} function and its model compensator. Then \code{Gres} computes the difference between them, which is the residual \eqn{G}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Gcom}}. Then \code{Gres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Gcom}}, \code{\link{Gest}}. Alternative functions: \code{\link{Kres}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model-fitting: \code{\link{ppm}}. } \examples{ fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gres(fit0) plot(G0) # Hanisch correction estimate plot(G0, hres ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gres(fit1), hres ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Gres(cells, interaction=Strauss(0.12))) # How to make envelopes if(interactive()) { E <- envelope(fit1, Gres, model=fit1, nsim=39) plot(E) } # For computational efficiency Gc <- Gcom(fit1) G1 <- Gres(Gc) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/summary.dppm.Rd0000644000176200001440000000414214331173077016721 0ustar liggesusers\name{summary.dppm} \alias{summary.dppm} \alias{print.summary.dppm} \title{Summarizing a Fitted Determinantal Point Process Model} \description{ \code{summary} method for class \code{"dppm"}. 
} \usage{ \method{summary}{dppm}(object, \dots, quick=FALSE) \method{print}{summary.dppm}(x, \dots) } \arguments{ \item{object}{ A fitted determinantal point process model (object of class \code{"dppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.dppm"} as returned by \code{summary.dppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"dppm"}. An object of class \code{"dppm"} describes a fitted determinantal point process model. See \code{\link{dppm}}. \code{summary.dppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.dppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.dppm} is invoked implicitly when the user calls \code{summary.dppm} without assigning its value to anything. See the examples. } \value{ \code{summary.dppm} returns an object of class \code{"summary.dppm"}, while \code{print.summary.dppm} returns \code{NULL}. The result of \code{summary.dppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{trend}{Object of class \code{summary.ppm} summarising the trend} \item{repul}{Repulsiveness index} } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } fit <- dppm(jpines ~ 1, dppGauss) summary(fit) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.model/man/is.stationary.ppm.Rd0000644000176200001440000000602714331173076017672 0ustar liggesusers\name{is.stationary.ppm} \alias{is.stationary.ppm} \alias{is.stationary.kppm} \alias{is.stationary.slrm} \alias{is.stationary.dppm} \alias{is.stationary.detpointprocfamily} \alias{is.poisson.ppm} \alias{is.poisson.kppm} \alias{is.poisson.slrm} \alias{is.poisson.interact} \title{ Recognise Stationary and Poisson Point Process Models } \description{ Given a point process model (either a model that has been fitted to data, or a model specified by its parameters), determine whether the model is a stationary point process, and whether it is a Poisson point process. } \usage{ \method{is.stationary}{ppm}(x) \method{is.stationary}{kppm}(x) \method{is.stationary}{slrm}(x) \method{is.stationary}{dppm}(x) \method{is.stationary}{detpointprocfamily}(x) \method{is.poisson}{ppm}(x) \method{is.poisson}{kppm}(x) \method{is.poisson}{slrm}(x) \method{is.poisson}{interact}(x) } \arguments{ \item{x}{ A fitted spatial point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"} or \code{"slrm"}) or a specification of a Gibbs point process model (object of class \code{"rmhmodel"}) or a similar object. } } \details{ The argument \code{x} represents a fitted spatial point process model or a similar object. \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents a stationary point process, and \code{FALSE} if not. \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents a Poisson point process, and \code{FALSE} if not. 
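For example: \preformatted{
  is.poisson(Poisson())      # TRUE: no interpoint interaction
  is.poisson(Strauss(0.1))   # FALSE: pairwise interaction
} These calls use the method for interaction objects, described below.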
The functions \code{is.stationary} and \code{is.poisson} are generic, with methods for the classes \code{"ppm"} (Gibbs point process models), \code{"kppm"} (cluster or Cox point process models), \code{"slrm"} (spatial logistic regression models) and \code{"rmhmodel"} (model specifications for the Metropolis-Hastings algorithm). Additionally \code{is.stationary} has a method for classes \code{"detpointprocfamily"} and \code{"dppm"} (both determinantal point processes) and \code{is.poisson} has a method for class \code{"interact"} (interaction structures for Gibbs models). \code{is.poisson.kppm} will return \code{FALSE}, unless the model \code{x} is degenerate: either \code{x} has zero intensity so that its realisations are empty with probability 1, or it is a log-Gaussian Cox process where the log intensity has zero variance. \code{is.poisson.slrm} will always return \code{TRUE}, by convention. } \value{ A logical value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{is.marked}} to determine whether a model is a marked point process. \code{\link[spatstat.model]{summary.ppm}} for detailed information about a fitted model. Model-fitting functions \code{\link{ppm}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{slrm}}. } \examples{ fit <- ppm(cells ~ x) is.stationary(fit) is.poisson(fit) fut <- kppm(redwood ~ 1, "MatClust") is.stationary(fut) is.poisson(fut) fot <- slrm(cells ~ x) is.stationary(fot) is.poisson(fot) } \keyword{spatial} \keyword{models} spatstat.model/man/influence.ppm.Rd0000644000176200001440000000711514510474260017030 0ustar liggesusers\name{influence.ppm} \alias{influence.ppm} \title{ Influence Measure for Spatial Point Process Model } \description{ Computes the influence measure for a fitted spatial point process model. } \usage{ \method{influence}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process model \code{model}, this function computes the influence measure described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). The function \code{\link[stats]{influence}} is generic, and \code{influence.ppm} is the method for objects of class \code{"ppm"} representing point process models. The influence of a point process model is a value attached to each data point (i.e. each point of the point pattern to which the \code{model} was fitted). The influence value \eqn{s(x_i)}{s(x[i])} at a data point \eqn{x_i}{x[i]} represents the change in the maximised log (pseudo)likelihood that occurs when the point \eqn{x_i}{x[i]} is deleted. A relatively large value of \eqn{s(x_i)}{s(x[i])} indicates a data point with a large influence on the fitted model. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. 
The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{influence.ppm} is an object of class \code{"influence.ppm"}. It can be printed and plotted. It can be converted to a marked point pattern by \code{as.ppp} (see \code{\link{as.ppp.influence.ppm}}). There are also methods for \code{[}, \code{\link{as.owin}}, \code{\link{domain}}, \code{\link{shift}}, \code{\link{integral}} and \code{\link{Smooth}}. } \value{ An object of class \code{"influence.ppm"}. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/leverage.ppm.Rd0000644000176200001440000001021714510474260016647 0ustar liggesusers\name{leverage.ppm} \alias{leverage} \alias{leverage.ppm} \title{ Leverage Measure for Spatial Point Process Model } \description{ Computes the leverage measure for a fitted spatial point process model. } \usage{ leverage(model, \dots) \method{leverage}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored, except for the arguments \code{dimyx} and \code{eps} which are passed to \code{\link{as.mask}} to control the spatial resolution of the result. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ The function \code{leverage} is generic, and \code{leverage.ppm} is the method for objects of class \code{"ppm"}. Given a fitted spatial point process model \code{model}, the function \code{leverage.ppm} computes the leverage of the model, described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). The leverage of a spatial point process model is a function of spatial location, and is typically displayed as a colour pixel image. The leverage value \eqn{h(u)} at a spatial location \eqn{u} represents the change in the fitted trend of the fitted point process model that would have occurred if a data point were to have occurred at the location \eqn{u}. 
A relatively large value of \eqn{h()} indicates a part of the space where the data have a \emph{potentially} strong effect on the fitted model (specifically, a strong effect on the intensity or conditional intensity of the fitted model) due to the values of the covariates. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the leverage calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{leverage.ppm} is an object of class \code{"leverage.ppm"}. It can be printed or plotted. It can be converted to a pixel image by \code{as.im} (see \code{\link{as.im.leverage.ppm}}). There are also methods for \code{contour}, \code{persp}, \code{[}, \code{as.function}, \code{\link{as.owin}}, \code{\link{domain}}, \code{\link{Smooth}}, \code{\link{integral}}, and \code{mean}. } \value{ An object of class \code{"leverage.ppm"}. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.leverage.ppm}}, \code{\link{as.function.leverage.ppm}} } \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32, ndummy.min=16) X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) le <- leverage(fit) if(!offline) plot(le) mean(le) if(offline) spatstat.options(op) } \keyword{spatial} \keyword{models} \concept{diagnostics} \concept{Model diagnostics} spatstat.model/man/residuals.dppm.Rd0000644000176200001440000000215314331173077017217 0ustar liggesusers\name{residuals.dppm} \alias{residuals.dppm} \title{ Residuals for Fitted Determinantal Point Process Model } \description{ Given a determinantal point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{dppm}(object, \dots) } \arguments{ \item{object}{ The fitted determinantal point process model (an object of class \code{"dppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. } \seealso{ \code{\link{msr}}, \code{\link{dppm}} } \examples{ fit <- dppm(swedishpines ~ x, dppGauss, method="c") rr <- residuals(fit) } \author{ \spatstatAuthors.
} \keyword{spatial} \keyword{models} \keyword{methods} spatstat.model/man/panel.contour.Rd0000644000176200001440000000442214400354234017046 0ustar liggesusers\name{panel.contour} \alias{panel.contour} \alias{panel.image} \alias{panel.histogram} \title{ Panel Plots using Colour Image or Contour Lines } \description{ These functions can be passed to \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}} to determine what kind of plotting is done in each panel of a multi-panel graphical display. } \usage{ panel.contour(x, y, ..., sigma = NULL) panel.image(x, y, ..., sigma = NULL) panel.histogram(x, ...) } \arguments{ \item{x,y}{ Coordinates of points in a scatterplot. } \item{\dots}{ Extra graphics arguments, passed to \code{\link{contour.im}}, \code{\link{plot.im}} or \code{\link[graphics]{rect}}, respectively, to control the appearance of the panel. } \item{sigma}{ Bandwidth of kernel smoother, on a scale where \eqn{x} and \eqn{y} range between 0 and 1. } } \details{ These functions can serve as one of the arguments \code{panel}, \code{lower.panel}, \code{upper.panel}, \code{diag.panel} passed to graphics commands like \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}}, to determine what kind of plotting is done in each panel of a multi-panel graphical display. In particular they work with \code{\link{pairs.im}}. The functions \code{panel.contour} and \code{panel.image} are suitable for the off-diagonal plots which involve two datasets \code{x} and \code{y}. They first rescale \code{x} and \code{y} to the unit square, then apply kernel smoothing with bandwidth \code{sigma} using \code{\link{density.ppp}}. Then \code{panel.contour} draws a contour plot while \code{panel.image} draws a colour image. The function \code{panel.histogram} is suitable for the diagonal plots which involve a single dataset \code{x}. It displays a histogram of the data. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link{pairs.im}}, \code{\link{pairs.default}}, \code{\link{panel.smooth}} } \examples{ pairs(bei.extra, panel = panel.contour, diag.panel = panel.histogram) with(bei.extra, pairs(grad, elev, panel = panel.image, diag.panel = panel.histogram)) pairs(marks(finpines), panel=panel.contour, diag.panel=panel.histogram) } \keyword{spatial} \keyword{hplot} spatstat.model/man/methods.dppm.Rd0000644000176200001440000000307114366424525016674 0ustar liggesusers\name{methods.dppm} \alias{methods.dppm} %DoNotExport \alias{coef.dppm} \alias{formula.dppm} \alias{print.dppm} \alias{terms.dppm} \alias{labels.dppm} \title{ Methods for Determinantal Point Process Models } \description{ These are methods for the class \code{"dppm"}. } \usage{ \method{coef}{dppm}(object, \dots) \method{formula}{dppm}(x, \dots) \method{print}{dppm}(x, ...) \method{terms}{dppm}(x, \dots) \method{labels}{dppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"dppm"}, representing a fitted determinantal point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link[stats]{coef}}, \code{\link[stats]{formula}}, \code{\link[base]{print}}, \code{\link[stats]{terms}} and \code{\link[base]{labels}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted determinantal point process model. It is obtained from \code{\link{dppm}}. The method \code{coef.dppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the interaction parameters.
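To inspect the fitted kernel parameters as well, one possible approach (a hedged sketch, assuming the generic \code{\link{parameters}} is available for this class) is: \preformatted{
  fit <- dppm(swedishpines ~ x, dppGauss, method="c")
  coef(fit)        # regression coefficients of the log-intensity only
  parameters(fit)  # fitted model parameters, including the kernel
}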
} \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{as.ppm.dppm}}. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss, method="c") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat.model/man/as.interact.Rd0000644000176200001440000000361014331173076016476 0ustar liggesusers\name{as.interact} \alias{as.interact} \alias{as.interact.fii} \alias{as.interact.interact} \alias{as.interact.ppm} \title{Extract Interaction Structure} \description{ Extracts the interpoint interaction structure from a point pattern model. } \usage{ as.interact(object) \method{as.interact}{fii}(object) \method{as.interact}{interact}(object) \method{as.interact}{ppm}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}) or an interpoint interaction structure (object of class \code{"interact"}). } } \details{ The function \code{as.interact} extracts the interpoint interaction structure from a suitable object. An object of class \code{"interact"} describes an interpoint interaction structure, before it has been fitted to point pattern data. The irregular parameters of the interaction (such as the interaction range) are fixed, but the regular parameters (such as interaction strength) are undetermined. Objects of this class are created by the functions \code{\link{Poisson}}, \code{\link{Strauss}} and so on. The main use of such objects is in a call to \code{\link{ppm}}. The function \code{as.interact} is generic, with methods for the classes \code{"ppm"}, \code{"fii"} and \code{"interact"}. The result is an object of class \code{"interact"} which can be printed. } \section{Note on parameters}{ This function does \bold{not} extract the fitted coefficients of the interaction. To extract the fitted interaction including the fitted coefficients, use \code{\link{fitin}}. } \value{ An object of class \code{"interact"} representing the interpoint interaction. This object can be printed and plotted. } \author{\adrian and \rolf } \seealso{ \code{\link{fitin}}, \code{\link{ppm}}. } \examples{ model <- ppm(cells ~1, Strauss(0.07)) f <- as.interact(model) f } \keyword{spatial} \keyword{models} spatstat.model/man/ic.kppm.Rd0000644000176200001440000000570214374302015015622 0ustar liggesusers\name{ic.kppm} \alias{ic} \alias{ic.ppm} \alias{ic.kppm} \title{Model selection criteria for the intensity function of a point process} \description{ Information criteria for selecting the intensity function model of a Poisson, cluster or Cox point process. } \usage{ ic(object) \method{ic}{ppm}(object) \method{ic}{kppm}(object) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } } \details{ This function returns information criteria for selecting the intensity function model of a Poisson, Cox or cluster point process fitted by first order composite likelihood (i.e. using the Poisson likelihood function). Degrees of freedom \eqn{df} for the information criteria are given by the trace of \eqn{S^{-1} \Sigma} where \eqn{S} is the sensitivity matrix and \eqn{\Sigma} is the variance matrix for the log composite likelihood score function. In case of a Poisson process, \eqn{df} is the number of parameters in the model for the intensity function. 
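As a minimal sketch (the choice of model is illustrative only), the fitted degrees of freedom can be inspected directly: \preformatted{
  fit <- kppm(redwood ~ x, "Thomas")
  ic(fit)$df    # adjusted degrees of freedom
}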
The composite Bayesian information criterion (cbic) is \eqn{-2\ell + \log(n) df}{-2 * ll + log(n) * df} where \eqn{\ell}{ll} is the maximal log first-order composite likelihood (Poisson loglikelihood for the intensity function) and \eqn{n} is the observed number of points. It reduces to the BIC criterion in the case of a Poisson process. The composite information criterion (cic) is \eqn{-2\ell + 2 df}{- 2 * ll + 2 * df} and reduces to the AIC in the case of a Poisson process. NOTE: the information criteria are for selecting the intensity function model (a set of covariates) within a given model class. They cannot be used to choose among different types of cluster or Cox point process models (e.g. cannot be used to choose between Thomas and LGCP models). } \value{ A list with entries \code{loglike}, \code{cbic}, \code{cic} and \code{df}. Here \code{loglike} is the fitted log first-order composite likelihood, \code{cbic} is the composite Bayesian information criterion, \code{cic} is the composite likelihood criterion and \code{df} is the adjusted degrees of freedom for the fitted intensity function model. } \seealso{ \code{\link{kppm}} } \references{ Choiruddin, A., Coeurjolly, J.F. and Waagepetersen, R. (2020) Information criteria for inhomogeneous spatial point processes. \emph{Australian and New Zealand Journal of Statistics}. To appear. } \examples{ if(interactive()) { # model with one covariate fit1 <- kppm(bei~elev,data=bei.extra) ic1 <- ic(fit1) # model with two covariates fit2 <- kppm(bei~elev+grad,data=bei.extra) ic2 <- ic(fit2) # smallest cbic for fit1 but smallest cic for fit2 } } \author{ Achmad Choiruddin, Jean-Francois Coeurjolly and Rasmus Waagepetersen. } \keyword{spatial} \keyword{models} \concept{point process model} \concept{Cox point process} \concept{cluster process} \concept{Neyman-Scott cluster process} spatstat.model/man/dppm.Rd0000644000176200001440000003563614331173076015230 0ustar liggesusers\name{dppm} \alias{dppm} \concept{point process model} \concept{determinantal point process} \title{Fit Determinantal Point Process Model} \description{ Fit a determinantal point process model to a point pattern. } \usage{ dppm(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm", "adapcl"), weightfun = NULL, control = list(), algorithm, statistic = "K", statargs = list(), rmax = NULL, epsilon = 0.01, covfunargs = NULL, use.gam = FALSE, nd = NULL, eps = NULL) } \arguments{ \item{formula}{ A \code{formula} in the \R language specifying the data (on the left side) and the form of the model to be fitted (on the right side). For a stationary model it suffices to provide a point pattern without a formula. See Details. } \item{family}{ Information specifying the family of point processes to be used in the model. Typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively a character string giving the name of a family function, or the result of calling one of the family functions. See Details. } \item{data}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{startpar}{ Named vector of starting parameter values for the optimization. } \item{method}{ The fitting method.
Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, \code{"adapcl"} for adaptive second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihoods or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{algorithm}{ Character string determining the mathematical algorithm to be used to solve the fitting problem. If \code{method="mincon", "clik2"} or \code{"palm"} this argument is passed to the generic optimization function \code{\link[stats]{optim}} (renamed as the argument \code{method}) with default \code{"Nelder-Mead"}. If \code{method="adapcl"} the argument is passed to the equation solver \code{\link[nleqslv]{nleqslv}}, with default \code{"Broyden"}. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{epsilon}{ Tuning parameter for the adaptive composite likelihood. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a determinantal point process model to a point pattern dataset as described in Lavancier et al. (2015). The model to be fitted is specified by the arguments \code{formula} and \code{family}. The argument \code{formula} should normally be a \code{formula} in the \R language. The left hand side of the formula specifies the point pattern dataset to which the model should be fitted. This should be a single argument which may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The right hand side of the formula is called the \code{trend} and specifies the form of the \emph{logarithm of the intensity} of the process. Alternatively the argument \code{formula} may be a point pattern or quadrature scheme, and the trend formula is taken to be \code{~1}. The argument \code{family} specifies the family of point processes to be used in the model. It is typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively it may be a character string giving the name of a family function, or the result of calling one of the family functions. A family function belongs to class \code{"detpointprocfamilyfun"}. The result of calling a family function is a point process family, which belongs to class \code{"detpointprocfamily"}. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. If the trend formula is \code{~1} (the default if a point pattern or quadrature scheme is given rather than a \code{"formula"}) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation).
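For instance, a minimal sketch of the two cases: \preformatted{
  fit0 <- dppm(swedishpines ~ 1, dppGauss)              # homogeneous
  fitx <- dppm(swedishpines ~ x, dppGauss, method="c")  # inhomogeneous
}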
The interaction parameters of the model are then fitted either by minimum contrast estimation, or by a composite likelihood method (maximum composite likelihood, maximum Palm likelihood, or by solving the adaptive composite likelihood estimating equation). \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) interaction parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the interaction parameters of the model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the interaction parameters of the model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is heavily inspired by Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to \code{\link{clusterfit}} to control the minimum contrast fitting algorithm. } \item{Composite likelihood:}{ If \code{method = "clik2"} the interaction parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta)) - (sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{rho(d; theta)} is the pair correlation function of the model with interaction parameters \eqn{\theta}{theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d \le R)}, where \eqn{R} is \code{rmax/2}. } \item{Palm likelihood:}{ If \code{method = "palm"} the interaction parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ sum[i,j] w(x[i], x[j]) log(lambda[P](x[j] | x[i]; theta)) - integral[D] w(x[i], u) lambda[P](u | x[i]; theta) du } with the same notation as above. Here \eqn{\lambda_P(u \mid v; \theta)}{lambda[P](u | v; theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}.
} \item{Adaptive Composite likelihood:}{ If \code{method = "adapcl"} the clustering parameters of the model will be fitted by solving the adaptive second order composite likelihood estimating equation (Lavancier et al, 2021). The estimating function is \deqn{ \sum_{u, v} w(\epsilon \frac{|g(0; \theta) - 1|}{g(\|u-v\|; \theta)-1}) \frac{\nabla_\theta g(\|u-v\|;\theta)}{g(\|u-v\|;\theta)} - \int_D \int_D w(\epsilon \frac{|g(0; \theta) - 1|}{g(\|u-v\|; \theta)-1}) \nabla_\theta g(\|u-v\|; \theta) \rho(u) \rho(v)\, du\, dv }{ sum[u,v] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1)) g'(||u-v||; theta) / g(||u-v||; theta) - integral[D,D] w(epsilon |g(0; theta)-1|/(g(||u-v||; theta)-1)) g'(||u-v||; theta) rho(u) rho(v) du dv } where the sum is taken over all distinct pairs of points. Here \eqn{g(d;\theta)}{g(d; theta)} is the pair correlation function with parameters \eqn{\theta}{theta}. The partial derivative with respect to \eqn{\theta}{theta} is \eqn{g'(d; \theta)}{g'(d; theta)}, and \eqn{\rho(u)}{rho(u)} denotes the fitted intensity function of the model. The tuning parameter \eqn{\epsilon}{epsilon} is independent of the data. It can be specified by the argument \code{epsilon} and has default value \eqn{0.01}. The function \eqn{w} in the estimating function is a weighting function of bounded support \eqn{[-1,1]}. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is \eqn{ w(d) = 1(\|d\| \le 1) \exp(1/(d^2-1))}{w(d) = 1(||d|| \le 1) exp(1/(d^2-1)) }. The estimating equation is solved using the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. The package \pkg{nleqslv} must be installed in order to use this option. } } It is also possible to fix any parameters desired before the optimisation by specifying them as \code{name=value} in the call to the family function. See Examples. } \section{Optimization algorithm}{ The following details allow greater control over the fitting procedure. For the first three fitting methods (\code{method="mincon", "clik2"} and \code{"palm"}), the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the arguments \code{control} and \code{algorithm}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}). For \code{method="adapcl"}, the estimating equation is solved using the nonlinear equation solver \code{\link[nleqslv]{nleqslv}} from the package \pkg{nleqslv}. Arguments available for controlling the solver are documented in the help for \code{\link[nleqslv]{nleqslv}}; they include \code{control}, \code{globStrat}, \code{startparm} for the initial estimates and \code{algorithm} for the method. The package \pkg{nleqslv} must be installed in order to use this option. } \value{ An object of class \code{"dppm"} representing the fitted model. There are methods for printing, plotting, predicting and simulating objects of this class. } \seealso{ methods for \code{dppm} objects: \code{\link{plot.dppm}}, \code{\link{fitted.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{methods.dppm}}, \code{\link{as.ppm.dppm}}, \code{\link{Kmodel.dppm}}, \code{\link{pcfmodel.dppm}}. Minimum contrast fitting algorithm: higher level interface \code{\link{clusterfit}}; low-level algorithm \code{\link{mincontrast}}.
Determinantal point process models: \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}}, \code{\link{dppPowerExp}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}}. } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. Lavancier, F., Poinas, A., and Waagepetersen, R. (2021) Adaptive estimating function inference for nonstationary determinantal point processes. \emph{Scandinavian Journal of Statistics}, \bold{48} (1), 87--107. Tanaka, U., Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE,FALSE,FALSE)] } dppm(jpines ~ 1, dppGauss) dppm(jpines ~ 1, dppGauss, method="c") dppm(jpines ~ 1, dppGauss, method="p") dppm(jpines ~ 1, dppGauss, method="a") if(interactive()) { # Fixing the intensity at lambda=2 rather than the Poisson MLE 2.04: dppm(jpines ~ 1, dppGauss(lambda=2)) # The following is quite slow (using K-function) dppm(jpines ~ x, dppMatern) } # much faster using pair correlation function dppm(jpines ~ x, dppMatern, statistic="pcf", statargs=list(stoyan=0.2)) # Fixing the Matern shape parameter at nu=2 rather than estimating it: dppm(jpines ~ x, dppMatern(nu=2)) } \author{ \spatstatAuthors. Adaptive composite likelihood method contributed by Chiara Fend and modified by Adrian Baddeley. } \keyword{spatial} \keyword{models}
(2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. \code{DOI: 10.1214/10-EJS581} } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppm}} } \examples{ fitS <- slrm(swedishpines ~ 1) intensity(fitS) fitX <- slrm(swedishpines ~ x) intensity(fitX) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/intensity.ppm.Rd0000644000176200001440000000771614331173076017113 0ustar liggesusers\name{intensity.ppm} \alias{intensity.ppm} \title{ Intensity of Fitted Point Process Model } \description{ Computes the intensity of a fitted point process model. } \usage{ \method{intensity}{ppm}(X, \dots, approx=c("Poisson", "DPP")) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed to \code{\link{predict.ppm}} in some cases. See Details. } \item{approx}{ Character string (partially matched) specifying the type of approximation to the intensity for a non-Poisson model. } }
\details{ This is a method for the generic function \code{\link{intensity}} for fitted point process models (class \code{"ppm"}). The intensity of a point process model is the expected number of random points per unit area. If \code{X} is a Poisson point process model, the intensity of the process is computed exactly. The result is a numerical value if \code{X} is a stationary Poisson point process, and a pixel image if \code{X} is non-stationary. (In the latter case, the resolution of the pixel image is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}.) If \code{X} is a Gibbs point process model that is not a Poisson model, the intensity is computed approximately: \itemize{ \item if \code{approx="Poisson"} (the default), the intensity is computed using the Poisson-saddlepoint approximation (Baddeley and Nair, 2012a, 2012b, 2017; Anderssen et al, 2014). This approximation is currently available for pairwise-interaction models (Baddeley and Nair, 2012a, 2012b) and for the area-interaction model and Geyer saturation model (Baddeley and Nair, 2017). If the model is non-stationary, the pseudostationary solution (Baddeley and Nair, 2012b; Anderssen et al, 2014) is used. The result is a pixel image, whose resolution is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}. \item if \code{approx="DPP"}, the intensity is calculated using the approximation of Coeurjolly and Lavancier (2018) based on a determinantal point process. This approximation is more accurate than the Poisson saddlepoint approximation for inhibitory interactions. However, the DPP approximation is only available for stationary pairwise interaction models. } }
\value{ A numeric value (if the model is stationary) or a pixel image. }
\references{ Anderssen, R.S., Baddeley, A., DeHoog, F.R. and Nair, G.M. (2014) Solution of an integral equation arising in spatial point process theory. \emph{Journal of Integral Equations and Applications} \bold{26} (4) 437--453. Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. \code{DOI: 10.1002/sta4.5} Baddeley, A. and Nair, G.
(2017) Poisson-saddlepoint approximation for Gibbs point processes with infinite-order interaction: in memory of Peter Hall. \emph{Journal of Applied Probability} \bold{54}, 4, 1008--1026. Coeurjolly, J.-F. and Lavancier, F. (2018) Intensity approximation for pairwise interaction Gibbs point processes using determinantal point processes. \emph{Electronic Journal of Statistics} \bold{12} 3181--3203. } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ fitP <- ppm(swedishpines ~ 1) intensity(fitP) fitS <- ppm(swedishpines ~ 1, Strauss(9)) intensity(fitS) intensity(fitS, approx="D") fitSx <- ppm(swedishpines ~ x, Strauss(9)) lamSx <- intensity(fitSx) fitG <- ppm(swedishpines ~ 1, Geyer(9, 1)) lamG <- intensity(fitG) fitA <- ppm(swedishpines ~ 1, AreaInter(7)) lamA <- intensity(fitA) } \author{ \adrian, Gopalan Nair, and \Frederic Lavancier. } \keyword{spatial} \keyword{models} spatstat.model/man/plot.kppm.Rd0000644000176200001440000000557114331173077016220 0ustar liggesusers\name{plot.kppm} \alias{plot.kppm} \title{Plot a fitted cluster point process} \description{ Plots a fitted cluster point process model, displaying the fitted intensity and the fitted \eqn{K}-function. } \usage{ \method{plot}{kppm}(x, \dots, what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) } \arguments{ \item{x}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } \item{pause}{ Logical value specifying whether to pause between plots. } \item{xname}{ Optional. Character string. The name of the object \code{x} for use in the title of the plot. } }
\details{ This is a method for the generic function \code{\link[graphics]{plot}} for the class \code{"kppm"} of fitted cluster point process models. The argument \code{x} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"}, \code{"statistic"} and \code{"cluster"}. This command is capable of producing three different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} \item{what="cluster"}{specifies a fitted cluster, which is computed by \code{\link{clusterfield}} and plotted by \code{\link{plot.im}}. It is only meaningful for Poisson cluster (incl. Neyman-Scott) processes, and it is turned off for log-Gaussian Cox processes (LGCP). If the model is stationary (and non-LGCP) this option is turned on by default and shows a fitted cluster positioned at the centroid of the observation window. For non-stationary (and non-LGCP) models this option is only invoked if explicitly requested, and in that case an additional argument \code{locations} (see \code{\link{clusterfield}}) must be given to specify where to position the parent point(s).} } Alternatively \code{what="all"} selects all available options. }
\value{ Null.
} \examples{ fit <- kppm(redwood~1, "Thomas") plot(fit) } \seealso{ \code{\link{kppm}}, \code{\link{plot.ppm}}, } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.model/man/Geyer.Rd0000644000176200001440000001101214331173075015330 0ustar liggesusers\name{Geyer} \alias{Geyer} \title{Geyer's Saturation Point Process Model} \description{ Creates an instance of Geyer's saturation point process model which can then be fitted to point pattern data. } \usage{ Geyer(r,sat) } \arguments{ \item{r}{Interaction radius. A positive real number.} \item{sat}{Saturation threshold. A non-negative real number.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of Geyer's saturation point process with interaction radius \eqn{r} and saturation threshold \code{sat}. } \details{ Geyer (1999) introduced the \dQuote{saturation process}, a modification of the Strauss process (see \code{\link{Strauss}}) in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{s}. The interaction structure of this model is implemented in the function \code{\link{Geyer}()}. The saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma^{\min(s, t(x_i, X))} }{ beta gamma^min(s, t(x[i],X)) } to the probability density of the point pattern, where \eqn{t(x_i, X)}{t(x[i],X)} denotes the number of \sQuote{close neighbours} of \eqn{x_i}{x[i]} in the pattern \eqn{X}. A close neighbour of \eqn{x_i}{x[i]} is a point \eqn{x_j}{x[j]} with \eqn{j \neq i}{j != i} such that the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]} is less than or equal to \eqn{r}. If the saturation threshold \eqn{s} is set to infinity, this model reduces to the Strauss process (see \code{\link{Strauss}}) with interaction parameter \eqn{\gamma^2}{gamma^2}. If \eqn{s = 0}, the model reduces to the Poisson point process. If \eqn{s} is a finite positive number, then the interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case of the Strauss process), with values \eqn{\gamma < 1}{gamma < 1} describing an \sQuote{ordered} or \sQuote{inhibitive} pattern, and values \eqn{\gamma > 1}{gamma > 1} describing a \sQuote{clustered} or \sQuote{attractive} pattern. The nonstationary saturation process is similar except that the value \eqn{\beta}{beta} is replaced by a function \eqn{\beta(x_i)}{beta(x[i])} of location. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the saturation process interaction is yielded by \code{Geyer(r, sat)} where the arguments \code{r} and \code{sat} specify the Strauss interaction radius \eqn{r} and the saturation threshold \eqn{s}, respectively. See the examples below. Note the only arguments are the interaction radius \code{r} and the saturation threshold \code{sat}. When \code{r} and \code{sat} are fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Geyer()}. 
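For illustration, a minimal sketch (using the \code{cells} dataset, with the same arbitrarily chosen values of \code{r} and \code{sat} as in the Examples below):
\preformatted{
  fit <- ppm(cells ~ 1, Geyer(r=0.07, sat=2))
  exp(coef(fit))  # fitted beta and gamma on the natural scale
}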
} \section{Zero saturation}{ The value \code{sat=0} is permitted by \code{Geyer}, but this is not very useful. For technical reasons, when \code{\link{ppm}} fits a Geyer model with \code{sat=0}, the default behaviour is to return an \dQuote{invalid} fitted model in which the estimate of \eqn{\gamma}{gamma} is \code{NA}. In order to get a Poisson process model returned when \code{sat=0}, you would need to set \code{emend=TRUE} in the call to \code{\link{ppm}}. }
\seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}. To make an interaction object like \code{\link{Geyer}} but having multiple interaction radii, see \code{\link{BadGey}} or \code{\link{Hybrid}}. }
\references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. }
\examples{ ppm(cells, ~1, Geyer(r=0.07, sat=2)) # fit the stationary saturation process to `cells' } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/rex.Rd0000644000176200001440000000524014515336521015062 0ustar liggesusers\name{rex} \alias{rex} \title{ Richardson Extrapolation } \description{ Performs Richardson Extrapolation on a sequence of approximate values. } \usage{ rex(x, r = 2, k = 1, recursive = FALSE) } \arguments{ \item{x}{ A numeric vector or matrix, whose columns are successive estimates or approximations to a vector of parameters. } \item{r}{ A number greater than 1. The ratio of successive step sizes. See Details. } \item{k}{ Integer. The order of convergence assumed. See Details. } \item{recursive}{ Logical value indicating whether to perform one step of Richardson extrapolation (\code{recursive=FALSE}, the default) or repeat the extrapolation procedure until a best estimate is obtained (\code{recursive=TRUE}).
} \seealso{ \code{\link{bc}} } \examples{ # integrals of sin(x) and cos(x) from 0 to pi # correct answers: 2, 0 est <- function(nsteps) { xx <- seq(0, pi, length=nsteps) ans <- pi * c(mean(sin(xx)), mean(cos(xx))) names(ans) <- c("sin", "cos") ans } X <- cbind(est(10), est(20), est(40)) X rex(X) rex(X, recursive=TRUE) # fitted Gibbs point process model fit0 <- ppm(cells ~ 1, Strauss(0.07), nd=16) fit1 <- update(fit0, nd=32) fit2 <- update(fit0, nd=64) co <- cbind(coef(fit0), coef(fit1), coef(fit2)) co rex(co, k=2, recursive=TRUE) } \keyword{math} \keyword{optimize} spatstat.model/man/Smooth.msr.Rd0000644000176200001440000000400414510474261016331 0ustar liggesusers\name{Smooth.msr} \alias{Smooth.msr} \title{ Smooth a Signed or Vector-Valued Measure } \description{ Apply kernel smoothing to a signed measure or vector-valued measure. } \usage{ \method{Smooth}{msr}(X, ..., drop=TRUE) } \arguments{ \item{X}{ Object of class \code{"msr"} representing a signed measure or vector-valued measure. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} controlling the smoothing bandwidth and the pixel resolution. } \item{drop}{ Logical. If \code{TRUE} (the default), the result of smoothing a scalar-valued measure is a pixel image. If \code{FALSE}, the result of smoothing a scalar-valued measure is a list containing one pixel image. } } \details{ This function applies kernel smoothing to a signed measure or vector-valued measure \code{X}. The Gaussian kernel is used. The object \code{X} would typically have been created by \code{\link{residuals.ppm}} or \code{\link{msr}}. } \value{ A pixel image or a list of pixel images. For scalar-valued measures, a pixel image (object of class \code{"im"}) provided \code{drop=TRUE}. For vector-valued measures (or if \code{drop=FALSE}), a list of pixel images; the list also belongs to the class \code{"solist"} so that it can be printed and plotted. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \author{ \adrian } \seealso{ \code{\link{Smooth}}, \code{\link{msr}}, \code{\link{plot.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(Smooth(rp)) plot(Smooth(rs)) } \keyword{spatial} \keyword{models} spatstat.model/man/StraussHard.Rd0000644000176200001440000000773314331173075016537 0ustar liggesusers\name{StraussHard} \alias{StraussHard} \title{The Strauss / Hard Core Point Process Model} \description{ Creates an instance of the ``Strauss/ hard core'' point process model which can then be fitted to point pattern data. } \usage{ StraussHard(r, hc=NA) } \arguments{ \item{r}{The interaction radius of the Strauss interaction} \item{hc}{The hard core distance. Optional.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the ``Strauss/hard core'' process with Strauss interaction radius \eqn{r} and hard core distance \code{hc}. 
} \details{ A Strauss/hard core process with interaction radius \eqn{r}, hard core distance \eqn{h < r}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is a pairwise interaction point process in which \itemize{ \item distinct points are not allowed to come closer than a distance \eqn{h} apart \item each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the probability density. } This is a hybrid of the Strauss process and the hard core process. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case for the Strauss process). If \eqn{\gamma < 1}{gamma < 1}, the model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma > 1}{gamma > 1}, the model is ``ordered'' or ``inhibitive'' up to the distance \eqn{h}, but has an ``attraction'' between points lying at distances in the range between \eqn{h} and \eqn{r}. If \eqn{\gamma = 1}{gamma = 1}, the process reduces to a classical hard core process with hard core distance \eqn{h}. If \eqn{\gamma = 0}{gamma = 0}, the process reduces to a classical hard core process with hard core distance \eqn{r}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss/hard core process pairwise interaction is yielded by the function \code{StraussHard()}. See the examples below. The canonical parameter \eqn{\log(\gamma)}{log(gamma)} is estimated by \code{\link{ppm}()}, not fixed in \code{StraussHard()}. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ StraussHard(r=1,hc=0.02) # prints a sensible description of itself # ppm(cells ~1, StraussHard(r=0.1, hc=0.05)) # fit the stationary Strauss/hard core process to `cells' ppm(cells ~ polynom(x,y,3), StraussHard(r=0.1, hc=0.05)) # fit a nonstationary Strauss/hard core process # with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/update.interact.Rd0000644000176200001440000000227714331173100017351 0ustar liggesusers\name{update.interact} \alias{update.interact} \title{ Update an Interpoint Interaction } \description{ This command updates the \code{object} using the arguments given. 
} \usage{ \method{update}{interact}(object, \dots) } \arguments{ \item{object}{ Interpoint interaction (object of class \code{"interact"}). } \item{\dots}{ Additional or replacement values of parameters of \code{object}. } } \details{ This is a method for the generic function \code{\link[stats]{update}} for the class \code{"interact"} of interpoint interactions. It updates the \code{object} using the parameters given in the extra arguments \code{\dots}. The extra arguments must be given in the form \code{name=value} and must be recognisable to the interaction object. They override any parameters of the same name in \code{object}. } \value{ Another object of class \code{"interact"}, equivalent to \code{object} except for changes in parameter values. } \author{ \spatstatAuthors. } \seealso{ \code{\link{update.ppm}} } \examples{ Str <- Strauss(r=1) Str update(Str, r=2) M <- MultiStrauss(radii=matrix(1,2,2)) update(M, types=c("on", "off")) } \keyword{spatial} \keyword{models} spatstat.model/man/triplet.family.Rd0000644000176200001440000000241714331173077017233 0ustar liggesusers\name{triplet.family} \alias{triplet.family} \title{Triplet Interaction Family} \description{ An object describing the family of all Gibbs point processes with interaction order equal to 3. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have interaction order equal to 3, such as the triplet interaction process \cite{\link{Triplets}}. Anyway, \code{triplet.family} is an object of class \code{"isf"} containing a function \code{triplet.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \value{ Object of class \code{"isf"}, see \code{\link{isf.object}}. } \seealso{ \code{\link{Triplets}} to create the triplet interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{inforder.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/is.hybrid.Rd0000644000176200001440000000344414331173076016163 0ustar liggesusers\name{is.hybrid} \alias{is.hybrid} \alias{is.hybrid.ppm} \alias{is.hybrid.interact} \title{ Test Whether Object is a Hybrid } \description{ Tests whether a point process model or point process interaction is a hybrid of several interactions. } \usage{ is.hybrid(x) \method{is.hybrid}{ppm}(x) \method{is.hybrid}{interact}(x) } \arguments{ \item{x}{ A point process model (object of class \code{"ppm"}) or a point process interaction structure (object of class \code{"interact"}). } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The function \code{is.hybrid} is generic, with methods for point process models (objects of class \code{"ppm"}) and point process interactions (objects of class \code{"interact"}). These functions return \code{TRUE} if the object \code{x} is a hybrid, and \code{FALSE} if it is not a hybrid. Hybrids of two or more interpoint interactions are created by the function \code{\link{Hybrid}}.
Such a hybrid interaction can then be fitted to point pattern data using \code{\link{ppm}}. } \value{ \code{TRUE} if the object is a hybrid, and \code{FALSE} otherwise. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \code{DOI: 10.18637/jss.v055.i11} } \seealso{ \code{\link{Hybrid}} } \examples{ S <- Strauss(0.1) is.hybrid(S) H <- Hybrid(Strauss(0.1), Geyer(0.2, 3)) is.hybrid(H) fit <- ppm(redwood, ~1, H) is.hybrid(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/model.images.Rd0000644000176200001440000001037414331173077016635 0ustar liggesusers\name{model.images} \alias{model.images} \alias{model.images.ppm} \alias{model.images.dppm} \alias{model.images.kppm} \alias{model.images.slrm} \title{Compute Images of Constructed Covariates} \description{ For a point process model fitted to spatial point pattern data, this function computes pixel images of the covariates in the design matrix. } \usage{ model.images(object, ...) \method{model.images}{ppm}(object, W = as.owin(object), ...) \method{model.images}{kppm}(object, W = as.owin(object), ...) \method{model.images}{dppm}(object, W = as.owin(object), ...) \method{model.images}{slrm}(object, ...) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"dppm"}. } \item{W}{ A window (object of class \code{"owin"}) in which the images should be computed. Defaults to the window in which the model was fitted. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link[stats:model.matrix]{model.matrix.lm}}. } } \details{ This command is similar to \code{\link{model.matrix.ppm}} except that it computes pixel images of the covariates, instead of computing the covariate values at certain points only. The \code{object} must be a fitted spatial point process model object of class \code{"ppm"} (produced by the model-fitting function \code{\link{ppm}}) or class \code{"kppm"} (produced by the fitting function \code{\link{kppm}}) or class \code{"dppm"} (produced by the fitting function \code{\link{dppm}}) or class \code{"slrm"} (produced by \code{\link{slrm}}). The spatial covariates required by the model-fitting procedure are computed at every pixel location in the window \code{W}. For \code{slrm} objects, the covariates are computed on the pixels that were used to fit the model. Note that the spatial covariates computed here are not necessarily the original covariates that were supplied when fitting the model. Rather, they are the canonical covariates, the covariates that appear in the loglinear representation of the (conditional) intensity and in the columns of the design matrix. For example, they might include dummy or indicator variables for different levels of a factor, depending on the contrasts that are in force. The pixel resolution is determined by \code{W} if \code{W} is a mask (that is \code{W$type = "mask"}). Otherwise, the pixel resolution is determined by \code{\link{spatstat.options}}. The format of the result depends on whether the original point pattern data were marked or unmarked. \itemize{ \item If the original dataset was unmarked, the result is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. 
The names of the list elements are the names of the covariates determined by \code{\link[stats:model.matrix]{model.matrix.lm}}. The result is also of class \code{"solist"} so that it can be plotted immediately. \item If the original dataset was a multitype point pattern, the result is a \code{\link{hyperframe}} with one column for each possible type of points. Each column is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The row names of the hyperframe are the names of the covariates determined by \code{\link[stats:model.matrix]{model.matrix.lm}}. } } \value{ A list (of class \code{"solist"}) or array (of class \code{"hyperframe"}) containing pixel images (objects of class \code{"im"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{model.matrix.ppm}}, \code{\link[stats]{model.matrix}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{slrm}}, \code{\link{im}}, \code{\link{im.object}}, \code{\link{plot.solist}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(cells ~ x) model.images(fit) B <- owin(c(0.2, 0.4), c(0.3, 0.8)) model.images(fit, B) fit2 <- ppm(cells ~ cut(x,3)) model.images(fit2) fit3 <- slrm(japanesepines ~ x) model.images(fit3) fit4 <- ppm(amacrine ~ marks + x) model.images(fit4) } \keyword{spatial} \keyword{models} spatstat.model/man/objsurf.Rd0000644000176200001440000000757714334115644015755 0ustar liggesusers\name{objsurf} \alias{objsurf} \alias{objsurf.dppm} \alias{objsurf.kppm} \alias{objsurf.minconfit} \title{ Objective Function Surface } \description{ For a model that was fitted by optimisation, compute the values of the objective function in a neighbourhood of the optimal value. } \usage{ objsurf(x, \dots) \method{objsurf}{dppm}(x, ..., ngrid = 32, xlim=NULL, ylim=NULL, enclose=FALSE, ratio = 1.5, verbose = TRUE) \method{objsurf}{kppm}(x, ..., ngrid = 32, xlim=NULL, ylim=NULL, enclose=FALSE, ratio = 1.5, verbose = TRUE) \method{objsurf}{minconfit}(x, ..., ngrid = 32, xlim=NULL, ylim=NULL, ratio = 1.5, verbose = TRUE) } \arguments{ \item{x}{ Some kind of model that was fitted by finding the optimal value of an objective function. An object of class \code{"dppm"}, \code{"kppm"} or \code{"minconfit"}. } \item{\dots}{ Extra arguments are usually ignored. } \item{ngrid}{ Number of grid points to evaluate along each axis. Either a single integer, or a pair of integers. For example \code{ngrid=32} would mean a \code{32 * 32} grid. } \item{xlim,ylim}{ Optional. Numeric vectors of length 2, specifying the limits for the two parameters to be considered. } \item{enclose}{ Logical value specifying whether the default values of \code{xlim} and \code{ylim} should enclose the history of all function evaluations. See Details. } \item{ratio}{ Number greater than 1 determining the default ranges of parameter values. See Details. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The object \code{x} should be some kind of model that was fitted by maximising or minimising the value of an objective function. The objective function will be evaluated on a grid of values of the model parameters. Currently the following types of objects are accepted: \itemize{ \item an object of class \code{"dppm"} representing a determinantal point process. See \code{\link{dppm}}. \item an object of class \code{"kppm"} representing a cluster point process or Cox point process. See \code{\link{kppm}}. 
\item an object of class \code{"minconfit"} representing a minimum-contrast fit between a summary function and its theoretical counterpart. See \code{\link{mincontrast}}. } The result is an object of class \code{"objsurf"} which can be printed and plotted: see \code{\link{methods.objsurf}}. The range of parameter values to be considered is determined by \code{xlim} and \code{ylim}. The default values of \code{xlim} and \code{ylim} are chosen as follows. \itemize{ \item if \code{enclose=FALSE} (the default), the default values of \code{xlim} and \code{ylim} are the ranges from \code{opt/ratio} to \code{opt * ratio} where \code{opt} is the optimal parameter value on the surface. \item If \code{enclose=TRUE}, and if \code{x} contains a trajectory (history of function evaluations), then \code{xlim} and \code{ylim} will be the ranges of parameter values examined in the trajectory. } } \value{ An object of class \code{"objsurf"} which can be printed and plotted. Essentially a list containing entries \code{x}, \code{y}, \code{z} giving the parameter values and objective function values. There are methods for \code{plot}, \code{print}, \code{summary}, \code{image}, \code{contour} and \code{persp}. } \author{ \adrian and \ege. } \seealso{ \code{\link{methods.objsurf}}, \code{\link{kppm}}, \code{\link{mincontrast}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) if(interactive()) { plot(os) contour(os, add=TRUE) persp(os) } } \keyword{spatial} \keyword{models} spatstat.model/man/as.function.leverage.ppm.Rd0000644000176200001440000000246614510474260021104 0ustar liggesusers\name{as.function.leverage.ppm} \alias{as.function.leverage.ppm} \title{ Convert Leverage Object to Function of Coordinates } \description{ Converts an object of class \code{"leverage.ppm"} to a function of the \eqn{x} and \eqn{y} coordinates. } \usage{ \method{as.function}{leverage.ppm}(x, ...) } \arguments{ \item{x}{ Object of class \code{"leverage.ppm"} produced by \code{\link{leverage.ppm}}. } \item{\dots}{ Ignored. } } \details{ An object of class \code{"leverage.ppm"} represents the leverage function of a fitted point process model. This command converts the object to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This function returns the leverage values at the specified locations (calculated by referring to the nearest location where the leverage has been computed). } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.im.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lev <- leverage(fit) f <- as.function(lev) f(0.2, 0.3) # evaluate at (x,y) coordinates y <- f(X) # evaluate at a point pattern } \keyword{spatial} \keyword{manip} \concept{Model diagnostics} spatstat.model/man/rhohat.Rd0000644000176200001440000005321614424137200015547 0ustar liggesusers\name{rhohat.ppm} \alias{rhohat.ppm} \alias{rhohat.slrm} \concept{Resource Selection Function} \concept{Prospectivity} \title{ Nonparametric Estimate of Intensity as Function of a Covariate } \description{ Computes a nonparametric estimate of the intensity of a point process, as a function of a (continuous) spatial covariate. 
} \usage{ \method{rhohat}{ppm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps", "grow.frame", "shrink.frame"), n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) \method{rhohat}{slrm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{weights}{ Optional weights attached to the data points. Either a numeric vector of weights for each data point, or a pixel image (object of class \code{"im"}) or a \code{function(x,y)} providing the weights. } \item{method}{ Character string determining the estimation method. See Details. } \item{horvitz}{ Logical value indicating whether to use Horvitz-Thompson weights. See Details. } \item{smoother}{ Character string determining the smoothing algorithm and the type of curve that will be estimated. See Details. } \item{subset}{ Optional. A spatial window (object of class \code{"owin"}) specifying a subset of the data, from which the estimate should be calculated. } \item{do.CI}{ Logical value specifying whether to calculate standard errors and confidence bands. } \item{jitter}{ Logical value. If \code{jitter=TRUE} (the default), the values of the covariate at the data points will be jittered (randomly perturbed by adding a small amount of noise) using the function \code{\link[base]{jitter}}. If \code{jitter=FALSE}, the covariate values at the data points will not be altered. See the section on \emph{Randomisation and discretisation}. } \item{jitterfactor}{ Numeric value controlling the scale of noise added to the covariate values at the data points when \code{jitter=TRUE}. Passed to the function \code{\link[base]{jitter}} as the argument \code{factor}. } \item{interpolate}{ Logical value specifying whether to use spatial interpolation to obtain the values of the covariate at the data points, when the covariate is a pixel image (object of class \code{"im"}). If \code{interpolate=FALSE}, the covariate value for each data point is simply the value of the covariate image at the pixel centre that is nearest to the data point. If \code{interpolate=TRUE}, the covariate value for each data point is obtained by interpolating the nearest pixel values using \code{\link[spatstat.geom]{interp.im}}. } \item{dimyx,eps,rule.eps}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). 
} \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } \item{positiveCI}{ Logical value. If \code{TRUE}, confidence limits are always positive numbers; if \code{FALSE}, the lower limit of the confidence interval may sometimes be negative. Default is \code{FALSE} if \code{smoother="kernel"} and \code{TRUE} if \code{smoother="local"}. See Details. } \item{breaks}{ Breakpoints for the piecewise-constant function computed when \code{smoother='piecewise'}. Either a vector of numeric values specifying the breakpoints, or a single integer specifying the number of equally-spaced breakpoints. There is a sensible default. } } \details{ This command estimates the relationship between point process intensity and a given spatial covariate. Such a relationship is sometimes called a \emph{resource selection function} (if the points are organisms and the covariate is a descriptor of habitat) or a \emph{prospectivity index} (if the points are mineral deposits and the covariate is a geological variable). This command uses nonparametric methods which do not assume a particular form for the relationship. If \code{object} is a point pattern, and \code{baseline} is missing or null, this command assumes that \code{object} is a realisation of a point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is the resource selection function or prospectivity index. A nonparametric estimator of the function \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a point pattern, and \code{baseline} is given, then the intensity function is assumed to be \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)} where \eqn{B(u)} is the baseline intensity at location \eqn{u}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. The nonparametric estimation procedure is controlled by the arguments \code{smoother}, \code{method} and \code{horvitz}. The argument \code{smoother} selects the type of estimation technique. \itemize{ \item If \code{smoother="kernel"} (the default), the nonparametric estimator is a \emph{kernel smoothing estimator} of \eqn{\rho(z)}{rho(z)} (Guan, 2008; Baddeley et al, 2012). 
The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z} which takes nonnegative values. If \code{do.CI=TRUE} (the default), confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="local"}, the nonparametric estimator is a \emph{local regression estimator} of \eqn{\rho(z)}{rho(z)} (Baddeley et al, 2012) obtained using local likelihood. The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. If \code{do.CI=TRUE} (the default), confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="increasing"}, we assume that \eqn{\rho(z)}{rho(z)} is an increasing function of \eqn{z}, and use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). The estimated function will be a step function, that is increasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Monotone estimates}. \item If \code{smoother="decreasing"}, we assume that \eqn{\rho(z)}{rho(z)} is a decreasing function of \eqn{z}, and use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). The estimated function will be a step function, that is decreasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Monotone estimates}. \item If \code{smoother="mountain"}, we assume that \eqn{\rho(z)}{rho(z)} is a function with an inverted U shape, with a single peak at a value \eqn{z_0}{z0}, so that \eqn{\rho(z)}{rho(z)} is an increasing function of \eqn{z} for \eqn{z < z_0}{z < z0} and a decreasing function of \eqn{z} for \eqn{z > z_0}{z > z0}. We compute the \emph{nonparametric maximum likelihood estimator}. The estimated function will be a step function, which is increasing and then decreasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Unimodal estimates}. \item If \code{smoother="valley"}, we assume that \eqn{\rho(z)}{rho(z)} is a function with a U shape, with a single minimum at a value \eqn{z_0}{z0}, so that \eqn{\rho(z)}{rho(z)} is a decreasing function of \eqn{z} for \eqn{z < z_0}{z < z0} and an increasing function of \eqn{z} for \eqn{z > z_0}{z > z0}. We compute the \emph{nonparametric maximum likelihood estimator}. The estimated function will be a step function, which is decreasing and then increasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Unimodal estimates}. \item If \code{smoother="piecewise"}, the estimate of \eqn{\rho(z)}{rho(z)} is piecewise constant. The range of covariate values is divided into several intervals (ranges or bands). The endpoints of these intervals are the breakpoints, which may be specified by the argument \code{breaks}; there is a sensible default. The estimate of \eqn{\rho(z)}{rho(z)} takes a constant value on each interval. The estimate of \eqn{\rho(z)}{rho(z)} in each interval of covariate values is simply the average intensity (number of points per unit area) in the relevant sub-region. If \code{do.CI=TRUE} (the default), confidence bands are computed assuming a Poisson process. } See Baddeley (2018) for a comparison of these estimation techniques (except for \code{"mountain"} and \code{"valley"}). 
If the argument \code{weights} is present, then the contribution from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is multiplied by \code{weights[i]}. If the argument \code{subset} is present, then the calculations are performed using only the data inside this spatial region. This technique assumes that \code{covariate} has continuous values. It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. For a categorical covariate, use \code{\link{intensity.quadratcount}} applied to the result of \code{\link{quadratcount}(X, tess=covariate)}. The argument \code{covariate} should be a pixel image, or a function, or one of the strings \code{"x"} or \code{"y"} signifying the cartesian coordinates. It will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{dimyx,eps,rule.eps} which are passed to \code{\link{as.mask}}. } \section{Smooth estimates}{ Smooth estimators of \eqn{\rho(z)}{rho(z)} were proposed by Baddeley and Turner (2005) and Baddeley et al (2012). Similar estimators were proposed by Guan (2008) and in the literature on relative distributions (Handcock and Morris, 1999). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. The smooth estimation procedure involves computing several density estimates and combining them. The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{\link[locfit]{locfit}}. } The argument \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates, The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. See Baddeley et al (2012), equation (8). This is similar but not identical to an estimator proposed by Guan (2008). \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. See Baddeley et al (2012), equation (9). \item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. See Baddeley et al (2012), equation (10). } If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. 
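As a minimal sketch of these estimation options (assuming the \code{bei} dataset and its covariates in \code{bei.extra} from \pkg{spatstat.data}; the model and covariate are chosen purely for illustration):
\preformatted{
  fit <- ppm(bei ~ elev, data=bei.extra)
  r1 <- rhohat(fit, bei.extra$grad, method="ratio")
  r2 <- rhohat(fit, bei.extra$grad, method="reweight")
  r3 <- rhohat(fit, bei.extra$grad, method="transform", horvitz=TRUE)
}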
Pointwise confidence intervals for the true value of \eqn{\rho(z)} are also calculated for each \eqn{z}, and will be plotted as grey shading. The confidence intervals are derived using the central limit theorem, based on variance calculations which assume a Poisson point process. If \code{positiveCI=FALSE}, the lower limit of the confidence interval may sometimes be negative, because the confidence intervals are based on a normal approximation to the estimate of \eqn{\rho(z)}. If \code{positiveCI=TRUE}, the confidence limits are always positive, because the confidence interval is based on a normal approximation to the estimate of \eqn{\log(\rho(z))}{log(rho(z))}. For consistency with earlier versions, the default is \code{positiveCI=FALSE} for \code{smoother="kernel"} and \code{positiveCI=TRUE} for \code{smoother="local"}. }
\section{Monotone estimates}{ The nonparametric maximum likelihood estimator of a monotone function \eqn{\rho(z)}{rho(z)} was described by Sager (1982). This method assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. This estimator is chosen by specifying \code{smoother="increasing"} or \code{smoother="decreasing"}. The argument \code{method} is ignored in this case. To compute the estimate of \eqn{\rho(z)}{rho(z)}, the algorithm first computes several primitive step-function estimates, and then takes the maximum of these primitive functions. If \code{smoother="decreasing"}, each primitive step function takes the form \eqn{\rho(z) = \lambda}{rho(z) = lambda} when \eqn{z \le t}, and \eqn{\rho(z) = 0}{rho(z) = 0} when \eqn{z > t}, where \eqn{\lambda}{lambda} is a primitive estimate of intensity based on the data for \eqn{Z \le t}{Z <= t}. The jump location \eqn{t} will be the value of the covariate \eqn{Z} at one of the data points. The primitive estimate \eqn{\lambda}{lambda} is the average intensity (number of points divided by area) for the region of space where the covariate value is less than or equal to \eqn{t}. If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Confidence intervals are not available for the monotone estimators. }
\section{Unimodal estimators}{ If \code{smoother="valley"} then we estimate a U-shaped function. A function \eqn{\rho(z)}{rho(z)} is U-shaped if it is decreasing when \eqn{z < z_0}{z < z0} and increasing when \eqn{z > z_0}{z > z0}, where \eqn{z_0}{z0} is called the critical value. The nonparametric maximum likelihood estimate of such a function can be computed by profiling over \eqn{z_0}{z0}. The algorithm considers all possible candidate values of the critical value \eqn{z_0}{z0}, and estimates the function \eqn{\rho(z)}{rho(z)} separately on the left and right of \eqn{z_0}{z0} using the monotone estimators described above. These function estimates are combined into a single function, and the Poisson point process likelihood is computed. The optimal value of \eqn{z_0}{z0} is the one which maximises the Poisson point process likelihood. If \code{smoother="mountain"} then we estimate a function which has an inverted U shape.
A function \eqn{\rho(z)}{rho(z)} is inverted-U-shaped if it is increasing when \eqn{z < z_0}{z < z0} and decreasing when \eqn{z > z_0}{z > z0}. The nonparametric maximum likelihood estimate of such a function can be computed by profiling over \eqn{z_0}{z0} using the same technique \emph{mutatis mutandis}. Confidence intervals are not available for the unimodal estimators. } \section{Randomisation}{ By default, \code{rhohat} adds a small amount of random noise to the data. This is designed to suppress the effects of discretisation in pixel images. This strategy means that \code{rhohat} does not produce exactly the same result when the computation is repeated. If you need the results to be exactly reproducible, set \code{jitter=FALSE}. By default, the values of the covariate at the data points will be randomly perturbed by adding a small amount of noise using the function \code{\link[base]{jitter}}. To reduce this effect, set \code{jitterfactor} to a number smaller than 1. To suppress this effect entirely, set \code{jitter=FALSE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} (and confidence limits) for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A. (2018) A statistical commentary on mineral prospectivity analysis. Chapter 2, pages 25--65 in \emph{Handbook of Mathematical Geosciences: Fifty Years of IAMG}, edited by B.S. Daya Sagar, Q. Cheng and F.P. Agterberg. Springer, Berlin. Guan, Y. (2008) On consistent nonparametric intensity estimation for inhomogeneous spatial point processes. \emph{Journal of the American Statistical Association} \bold{103}, 1238--1247. Handcock, M.S. and Morris, M. (1999) \emph{Relative Distribution Methods in the Social Sciences}. Springer, New York. Sager, T.W. (1982) Nonparametric maximum likelihood estimation of spatial patterns. \emph{Annals of Statistics} \bold{10}, 1125--1136. } \author{ Smoothing algorithm by \adrian, Ya-Mei Chang, Yong Song, and \rolf. Nonparametric maximum likelihood algorithm by \adrian. } \seealso{ \code{\link{rho2hat}}, \code{\link{methods.rhohat}}, \code{\link[spatstat.model]{parres}}. See \code{\link[spatstat.model]{ppm}} for a parametric method for the same problem. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) fit <- ppm(X ~x) rr <- rhohat(fit, "y") } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.model/man/emend.Rd0000644000176200001440000000157614331173076015364 0ustar liggesusers\name{emend} \alias{emend} \title{ Force Model to be Valid } \description{ Check whether a model is valid, and if not, find the nearest model which is valid. } \usage{ emend(object, \dots) } \arguments{ \item{object}{ A statistical model, belonging to some class. 
} \item{\dots}{Arguments passed to methods.} } \details{ The function \code{emend} is generic, and has methods for several classes of statistical models in the \pkg{spatstat} package (mostly point process models). Its purpose is to check whether a given model is valid (for example, that none of the model parameters are \code{NA}) and, if not, to find the nearest model which is valid. See the methods for more information. } \value{ Another model of the same kind. } \author{ \spatstatAuthors. } \seealso{ \code{\link{emend.ppm}}, \code{\link{valid}}. } \keyword{spatial} \keyword{models} spatstat.model/man/simulate.slrm.Rd0000644000176200001440000000503714331173077017070 0ustar liggesusers\name{simulate.slrm} \alias{simulate.slrm} \title{Simulate a Fitted Spatial Logistic Regression Model} \description{ Generates simulated realisations from a fitted spatial logistic regression model. } \usage{ \method{simulate}{slrm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) } \arguments{ \item{object}{ Fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } }
\details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"slrm"} of fitted spatial logistic regression models. Simulations are performed by \code{\link{rpoispp}} after the intensity has been computed by \code{\link{predict.slrm}}. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. }
\value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. }
\examples{ X <- copper$SouthPoints fit <- slrm(X ~ 1) simulate(fit, 2) fitxy <- slrm(X ~ x+y) simulate(fitxy, 2, window=square(2)) } \seealso{ \code{\link{slrm}}, \code{\link{rpoispp}}, \code{\link{simulate.ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/quadrat.test.mppm.Rd0000644000176200001440000000731614515336521017661 0ustar liggesusers\name{quadrat.test.mppm} \alias{quadrat.test.mppm} \title{Chi-Squared Test for Multiple Point Process Model Based on Quadrat Counts} \description{ Performs a chi-squared goodness-of-fit test of a Poisson point process model fitted to multiple point patterns. } \usage{ \method{quadrat.test}{mppm}(X, ...)
} \arguments{ \item{X}{ An object of class \code{"mppm"} representing a point process model fitted to multiple point patterns. It should be a Poisson model. } \item{\dots}{ Arguments passed to \code{\link{quadrat.test.ppm}} which determine the size of the quadrats. } } \details{ This function performs a \eqn{\chi^2}{chi^2} test of goodness-of-fit for a Poisson point process model, based on quadrat counts. It can also be used to perform a test of Complete Spatial Randomness for a list of point patterns. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), point process models (class \code{"ppm"}) and multiple point process models (class \code{"mppm"}). For this function, the argument \code{X} should be a multiple point process model (object of class \code{"mppm"}) obtained by fitting a point process model to a list of point patterns using the function \code{\link{mppm}}. To perform the test, the data point patterns are extracted from \code{X}. For each point pattern \itemize{ \item the window of observation is divided into rectangular tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. \item The expected number of points in each quadrat is calculated, as determined by the fitted model. } Then we perform a single \eqn{\chi^2}{chi^2} test of goodness-of-fit based on these observed and expected counts. } \section{Testing Complete Spatial Randomness}{ If the intention is to test Complete Spatial Randomness (CSR) there are two options: \itemize{ \item CSR with the same intensity of points in each point pattern; \item CSR with a different, unrelated intensity of points in each point pattern. } In the first case, suppose \code{P} is a list of point patterns we want to test. Then fit the multiple model \code{fit1 <- mppm(P ~1)} which signifies a Poisson point process model with a constant intensity. Then apply \code{quadrat.test(fit1)}. In the second case, fit the model \code{fit2 <- mppm(P ~id)} which signifies a Poisson point process with a different constant intensity for each point pattern. Then apply \code{quadrat.test(fit2)}. } \value{ An object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The \eqn{p}-value of the test is stored in the component \code{p.value}. The return value also belongs to the special class \code{"quadrattest"}. Plotting the object will display, for each window, the position of the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. The return value also has an attribute \code{"components"} which is a list containing the results of \eqn{\chi^2}{chi^2} tests of goodness-of-fit for each individual point pattern. } \seealso{ \code{\link{mppm}}, \code{\link{quadrat.test}} } \examples{ H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) } \references{ \baddrubaturnbook } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors.
} \keyword{spatial} \keyword{htest} \concept{Goodness-of-fit} spatstat.model/man/psstA.Rd0000644000176200001440000001544614510474260015365 0ustar liggesusers\name{psstA} \Rdversion{1.1} \alias{psstA} \title{ Pseudoscore Diagnostic For Fitted Model against Area-Interaction Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of area-interaction type. } \usage{ psstA(object, r = NULL, breaks = NULL, \dots, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection = "border", correction = "all", truecoef = NULL, hi.res = NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Extra arguments passed to \code{\link{quadscheme}} to determine the quadrature scheme, if \code{object} is a point pattern. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{ppmcorrection}{ Optional. Character string specifying the edge correction for the pseudolikelihood to be used in fitting the point process model. Passed to \code{\link{ppm}}. } \item{correction}{ Optional. Character string specifying which diagnostic quantities will be computed. Options are \code{"all"} and \code{"best"}. The default is to compute all diagnostic quantities. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{nr}{ Optional. Number of \code{r} values to be used if \code{r} is not specified. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the approximate area. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. 
The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the area-interaction process (see \code{\link{AreaInter}}). The family of alternatives includes models that are slightly more regular than the fitted model, and others that are slightly more clustered than the fitted model. The pseudoscore, evaluated at the null model, is \deqn{ V(r) = \sum_i A(x_i, x, r) - \int_W A(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( A(x[i], x, r)) - integral( A(u,x,r) lambda(u,x) du) } where \deqn{ A(u,x,r) = B(x\cup\{u\},r) - B(x\setminus u, r) }{ A(u,x,r) = B(x union u, r) - B(x setminus u, r) } where \eqn{B(x,r)} is the area of the union of the discs of radius \eqn{r} centred at the points of \eqn{x} (i.e. \eqn{B(x,r)} is the area of the dilation of \eqn{x} by a distance \eqn{r}). Thus \eqn{A(u,x,r)} is the \emph{unclaimed area} associated with \eqn{u}, that is, the area of that part of the disc of radius \eqn{r} centred at the point \eqn{u} that is not covered by any of the discs of radius \eqn{r} centred at points of \eqn{x}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \section{Warning}{ This computation can take a \bold{very long time}. To shorten the computation time, choose smaller values of the arguments \code{nr} and \code{ngrid}, or reduce the values of their defaults \code{spatstat.options("psstA.nr")} and \code{spatstat.options("psstA.ngrid")}. Computation time is roughly proportional to \code{nr * npoints * ngrid^2} where \code{npoints} is the number of points in the point pattern. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Alternative functions: \code{\link{psstG}}, \code{\link{psst}}, \code{\link{Gres}}, \code{\link{Kres}}. Point process models: \code{\link{ppm}}. Options: \code{\link{spatstat.options}} } \examples{ if(live <- interactive()) { X <- rStrauss(200,0.1,0.05) } else { pso <- spatstat.options(psstA.ngrid=16,psstA.nr=10, ndummy.min=16,npixel=32) X <- cells } plot(psstA(X)) plot(psstA(X, interaction=Strauss(0.05))) if(!live) spatstat.options(pso) } \keyword{spatial} \keyword{models} \concept{Model diagnostics} spatstat.model/man/traj.Rd0000644000176200001440000000226414336357734015241 0ustar liggesusers\name{traj} \alias{traj} \title{ Extract trajectory of function evaluations } \description{ Extract the history of evaluations of the objective function performed when a cluster process model was fitted. 
} \usage{ traj(object) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}) or objective function surface (object of class \code{"objsurf"}). } } \details{ Under appropriate circumstances, the fitted model \code{object} contains the history of evaluations of the objective function that were performed by the optimisation algorithm. This history is extracted by \code{traj}. The result is a data frame containing the input parameter values for the objective function, and the corresponding value of the objective function, that were considered by the optimisation algorithm. This data frame also belongs to the class \code{"traj"} which has methods for \code{plot}, \code{print} and other purposes. } \value{ Either a data frame (belonging to class \code{"traj"}) or \code{NULL}. } \author{ \adrian. } \seealso{ \code{\link{methods.traj}} } \examples{ fit <- kppm(redwood, trajectory=TRUE) h <- traj(fit) } spatstat.model/man/valid.Rd0000644000176200001440000000211514331173100015355 0ustar liggesusers\name{valid} \alias{valid} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a point process model object corresponds to a valid point process. } \usage{ valid(object, \dots) } \arguments{ \item{object}{ Object of some class, describing a point process model. } \item{\dots}{ Additional arguments passed to methods. } } \details{ The function \code{valid} is generic, with methods for the classes \code{"ppm"} and \code{"dppmodel"}. An object representing a point process is called valid if all its parameter values are known (for example, no parameter takes the value \code{NA} or \code{NaN}) and the parameter values correspond to a well-defined point process (for example, the parameter values satisfy all the constraints that are imposed by mathematical theory). See the methods for further details. } \value{ A logical value, or \code{NA}. } \author{ \adrian, \rolf and \ege } \seealso{ \code{\link{valid.ppm}}, \code{\link{valid.detpointprocfamily}} } \keyword{spatial} \keyword{models} spatstat.model/man/model.depends.Rd0000644000176200001440000000672014331173077017012 0ustar liggesusers\name{model.depends} \alias{model.depends} \alias{model.is.additive} \alias{model.covariates} \alias{has.offset.term} \alias{has.offset} \title{ Identify Covariates Involved in each Model Term } \description{ Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. } \usage{ model.depends(object) model.is.additive(object) model.covariates(object, fitted=TRUE, offset=TRUE) has.offset.term(object) has.offset(object) } \arguments{ \item{object}{ A fitted model of any kind. } \item{fitted,offset}{ Logical values determining which type of covariates to include. } } \details{ The \code{object} can be a fitted model of any kind, including models of the classes \code{\link{lm}}, \code{\link{glm}} and \code{\link{ppm}}. To be precise, \code{object} must belong to a class for which there are methods for \code{\link{formula}}, \code{\link{terms}} and \code{\link{model.matrix}}. The command \code{model.depends} determines the relationship between the original covariates (the data supplied when \code{object} was fitted) and the canonical covariates (the columns of the design matrix).
It returns a logical matrix, with one row for each canonical covariate, and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th canonical covariate depends on the \code{j}th original covariate. If the model formula of \code{object} includes offset terms (see \code{\link{offset}}), then the return value of \code{model.depends} also has an attribute \code{"offset"}. This is a logical value or matrix with one row for each offset term and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th offset term depends on the \code{j}th original covariate. The command \code{model.covariates} returns a character vector containing the names of all (original) covariates that were actually used to fit the model. By default, this includes all covariates that appear in the model formula, including offset terms as well as canonical covariate terms. To omit the offset terms, set \code{offset=FALSE}. To omit the canonical covariate terms, set \code{fitted=FALSE}. The command \code{model.is.additive} determines whether the model is additive, in the sense that there is no canonical covariate that depends on two or more original covariates. It returns a logical value. The command \code{has.offset.term} is a faster way to determine whether the model \emph{formula} includes an \code{offset} term. The functions \code{model.depends} and \code{has.offset.term} only detect \code{offset} terms which are present in the model formula. They do not detect numerical offsets in the model object, that were inserted using the \code{offset} argument in \code{lm}, \code{glm} etc. To detect the presence of offsets of both kinds, use \code{has.offset}. } \value{ A logical value or matrix. } \seealso{ \code{\link{ppm}}, \code{\link{model.matrix}} } \examples{ x <- 1:10 y <- 3*x + 2 z <- rep(c(-1,1), 5) fit <- lm(y ~ poly(x,2) + sin(z)) model.depends(fit) model.covariates(fit) model.is.additive(fit) fitoff1 <- lm(y ~ x + offset(z)) fitoff2 <- lm(y ~ x, offset=z) has.offset.term(fitoff1) has.offset(fitoff1) has.offset.term(fitoff2) has.offset(fitoff2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat.model/man/HierStraussHard.Rd0000644000176200001440000001172514331173075017343 0ustar liggesusers\name{HierStraussHard} \alias{HierStraussHard} \title{The Hierarchical Strauss Hard Core Point Process Model} \description{ Creates an instance of the hierarchical Strauss-hard core point process model which can then be fitted to point pattern data. } \usage{ HierStraussHard(iradii, hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss-hard core process with interaction radii \eqn{iradii[i,j]} and hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). 
It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) Strauss hard core process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]}, hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical Strauss hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss hard core process pairwise interaction is yielded by the function \code{HierStraussHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrices \code{iradii} and \code{hradii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrices \code{iradii} and \code{hradii} must be square, with entries which are either positive numbers or zero or \code{NA}. A value of zero or \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hard core distances are specified in \code{HierStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStraussHard()}. } \seealso{ \code{\link{MultiStraussHard}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStrauss}}. } \examples{ r <- matrix(c(30, NA, 40, 30), nrow=2,ncol=2) h <- matrix(c(4, NA, 10, 15), 2, 2) HierStraussHard(r, h) # prints a sensible description of itself ppm(ants ~1, HierStraussHard(r, h)) # fit the stationary hierarchical Strauss-hard core process to ants data } \author{\adrian, \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H.
and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. } \keyword{spatial} \keyword{models} spatstat.model/man/Kcom.Rd0000644000176200001440000002207114331173075015155 0ustar liggesusers\name{Kcom} \Rdversion{1.1} \alias{Kcom} \title{ Model Compensator of K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the \eqn{K} function based on the fitted model (as well as the usual nonparametric estimates of \eqn{K} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{K} functions serves as a diagnostic for the model. } \usage{ Kcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "isotropic", "translate"), conditional = !is.poisson(object), restrict = FALSE, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), compute.var = TRUE, truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{K(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for advanced use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional vector of character strings specifying the edge correction(s) to be used. See \code{\link{Kest}} for options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{compute.var}{ Logical value indicating whether to compute the Poincare variance bound for the residual \eqn{K} function (calculation is only implemented for the isotropic correction). } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. 
It computes an estimate of the \eqn{K} function of the dataset, together with a \emph{model compensator} of the \eqn{K} function, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{K} function. It then also computes the \emph{model compensator} of the \eqn{K} function. The different function estimates are returned as columns in a data frame (of class \code{"fv"}). The argument \code{correction} determines the edge correction(s) to be applied. See \code{\link{Kest}} for explanation of the principle of edge corrections. The following table gives the options for the \code{correction} argument, and the corresponding column names in the result: \tabular{llll}{ \code{correction} \tab \bold{description of correction} \tab \bold{nonparametric} \tab \bold{compensator} \cr \code{"isotropic"} \tab Ripley isotropic correction \tab \code{iso} \tab \code{icom} \cr \code{"translate"} \tab Ohser-Stoyan translation correction \tab \code{trans} \tab \code{tcom} \cr \code{"border"} \tab border correction \tab \code{border} \tab \code{bcom} } The nonparametric estimates can all be expressed in the form \deqn{ \hat K(r) = \sum_i \sum_{j < i} e(x_i,x_j,r,x) I\{ d(x_i,x_j) \le r \} }{ K(r) = sum[i] sum[j < i] e(x[i], x[j], r, x) I( d(x[i],x[j]) <= r ) } where \eqn{x_i}{x[i]} is the \eqn{i}-th data point, \eqn{d(x_i,x_j)}{d(x[i],x[j])} is the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r,x)}{e(x[i],x[j],r,x)} is a term that serves to correct edge effects and to re-normalise the sum. The corresponding model compensator is \deqn{ {\bf C} \, \tilde K(r) = \int_W \lambda(u,x) \sum_j e(u,x_j,r,x \cup u) I\{ d(u,x_j) \le r\} \, {\rm d}u }{ C K(r) = integral[u] lambda(u,x) sum[j] e(u, x[j], r, x+u) I( d(u,x[j]) <= r ) du } where the integral is over all locations \eqn{u} in the observation window, \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{x \cup u}{x+u} denotes the data point pattern \eqn{x} augmented by adding the extra point \eqn{u}. If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix D of Baddeley, Rubak and \Moller (2011).
Thus, by default, the reweighting estimator is computed for non-Poisson models. The nonparametric estimates of \eqn{K(r)} are approximately unbiased estimates of the \eqn{K}-function, assuming the point process is stationary. The model compensators are unbiased estimates \emph{of the mean values of the corresponding nonparametric estimates}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric estimates and model compensators is approximately zero. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Kres}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gcom}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=16)} if(interactive()) { plot(Kcom(fit0)) # compare the isotropic-correction estimates plot(Kcom(fit0), cbind(iso, icom) ~ r) # uniform Poisson is clearly not correct } fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kcom(fit1) K1 if(interactive()) { plot(K1) plot(K1, cbind(iso, icom) ~ r) plot(K1, cbind(trans, tcom) ~ r) # how to plot the difference between nonparametric estimates and compensators plot(K1, iso - icom ~ r) # fit looks approximately OK; try adjusting interaction distance } fit2 <- ppm(cells, ~1, Strauss(0.12)) \testonly{fit2 <- ppm(cells, ~1, Strauss(0.12), nd=16)} K2 <- Kcom(fit2) if(interactive()) { plot(K2) plot(K2, cbind(iso, icom) ~ r) plot(K2, iso - icom ~ r) } } \keyword{spatial} \keyword{models} spatstat.model/man/quadrat.test.Rd0000644000176200001440000002200714510474261016702 0ustar liggesusers\name{quadrat.test.ppm} \alias{quadrat.test.ppm} \alias{quadrat.test.slrm} \title{Dispersion Test for Spatial Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for a given point pattern, based on quadrat counts. Alternatively performs a goodness-of-fit test of a fitted inhomogeneous Poisson model. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ \method{quadrat.test}{ppm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{slrm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) to be subjected to the goodness-of-fit test. Alternatively a fitted point process model (object of class \code{"ppm"} or \code{"slrm"}) to be tested. Alternatively \code{X} can be the result of applying \code{\link{quadratcount}} to a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{alternative}{ Character string (partially matched) specifying the alternative hypothesis. 
} \item{method}{ Character string (partially matched) specifying the test to use: either \code{method="Chisq"} for the chi-squared test (the default), or \code{method="MonteCarlo"} for a Monte Carlo test. } \item{conditional}{ Logical. Should the Monte Carlo test be conducted conditionally upon the observed number of points of the pattern? Ignored if \code{method="Chisq"}. } \item{CR}{ Optional. Numerical value. The exponent for the Cressie-Read test statistic. See Details. } \item{df.est}{ Optional. Advanced use only. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. } \item{\dots}{Ignored.} \item{xbreaks}{ Optional. Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Optional. Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) determining the quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}. } \item{nsim}{ The number of simulated samples to generate when \code{method="MonteCarlo"}. } } \details{ These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests of goodness-of-fit for a point process model, based on quadrat counts. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}), point process models (class \code{"ppm"} or \code{"slrm"}) and quadrat count tables (class \code{"quadratcount"}). \itemize{ \item if \code{X} is a point pattern, we test the null hypothesis that the data pattern is a realisation of Complete Spatial Randomness (the uniform Poisson point process). Marks in the point pattern are ignored. (If \code{lambda} is given then the null hypothesis is the Poisson process with intensity \code{lambda}.) \item if \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness. See \code{\link{quadrat.test.splitppp}} for documentation. \item If \code{X} is a fitted point process model, then it should be a Poisson point process model. The data to which this model was fitted are extracted from the model object, and are treated as the data point pattern for the test. We test the null hypothesis that the data pattern is a realisation of the (inhomogeneous) Poisson point process specified by \code{X}. } In all cases, the window of observation is divided into tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. The quadrats are rectangular by default, or may be regions of arbitrary shape specified by the argument \code{tess}. The expected number of points in each quadrat is also calculated, as determined by CSR (in the first case) or by the fitted model (in the second case). Then the Pearson \eqn{X^2} statistic \deqn{ X^2 = \sum \frac{(\mbox{observed} - \mbox{expected})^2}{\mbox{expected}} }{ X^2 = sum((observed - expected)^2/expected) } is computed. If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of goodness-of-fit is performed by comparing the test statistic to the \eqn{\chi^2}{chi^2} distribution with \eqn{m-k} degrees of freedom, where \code{m} is the number of quadrats and \eqn{k} is the number of fitted parameters (equal to 1 for \code{quadrat.test.ppp}).
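For example, the chi-squared test and its Monte Carlo counterpart (explained below) might be invoked as follows. This is a minimal sketch; the \code{simdat} dataset and the quadrat layout are purely illustrative. \preformatted{
  fit <- ppm(simdat ~ x)
  quadrat.test(fit, nx=3)      # chi-squared test
  quadrat.test(fit, nx=3, method="MonteCarlo",
               nsim=999)       # Monte Carlo test
}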
The default is to compute the \emph{two-sided} \eqn{p}-value, so that the test will be declared significant if \eqn{X^2} is either very large or very small. One-sided \eqn{p}-values can be obtained by specifying the \code{alternative}. An important requirement of the \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat be greater than 5. If \code{method="MonteCarlo"} then a Monte Carlo test is performed, obviating the need for all expected counts to be at least 5. In the Monte Carlo test, \code{nsim} random point patterns are generated from the null hypothesis (either CSR or the fitted point process model). The Pearson \eqn{X^2} statistic is computed as above. The \eqn{p}-value is determined by comparing the \eqn{X^2} statistic for the observed point pattern, with the values obtained from the simulations. Again the default is to compute the \emph{two-sided} \eqn{p}-value. If \code{conditional} is \code{TRUE} then the simulated samples are generated from the multinomial distribution with the number of \dQuote{trials} equal to the number of observed points and the vector of probabilities equal to the expected counts divided by the sum of the expected counts. Otherwise the simulated samples are independent Poisson counts, with means equal to the expected counts. If the argument \code{CR} is given, then instead of the Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence test statistic \deqn{ 2nI = \frac{2}{CR(CR+1)} \sum_i \left[ \left( \frac{X_i}{E_i} \right)^{CR} - 1 \right] }{ 2nI = (2/(CR * (CR+1))) * sum((X[i]/E[i])^CR - 1) } is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count and \eqn{E_i}{E[i]} is the corresponding expected count. The value \code{CR=1} gives the Pearson \eqn{X^2} statistic; \code{CR=0} gives the likelihood ratio test statistic \eqn{G^2}; \code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2}; \code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2}; and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}. In all cases the asymptotic distribution of this test statistic is the same \eqn{\chi^2}{chi^2} distribution as above. The return value is an object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The return value also belongs to the special class \code{"quadrattest"}. Plotting the object will display the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. } \seealso{ \code{\link{quadrat.test.splitppp}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific alternative, use \code{\link[spatstat.model]{anova.ppm}}. } \value{ An object of class \code{"htest"}. See \code{\link[stats]{chisq.test}} for explanation. The return value is also an object of the special class \code{"quadrattest"}, and there is a plot method for this class. See the examples. } \references{ Cressie, N. and Read, T.R.C. (1984) Multinomial goodness-of-fit tests. \emph{Journal of the Royal Statistical Society, Series B} \bold{46}, 440--464.
} \examples{ # fitted model: inhomogeneous Poisson fitx <- ppm(simdat ~ x) quadrat.test(fitx) # an equivalent test (results differ due to discretisation effects): quadrat.test(simdat, lambda=predict(fitx), df.est=length(coef(fitx))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \concept{Test of randomness} \concept{Test of clustering} spatstat.model/man/isf.object.Rd0000644000176200001440000000364314331173076016317 0ustar liggesusers\name{isf.object} \alias{isf.object} %DoNotExport \title{Interaction Structure Family Objects} \description{ Objects of class \code{"isf"} are used internally by the \pkg{spatstat} package to represent the structure of the interpoint interactions in a family of point process models. } \details{ \emph{Advanced Use Only!} An object of class \code{"isf"} (Interaction Structure Family) is used internally by the \pkg{spatstat} package to represent the common mathematical and algorithmic structure of the interpoint interactions in a family of point process models. The existing objects of class \code{"isf"} are: \tabular{ll}{ \code{\link{pairwise.family}} \tab pairwise interaction \cr \code{\link{triplet.family}} \tab triplet interaction \cr \code{\link{pairsat.family}} \tab saturated pairwise interaction \cr \code{\link{hierpair.family}} \tab hierarchical pairwise interaction \cr \code{\link{inforder.family}} \tab infinite order interaction \cr \code{\link{hybrid.family}} \tab hybrids of several interactions \cr \code{\link{ord.family}} \tab Ord interactions \cr } The information contained in these objects enables the \pkg{spatstat} package to select the appropriate algorithm for fitting, predicting and simulating each point process model. For example, in order to fit a model that involves pairwise interactions, the model-fitting function \code{\link{ppm}} would use information contained in \code{\link{pairwise.family}} to select the appropriate algorithms. An object of class \code{"isf"} is essentially a list of functions for various tasks. The internal format is undocumented and may be changed without notice. } \value{ An object of class \code{"isf"}, essentially a list of functions for various tasks. The internal format is undocumented and may be changed without notice. } \author{\adrian.} \keyword{spatial} spatstat.model/man/dummy.ppm.Rd0000644000176200001440000000426514331173076016220 0ustar liggesusers\name{dummy.ppm} \alias{dummy.ppm} \title{Extract Dummy Points Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the `dummy points' of the quadrature scheme used to fit the model. } \usage{ dummy.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } \item{drop}{ Logical value determining whether to delete dummy points that were not used to fit the model. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. 
This function \code{dummy.ppm} extracts the dummy points of the quadrature scheme. A typical use of this function would be to count the number of dummy points, to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some dummy points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused dummy points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- dummy.ppm(fit) npoints(X) # this is the number of dummy points in the quadrature scheme } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} \keyword{models} spatstat.model/man/hardcoredist.Rd0000644000176200001440000000320214374302015016725 0ustar liggesusers\name{hardcoredist} \alias{hardcoredist} \alias{hardcoredist.fii} \alias{hardcoredist.ppm} \title{ Extract the Hard Core Distance of a Point Process Model } \description{ Extract or compute the hard core distance of a point process model. } \usage{ hardcoredist(x, \dots) \method{hardcoredist}{fii}(x, \dots, epsilon = 0) \method{hardcoredist}{ppm}(x, \dots, epsilon = 0) } \arguments{ \item{x}{ An object representing a point process model (class \code{"ppm"}) or the interaction structure of a point process (class \code{"fii"}) or similar. } \item{\dots}{ Additional arguments passed to methods. } \item{epsilon}{ Tolerance for defining the hard core. } } \details{ A point process model has a hard core distance \code{h} if it is impossible for two random points to lie closer than the distance \code{h} apart. The function \code{hardcoredist} is generic, with methods for objects of class \code{"ppm"} (point process models) and \code{"fii"} (fitted point process interactions). It extracts or computes the hard core distance. If \code{epsilon} is specified, then the code calculates the largest distance at which the interaction factor is smaller than \code{epsilon}, implying that points are unlikely to occur closer than this distance. The result is zero if the model does not have a hard core distance. } \value{ A single numeric value, or for multitype point processes, a numeric matrix giving the hard core distances for each pair of types of points. } \author{ \spatstatAuthors. } \examples{ m <- ppm(cells~1, Hardcore()) hardcoredist(m) } \keyword{spatial} \keyword{models} spatstat.model/man/as.fv.kppm.Rd0000644000176200001440000000253214510474260016246 0ustar liggesusers\name{as.fv.kppm} \alias{as.fv.kppm} \alias{as.fv.dppm} \alias{as.fv.minconfit} \title{Convert Fitted Model To Class fv} \description{ Converts fitted model into a function table (an object of class \code{"fv"}). } \usage{ \method{as.fv}{kppm}(x) \method{as.fv}{dppm}(x) \method{as.fv}{minconfit}(x) } \arguments{ \item{x}{A fitted model which will be converted into a function table} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ The generic command \code{as.fv} converts data \code{x}, that could be interpreted as the values of a function, into a function value table (object of the class \code{"fv"} as described in \code{\link{fv.object}}). This object can then be plotted easily using \code{\link{plot.fv}}. 
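For example, a fitted cluster model can be converted to a function table and plotted in one step. This is a minimal sketch; the \code{redwood} dataset and the Thomas cluster model are illustrative choices. \preformatted{
  fit <- kppm(redwood ~ 1, "Thomas")
  K <- as.fv(fit)   # empirical and fitted summary functions
  plot(K)
}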
Objects of class \code{"kppm"} (and related classes) represent a model that has been fitted to a dataset by computing a summary function of the dataset and matching it to the corresponding summary function of the model. The methods for \code{as.fv} for classes \code{"kppm"}, \code{"dppm"} and \code{"minconfit"} extract this information: the result is a function table containing the observed summary function and the best fit summary function. } \examples{ as.fv(kppm(redwood)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} \concept{Model diagnostics} spatstat.model/man/ppm.Rd0000644000176200001440000003750614331173077015073 0ustar liggesusers\name{ppm} \alias{ppm} \alias{ppm.formula} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Data } \description{ Fits a point process model to an observed point pattern. } \usage{ ppm(Q, \dots) \method{ppm}{formula}(Q, interaction=NULL, \dots, data=NULL, subset) } \arguments{ \item{Q}{ A \code{formula} in the \R language describing the model to be fitted. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{ Arguments passed to \code{\link{ppm.ppp}} or \code{\link{ppm.quad}} to control the model-fitting process. } \item{data}{ Optional. The values of spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the model-fitting should be restricted. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. The model fitted by \code{ppm} is either a Poisson point process (in which different points do not interact with each other) or a Gibbs point process (in which different points typically inhibit each other). For clustered point process models, use \code{\link{kppm}}. The function \code{ppm} is generic, with methods for the classes \code{formula}, \code{ppp} and \code{quad}. This page describes the method for a \code{formula}. The first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted. It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a spatial point pattern (object of class \code{"ppp"}) to which the model should be fitted, or an expression which evaluates to a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. 
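For example, the following minimal sketch (using the \code{cells} dataset purely for illustration) fits a Poisson process whose log intensity is linear in the Cartesian coordinates: \preformatted{
  fit <- ppm(cells ~ x + y)   # log(lambda(x,y)) = a + b*x + c*y
  coef(fit)                   # estimates of a, b, c
}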
Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the \code{trend} expression on the right hand side of the formula. The trend may involve the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, the names of entries in the argument \code{data} (if supplied), or the names of objects that exist in the \R session. The trend formula specifies the \bold{logarithm} of the intensity of a Poisson process, or in general, the logarithm of the first order potential of the Gibbs process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If the formula is \code{pattern~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{pattern ~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The interaction models currently available are: \GibbsInteractionsList. See the examples below. Note that it is possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should either be supplied in the argument \code{data}, or should be stored in objects that exist in the \R session. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. If it is given, the argument \code{data} is typically a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. For marked point pattern data, the covariate can be a \code{function(x, y, marks)} or \code{function(x, y, marks, ...)}. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. 
} \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. (To make a covariate in which each tile of the tessellation has a numerical value, convert the tessellation to a \code{function(x,y)} using \code{\link{as.function.tess}}.) } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{data} is a list, the list entries should have names corresponding to (some of) the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{data}. Alternatively, \code{data} may be a data frame giving the values of the covariates at specified locations. Then \code{pattern} should be a quadrature scheme (object of class \code{"quad"}) giving the corresponding locations. See \code{\link{ppm.quad}} for details. } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\psi}{psi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\psi)}{theta = theta(psi)} is some transformation of \eqn{\psi}{psi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters:}{ Other parameters are called \emph{irregular}. } } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells ~ 1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}.
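For example, a profile pseudolikelihood search over the Strauss interaction radius might be sketched as follows (the candidate values of \code{r} and the use of the \code{cells} dataset are purely illustrative; see \code{\link{profilepl}} for details): \preformatted{
  rvals <- data.frame(r = seq(0.05, 0.15, by=0.01))
  pfit <- profilepl(rvals, Strauss, cells ~ 1)
  as.ppm(pfit)   # the fitted model with the optimal r
}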
Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \section{Technical Warnings and Error Messages}{ See \code{\link{ppm.ppp}} for some technical warnings about the weaknesses of the algorithm, and explanation of some common error messages. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2), 377--392. Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Berman, M. and Turner, T.R. (1992) Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41}, 31--38. Besag, J. (1975) Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24}, 179--195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. (1994) On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62}, 99--117. Huang, F. and Ogata, Y. (1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Jensen, J.L. and \Moller, J. (1991) Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1}, 445--461. Jensen, J.L. and Kuensch, H.R. (1994) On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{46}, 475--486. } \seealso{ \code{\link{ppm.ppp}} and \code{\link{ppm.quad}} for more details on the fitting technique and edge correction. \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{project.ppm}} for ensuring the fitted model is a valid point process. See \code{\link{kppm}} for fitting Cox point process models and cluster point process models, and \code{\link{dppm}} for fitting determinantal point process models. } \examples{ online <- interactive() if(!online) { # reduce grid sizes for efficiency in tests spatstat.options(npixel=32, ndummy.min=16) } # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees ~ 1) if(online) { Q <- quadscheme(nztrees) ppm(Q ~ 1) # equivalent.
} fit1 <- ppm(nztrees ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) ppm(nztrees ~ polynom(x,2)) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) if(online) { library(splines) ppm(nztrees ~ bs(x,df=3)) } # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 ppm(nztrees ~ 1, Strauss(r=10), rbord=10) # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 ppm(nztrees ~ x, Strauss(13), correction="periodic") # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. # Compare Maximum Pseudolikelihood, Huang-Ogata and Variational Bayes fits: if(online) ppm(swedishpines ~ 1, Strauss(9)) ppm(swedishpines ~ 1, Strauss(9), method="VBlogi") ppm(swedishpines ~ 1, Strauss(9), improve.type="ho", nsim=if(!online) 8 else 99) # Elastic net fit: if(require(glmnet)) { ppm(swedishpines ~ x+y, Strauss(9), improve.type="enet") } # COVARIATES # X <- rpoispp(20) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X ~ y + weirdfunction) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X ~ x + f, covfunargs=list(a=1/2)) # COVARIATE: logical value TRUE inside window, FALSE outside b <- owin(c(0.1, 0.6), c(0.1, 0.9)) ppm(X ~ b) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species if(online) { ppm(lansing ~ marks, Poisson()) } else { ama <- amacrine[square(0.7)] a <- ppm(ama ~ marks, Poisson(), nd=16) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species if(online) { ppm(lansing ~ marks * polynom(x,y,3), Poisson()) } else { b <- ppm(ama ~ marks * polynom(x,y,2), Poisson(), nd=16) } } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat.model/man/rdpp.Rd0000644000176200001440000000274014331173077015234 0ustar liggesusers\name{rdpp} \alias{rdpp} \title{Simulation of a Determinantal Point Process} \description{ Generates simulated realisations from a determinantal point process. } \usage{ rdpp(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 10000, progress = 0, debug = FALSE, \dots) } \arguments{ \item{eig}{ vector of values between 0 and 1 specifying the non-zero eigenvalues for the process. } \item{index}{ \code{data.frame} or \code{matrix} (or something acceptable to \code{\link{as.matrix}}) specifying indices of the basis functions. } \item{basis}{character string giving the name of the basis.} \item{window}{ window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"}) giving the domain of the point process. } \item{reject_max}{ integer giving the maximal number of trials for rejection sampling. } \item{progress}{ integer giving the interval for making a progress report. The value zero turns reporting off. 
} \item{debug}{ logical value indicating whether debugging information should be printed. } \item{\dots}{Ignored.} } \author{ \spatstatAuthors. } \value{ A point pattern (object of class \code{"ppp"}). } \examples{ index <- expand.grid(-2:2,-2:2) eig <- exp(-rowSums(index^2)) X <- rdpp(eig, index) X ## To simulate a determinantal projection point process with the given indices, set eig=1: XX <- rdpp(1, index) XX } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat.model/man/MultiStraussHard.Rd0000644000176200001440000000673614331173075017544 0ustar liggesusers\name{MultiStraussHard} \alias{MultiStraussHard} \title{The Multitype/Hard Core Strauss Point Process Model} \description{ Creates an instance of the multitype/hard core Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStraussHard(iradii, hradii, types=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype/hard core Strauss process with interaction radii \eqn{iradii[i,j]} and hard core radii \eqn{hradii[i,j]}. } \details{ This is a hybrid of the multitype Strauss process (see \code{\link{MultiStrauss}}) and the hard core process (case \eqn{\gamma=0}{gamma = 0} of the Strauss process). A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart; if the pair lies more than \eqn{h_{ij}}{h[i,j]} and less than \eqn{r_{ij}}{r[i,j]} units apart, it contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the probability density. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrices \code{iradii} and \code{hradii}. The matrices \code{iradii} and \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hardcore radii are specified in \code{MultiStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStraussHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiHard}}, \code{\link{Strauss}} } \examples{ r <- matrix(3, nrow=2,ncol=2) h <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStraussHard(r,h) # prints a sensible description of itself r <- 0.04 * matrix(c(1,2,2,1), nrow=2,ncol=2) h <- 0.02 * matrix(c(1,NA,NA,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[owin(c(0,0.8), c(0,1))] } fit <- ppm(X ~1, MultiStraussHard(r,h)) # fit stationary multitype hardcore Strauss process to `amacrine' } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype/hard core Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor.
If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStraussHard(types=NULL, iradii, hradii)}. The new code attempts to handle the old syntax as well. } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.model/man/plot.msr.Rd0000644000176200001440000000702714331173100016033 0ustar liggesusers\name{plot.msr} \alias{plot.msr} \title{Plot a Signed or Vector-Valued Measure} \description{ Plot a signed measure or vector-valued measure. } \usage{ \method{plot}{msr}(x, \dots, add = FALSE, how = c("image", "contour", "imagecontour"), main = NULL, do.plot = TRUE, multiplot = TRUE, massthresh = 0, equal.markscale = FALSE, equal.ribbon = FALSE) } \arguments{ \item{x}{ The signed or vector measure to be plotted. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{\dots}{ Extra arguments passed to \code{\link{Smooth.ppp}} to control the interpolation of the continuous density component of \code{x}, or passed to \code{\link{plot.im}} or \code{\link{plot.ppp}} to control the appearance of the plot. } \item{add}{ Logical flag; if \code{TRUE}, the graphics are added to the existing plot. If \code{FALSE} (the default) a new plot is initialised. } \item{how}{ String indicating how to display the continuous density component. } \item{main}{ String. Main title for the plot. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to display a plot with multiple panels (representing different components of a vector-valued measure, or different types of points in a multitype measure.) } \item{massthresh}{ Threshold for plotting atoms. A single numeric value or \code{NULL}. If \code{massthresh=0} (the default) then only atoms with nonzero mass will be plotted. If \code{massthresh > 0} then only atoms whose absolute mass exceeds \code{massthresh} will be plotted. If \code{massthresh=NULL}, then all atoms of the measure will be plotted. } \item{equal.markscale}{ Logical value indicating whether different panels should use the same symbol map (to represent the masses of atoms of the measure). } \item{equal.ribbon}{ Logical value indicating whether different panels should use the same colour map (to represent the density values in the diffuse component of the measure). } } \value{ (Invisible) colour map (object of class \code{"colourmap"}) for the colour image. } \details{ This is the \code{plot} method for the class \code{"msr"}. The continuous density component of \code{x} is interpolated from the existing data by \code{\link{Smooth.ppp}}, and then displayed as a colour image by \code{\link{plot.im}}. The discrete atomic component of \code{x} is then superimposed on this image by plotting the atoms as circles (for positive mass) or squares (for negative mass) by \code{\link{plot.ppp}}. By default, atoms with zero mass are not plotted at all. To smooth both the discrete and continuous components, use \code{\link{Smooth.msr}}. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. To remove atoms with tiny masses, use the argument \code{massthresh}. 
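For example (a sketch; here \code{W} stands for any window object of class \code{"owin"} inside the domain of \code{x}), \code{plot(x, massthresh=0.001, clipwin=W)} would suppress atoms of absolute mass below 0.001 and restrict the display to \code{W}.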
} \seealso{ \code{\link{msr}}, \code{\link{Smooth.ppp}}, \code{\link{Smooth.msr}}, \code{\link{plot.im}}, \code{\link{plot.ppp}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(rp) plot(rs) plot(rs, how="contour") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat.model/man/logLik.mppm.Rd0000644000176200001440000001002414331173100016435 0ustar liggesusers\name{logLik.mppm} \alias{logLik.mppm} \alias{AIC.mppm} \alias{extractAIC.mppm} \alias{nobs.mppm} \alias{getCall.mppm} \alias{terms.mppm} \title{Log Likelihood and AIC for Multiple Point Process Model} \description{ For a point process model that has been fitted to multiple point patterns, these functions extract the log likelihood and AIC, or analogous quantities based on the pseudolikelihood. } \usage{ \method{logLik}{mppm}(object, \dots, warn=TRUE) \method{AIC}{mppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{mppm}(fit, scale = 0, k = 2, \dots, takeuchi = TRUE) \method{nobs}{mppm}(object, \dots) \method{getCall}{mppm}(x, \dots) \method{terms}{mppm}(x, \dots) } \arguments{ \item{object,fit,x}{ Fitted point process model (fitted to multiple point patterns). An object of class \code{"mppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood is returned instead of the likelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{AIC}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{terms}} and \code{\link[stats:update]{getCall}} for the class \code{"mppm"}. An object of class \code{"mppm"} represents a fitted Poisson or Gibbs point process model fitted to several point patterns. It is obtained from the model-fitting function \code{\link{mppm}}. The method \code{logLik.mppm} extracts the maximised value of the log likelihood for the fitted model (as approximated by quadrature using the Berman-Turner approximation). If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning. The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. The method \code{extractAIC.mppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. 
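For example (a sketch based on the model \code{fit} constructed in the Examples below, which is a Poisson model), \code{AIC(fit, takeuchi=TRUE)} and \code{AIC(fit, takeuchi=FALSE)} would return the same value; for a Gibbs model the two penalties would typically differ.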
The method \code{nobs.mppm} returns the total number of points in the original data point patterns to which the model was fitted. The method \code{getCall.mppm} extracts the original call to \code{\link{mppm}} which caused the model to be fitted. The method \code{terms.mppm} extracts the covariate terms in the model formula as a \code{terms} object. Note that these terms do not include the interaction component of the model. The \R function \code{\link[stats]{step}} uses these methods. } \value{ See the help files for the corresponding generic functions. } \seealso{ \code{\link{mppm}} } \references{ \baddrubaturnbook } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) logLik(fit) AIC(fit) nobs(fit) getCall(fit) } \keyword{spatial} \keyword{models} spatstat.model/DESCRIPTION0000644000176200001440000001141214515425132014723 0ustar liggesusersPackage: spatstat.model Version: 3.2-8 Date: 2023-10-23 Title: Parametric Statistical Modelling and Inference for the 'spatstat' Family Authors@R: c(person("Adrian", "Baddeley", role = c("aut", "cre", "cph"), email = "Adrian.Baddeley@curtin.edu.au", comment = c(ORCID="0000-0001-9499-8382")), person("Rolf", "Turner", role = c("aut", "cph"), email="rolfturner@posteo.net", comment=c(ORCID="0000-0001-5521-5218")), person("Ege", "Rubak", role = c("aut", "cph"), email = "rubak@math.aau.dk", comment=c(ORCID="0000-0002-6675-533X")), person("Kasper", "Klitgaard Berthelsen", role = "ctb"), person("Achmad", "Choiruddin", role = c("ctb", "cph")), person("Jean-Francois", "Coeurjolly", role = "ctb"), person("Ottmar", "Cronie", role = "ctb"), person("Tilman", "Davies", role = "ctb"), person("Julian", "Gilbey", role = "ctb"), person("Yongtao", "Guan", role = "ctb"), person("Ute", "Hahn", role = "ctb"), person("Martin", "Hazelton", role = "ctb"), person("Kassel", "Hingee", role = "ctb"), person("Abdollah", "Jalilian", role = "ctb"), person("Frederic", "Lavancier", role = "ctb"), person("Marie-Colette", "van Lieshout", role = "ctb"), person("Bethany", "Macdonald", role = "ctb"), person("Greg", "McSwiggan", role = "ctb"), person("Tuomas", "Rajala", role = "ctb"), person("Suman", "Rakshit", role = c("ctb", "cph")), person("Dominic", "Schuhmacher", role = "ctb"), person("Rasmus", "Plenge Waagepetersen", role = "ctb"), person("Hangsheng", "Wang", role = "ctb")) Maintainer: Adrian Baddeley <Adrian.Baddeley@curtin.edu.au> Depends: R (>= 3.5.0), spatstat.data (>= 3.0), spatstat.geom (>= 3.2-7), spatstat.random (>= 3.2-1), spatstat.explore (>= 3.2-5), stats, graphics, grDevices, utils, methods, nlme, rpart Imports: spatstat.utils (>= 3.0-3), spatstat.sparse (>= 3.0), mgcv, Matrix, abind, tensor, goftest (>= 1.2-2) Suggests: sm, gsl, locfit, spatial, fftwtools (>= 0.9-8), nleqslv, glmnet, spatstat.linnet (>= 3.1), spatstat (>= 3.0) Description: Functionality for parametric statistical modelling and inference for spatial data, mainly spatial point patterns, in the 'spatstat' family of packages. (Excludes analysis of spatial data on a linear network, which is covered by the separate package 'spatstat.linnet'.) Supports parametric modelling, formal statistical inference, and model validation. Parametric models include Poisson point processes, Cox point processes, Neyman-Scott cluster processes, Gibbs point processes and determinantal point processes. Models can be fitted to data using maximum likelihood, maximum pseudolikelihood, maximum composite likelihood and the method of minimum contrast.
Fitted models can be simulated and predicted. Formal inference includes hypothesis tests (quadrat counting tests, Cressie-Read tests, Clark-Evans test, Berman test, Diggle-Cressie-Loosmore-Ford test, scan test, studentised permutation test, segregation test, ANOVA tests of fitted models, adjusted composite likelihood ratio test, envelope tests, Dao-Genton test, balanced independent two-stage test), confidence intervals for parameters, and prediction intervals for point counts. Model validation techniques include leverage, influence, partial residuals, added variable plots, diagnostic plots, pseudoscore residual plots, model compensators and Q-Q plots. License: GPL (>= 2) URL: http://spatstat.org/ NeedsCompilation: yes ByteCompile: true BugReports: https://github.com/spatstat/spatstat.model/issues Packaged: 2023-10-23 00:24:50 UTC; adrian Author: Adrian Baddeley [aut, cre, cph] (), Rolf Turner [aut, cph] (), Ege Rubak [aut, cph] (), Kasper Klitgaard Berthelsen [ctb], Achmad Choiruddin [ctb, cph], Jean-Francois Coeurjolly [ctb], Ottmar Cronie [ctb], Tilman Davies [ctb], Julian Gilbey [ctb], Yongtao Guan [ctb], Ute Hahn [ctb], Martin Hazelton [ctb], Kassel Hingee [ctb], Abdollah Jalilian [ctb], Frederic Lavancier [ctb], Marie-Colette van Lieshout [ctb], Bethany Macdonald [ctb], Greg McSwiggan [ctb], Tuomas Rajala [ctb], Suman Rakshit [ctb, cph], Dominic Schuhmacher [ctb], Rasmus Plenge Waagepetersen [ctb], Hangsheng Wang [ctb] Repository: CRAN Date/Publication: 2023-10-23 08:10:02 UTC spatstat.model/tests/0000755000176200001440000000000014366616216014371 5ustar liggesusersspatstat.model/tests/testsK.R0000644000176200001440000002565214514651372016000 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/kppm.R # # $Revision: 1.39 $ $Date: 2023/10/21 04:25:51 $ # # Test functionality of kppm that once depended on RandomFields # Test update.kppm for old style kppm objects if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ fit <- kppm(redwood ~1, "Thomas") # sic fitx <- kppm(redwood ~x, "Thomas", verbose=TRUE) if(FULLTEST) { fitx <- update(fit, ~ . 
+ x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) #' Wsub <- owin(c(0, 0.5), c(-0.5, 0)) Zsub <- (bdist.pixels(Window(redwood)) > 0.1) fitWsub <- kppm(redwood ~1, "Thomas", subset=Wsub) fitZsub <- kppm(redwood ~1, "Thomas", subset=Zsub) fitWsub #' various methods ff <- as.fv(fitx) uu <- unitname(fitx) unitname(fitCx) <- "furlong" mo <- model.images(fitCx) p <- psib(fit) px <- psib(fitx) } if(ALWAYS) { Y <- simulate(fitx, seed=42, saveLambda=TRUE)[[1]] } if(FULLTEST) { #' vcov.kppm different algorithms vc <- vcov(fitx) vc2 <- vcov(fitx, fast=TRUE) vc3 <- vcov(fitx, fast=TRUE, splitup=TRUE) vc4 <- vcov(fitx, splitup=TRUE) ## other code blocks a <- varcount(fitx, function(x,y){x+1}) # always positive a <- varcount(fitx, function(x,y){y-1}) # always negative a <- varcount(fitx, function(x,y){x+y}) # positive or negative #' improve.kppm fitI <- update(fit, improve.type="quasi") fitxI <- update(fitx, improve.type="quasi") fitxIs <- update(fitx, improve.type="quasi", fast=FALSE) #' vcov.kppm vcI <- vcov(fitxI) } ## plot.kppm including predict.kppm if(ALWAYS) { fitMC <- kppm(redwood ~ x, "Thomas") plot(fitMC) } if(FULLTEST) { fitCL <- kppm(redwood ~ x, "Thomas", method="c") fitPA <- kppm(redwood ~ x, "Thomas", method="p") plot(fitCL) plot(fitPA) ## fit with composite likelihood method [thanks to Abdollah Jalilian] fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8) kfut <- as.fv(fut) } if(ALWAYS) { fit0 <- kppm(redwood ~1, "LGCP") is.poisson(fit0) Y0 <- simulate(fit0, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y0)) p0 <- psib(fit0) # issues a warning if(FULLTEST) { ## fit LGCP using K function: slow fit1 <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), control=list(maxit=3)) Y1 <- simulate(fit1, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1)) } ## fit LGCP using pcf fit1p <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1p <- simulate(fit1p, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1p)) ## .. and using different fitting methods if(FULLTEST) { fit1pClik <- update(fit1p, method="clik") fit1pPalm <- update(fit1p, method="palm") } ## shortcut evaluation of pcf ## (the code being tested is in spatstat.random::clusterinfo.R) if(FULLTEST) { putSpatstatVariable("RFshortcut", TRUE) fitGshort <- kppm(redwood ~ 1, "LGCP", covmodel=list(model="gauss")) fitSshort <- kppm(redwood ~ 1, "LGCP", covmodel=list(model="stable", alpha=1)) putSpatstatVariable("RFshortcut", FALSE) fitGlong <- kppm(redwood ~ 1, "LGCP", covmodel=list(model="gauss")) fitSlong <- kppm(redwood ~ 1, "LGCP", covmodel=list(model="stable", alpha=1)) discrepG <- unlist(parameters(fitGshort)) - unlist(parameters(fitGlong)) discrepS <- unlist(parameters(fitSshort)) - unlist(parameters(fitSlong)) print(discrepG) print(discrepS) if(max(abs(discrepG) > 0.01)) stop("Discrepancy in short-cut fitting of Gaussian LGCP") if(max(abs(discrepS) > 0.01)) stop("Discrepancy in short-cut fitting of stable LGCP") } ## image covariate (a different code block) xx <- as.im(function(x,y) x, Window(redwood)) fit1xx <- update(fit1p, . 
~ xx, data=solist(xx=xx)) Y1xx <- simulate(fit1xx, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1xx)) if(FULLTEST) { fit1xxVG <- update(fit1xx, clusters="VarGamma", nu=-1/4) Y1xxVG <- simulate(fit1xxVG, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1xxVG)) } fit1xxLG <- update(fit1xx, clusters="LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1xxLG <- simulate(fit1xxLG, saveLambda=TRUE, drop=TRUE) stopifnot(is.ppp(Y1xxLG)) # ... and Abdollah's code if(FULLTEST) { fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K") Y2 <- simulate(fit2, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y2)) } } }) if(FULLTEST) { local({ #' various code blocks fut <- kppm(redwood, ~x) fet <- update(fut, redwood3) fot <- update(fut, trend=~y) fit <- kppm(redwoodfull ~ x) Y <- simulate(fit, window=redwoodfull.extra$regionII, saveLambda=TRUE) gut <- improve.kppm(fit, type="wclik1") gut <- improve.kppm(fit, vcov=TRUE, fast.vcov=TRUE, save.internals=TRUE) hut <- kppm(redwood ~ x, method="clik", weightfun=NULL) hut <- kppm(redwood ~ x, method="palm", weightfun=NULL) mut <- kppm(redwood) nut <- update(mut, Y) if(TRUE) { #' Bug in rLGCP spotted by Tilman Davies X <- rLGCP("matern", function(x,y) { 1 - 0.4* y }, var=2, scale=0.7, nu=0.5, win = square(10), dimyx=c(32,64)) } }) } if(FULLTEST) { local({ #' minimum contrast code K <- Kest(redwood) a <- matclust.estK(K) a <- thomas.estK(K) a <- cauchy.estK(K) a <- vargamma.estK(K) a <- lgcp.estK(K) print(a) u <- unitname(a) g <- pcf(redwood) a <- matclust.estpcf(g) a <- thomas.estpcf(g) a <- cauchy.estpcf(g) a <- vargamma.estpcf(g) a <- lgcp.estpcf(g) #' auxiliary functions b <- resolve.vargamma.shape(nu.pcf=1.5) Z <- clusterfield("Thomas", kappa=1, scale=0.2) aa <- NULL aa <- accumulateStatus(simpleMessage("Woof"), aa) aa <- accumulateStatus(simpleMessage("Sit"), aa) aa <- accumulateStatus(simpleMessage("Woof"), aa) printStatusList(aa) RMIN <- 0.01 fit <- kppm(redwood ~ 1, ctrl=list(rmin=RMIN,q=1/2)) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("kppm did not handle parameter 'rmin' in argument 'ctrl' ") fit <- kppm(redwood ~ 1, ctrl=list(rmin=0,q=1/2), rmin=RMIN) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("kppm did not handle parameter 'rmin' in argument 'ctrl'") RMIN <- 2 fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=RMIN,q=1)) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("dppm did not handle parameter 'rmin' in argument 'ctrl'") fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=0,q=1), rmin=RMIN) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("dppm did not handle argument 'rmin'") }) } if(FULLTEST) { local({ #' cover a few code blocks fut <- kppm(redwood ~ x, method="clik") print(summary(fut)) a <- residuals(fut) fut2 <- kppm(redwood ~ x, "LGCP", method="palm") print(summary(fut2)) b <- residuals(fut2) #' po <- ppm(redwood ~ 1) A <- kppmComLik(redwood, Xname="redwood", po=po, clusters="Thomas", statistic="pcf", statargs=list(), control=list(), weightfun=NULL, rmax=0.1) A <- kppmPalmLik(redwood, Xname="redwood", po=po, clusters="Thomas", statistic="pcf", statargs=list(), control=list(), weightfun=NULL, rmax=0.1) }) } reset.spatstat.options() #' #' tests/Kfuns.R #' #' Various K and L functions and pcf #' #' $Revision: 1.43 $ $Date: 2022/06/17 01:47:08 $ #' #' Assumes 'EveryStart.R' was run if(FULLTEST) { Cells <- cells Amacrine <- amacrine Redwood <- redwood } else { ## reduce numbers of data + dummy points spatstat.options(npixel=32, ndummy.min=16) Cells <- cells[c(FALSE, TRUE)] Amacrine <- amacrine[c(FALSE, TRUE)] Redwood <- redwood[c(FALSE, TRUE)] } if(FULLTEST) { local({ 
#' code blocks using fitted model to determine intensity #' Kinhom X <- rpoispp(function(x,y) { 100 * x }, 100, square(1)) fut <- ppm(X ~ x) Kio <- Kinhom(X, fut, update=FALSE) Kiu <- Kinhom(X, fut, update=TRUE, diagonal=FALSE) fit <- ppm(Amacrine ~ marks) #' lohboot Linhom Zred <- predict(ppm(Redwood ~ x+y)) Lred <- lohboot(Redwood, Linhom, lambda=Zred) #' Kmulti.inhom K1 <- Kcross.inhom(Amacrine, lambdaX=fit) On <- split(Amacrine)$on Off <- split(Amacrine)$off K4 <- Kcross.inhom(Amacrine, lambdaI=ppm(On), lambdaJ=ppm(Off)) #' local K functions fut <- ppm(swedishpines ~ polynom(x,y,2)) Z <- predict(fut) Lam <- fitted(fut, dataonly=TRUE) a <- localLinhom(swedishpines, lambda=fut) a <- localLinhom(swedishpines, lambda=Z) a <- localLinhom(swedishpines, lambda=Lam) a <- localLinhom(swedishpines, lambda=Z, correction="none") a <- localLinhom(swedishpines, lambda=Z, correction="translate") #' local cross K functions fat <- ppm(Amacrine ~ x * marks) Zed <- predict(fat) Lum <- fitted(fat, dataonly=TRUE) moff <- (marks(Amacrine) == "off") a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Lum) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=fat) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed, correction="none") a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed, correction="translate") #' cases of resolve.lambdacross h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaX=fat) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaX=fat, update=FALSE) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaI=fat, lambdaJ=fat) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaI=fat, lambdaJ=fat, update=FALSE) #' lohboot b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaX=Zed) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaX=Lum) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaX=fat) b <- lohboot(Amacrine, Lcross.inhom, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) #' #' residual K functions etc #' rco <- compareFit(Cells, Kcom, interaction=anylist(P=Poisson(), S=Strauss(0.08)), same="trans", different="tcom") fit <- ppm(Cells ~ x, Strauss(0.07)) K <- Kcom(Cells, model=fit, restrict=TRUE) }) } reset.spatstat.options() spatstat.model/tests/testsGtoJ.R0000644000176200001440000001077514460146552016450 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/hobjects.R #' #' Validity of methods for ppm(... 
method="ho") #' #' $Revision: 1.4 $ $Date: 2022/06/18 10:14:44 $ if(FULLTEST) { local({ set.seed(42) fit <- ppm(cells ~1, Strauss(0.1), improve.type="ho", nsim=10) fitx <- ppm(cells ~offset(x), Strauss(0.1), improve.type="ho", nsim=10) a <- AIC(fit) ax <- AIC(fitx) f <- fitted(fit) fx <- fitted(fitx) p <- predict(fit) px <- predict(fitx) }) } #' tests/hypotests.R #' Hypothesis tests #' #' $Revision: 1.10 $ $Date: 2023/07/17 07:30:48 $ if(FULLTEST) { local({ #' scan test with baseline fit <- ppm(cells ~ x) lam <- predict(fit) rr <- c(0.05, 1) scan.test(cells, rr, nsim=5, method="poisson", baseline=fit, alternative="less") scan.test(cells, rr, nsim=5, method="poisson", baseline=lam, alternative="less") }) } #' #' tests/interact.R #' #' Support for interaction objects #' #' $Revision: 1.2 $ $Date: 2020/04/28 12:58:26 $ if(FULLTEST) { local({ #' print.intermaker Strauss Geyer Ord #' intermaker BS <- get("BlankStrauss", envir=environment(Strauss)) BD <- function(r) { instantiate.interact(BS, list(r=r)) } BlueDanube <- intermaker(BD, BS) }) } #' tests/ippm.R #' Tests of 'ippm' class #' $Revision: 1.6 $ $Date: 2020/04/28 12:58:26 $ if(FULLTEST) { local({ # .......... set up example from help file ................. nd <- 10 gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model fit <- ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) # fit model with logistic likelihood but without iScore fitlo <- ippm(X ~Z + offset(log(f)), method="logi", covariates=list(Z=Z, f=f), start=list(gamma=1, delta=1), nd=nd) ## ............. test ippm class support ...................... Ar <- model.matrix(fit) Ai <- model.matrix(fit, irregular=TRUE) An <- model.matrix(fit, irregular=TRUE, keepNA=FALSE) AS <- model.matrix(fit, irregular=TRUE, subset=(abs(Z) < 0.5)) Zr <- model.images(fit) Zi <- model.images(fit, irregular=TRUE) ## update.ippm fit2 <- update(fit, . ~ . + I(Z^2)) fit0 <- update(fit, . ~ . - Z, start=list(gamma=2, delta=4)) oldfit <- ippm(X, ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) oldfit2 <- update(oldfit, . ~ . + I(Z^2)) oldfit0 <- update(oldfit, . ~ . - Z, start=list(gamma=2, delta=4)) ## again with logistic fitlo2 <- update(fitlo, . ~ . + I(Z^2)) fitlo0 <- update(fitlo, . ~ . - Z, start=list(gamma=2, delta=4)) oldfitlo <- ippm(X, ~Z + offset(log(f)), method="logi", covariates=list(Z=Z, f=f), start=list(gamma=1, delta=1), nd=nd) oldfitlo2 <- update(oldfitlo, . ~ . + I(Z^2)) oldfitlo0 <- update(oldfitlo, . ~ . - Z, start=list(gamma=2, delta=4)) ## anova.ppm including ippm objects fit0 <- update(fit, . ~ Z) fit0lo <- update(fitlo, . 
~ Z) A <- anova(fit0, fit) Alo <- anova(fit0lo, fitlo) }) } spatstat.model/tests/testsR2.R0000644000176200001440000002267214325154601016061 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/rmhExpand.R # # test decisions about expansion of simulation window # # $Revision: 1.9 $ $Date: 2022/10/23 01:17:33 $ # local({ if(FULLTEST) { ## check expansion in rmhmodel.ppm fit <- ppm(cells ~x) mod <- rmhmodel(fit) is.expandable(mod) wsim <- as.rectangle(mod$trend) ## work around changes in 'unitname' wcel <- as.owin(cells) unitname(wcel) <- unitname(cells) ## test if(!identical(wsim, wcel)) stop("Expansion occurred improperly in rmhmodel.ppm") } }) # # tests/rmhTrend.R # # Problems with trend images (rmhmodel.ppm or rmhEngine) # if(ALWAYS) { local({ set.seed(42) # Bug folder 37 of 8 feb 2011 # rmhmodel.ppm -> predict.ppm # + rmhResolveTypes -> is.subset.owin Z <- rescale(demopat, 7000) X <- unmark(Z) X1 <- split(Z)[[1]] Int <- density(X,dimyx=200) Lint <- eval.im(log(npoints(X1)*Int/npoints(X))) M <- as.owin(Int) MR <- intersect.owin(M,scalardilate(M,0.5,origin="midpoint")) X1 <- X1[MR] Fut <- ppm(X1~offset(Lint),covariates=list(Lint=Lint), inter=BadGey(r=c(0.03,0.05),sat=3)) Y <- rmh(Fut,control=list(expand=M,nrep=1e3), verbose=FALSE) }) } # # tests/rmhmodel.ppm.R # # $Revision: 1.10 $ $Date: 2020/05/01 05:29:42 $ # # Case-by-case tests of rmhmodel.ppm # if(FULLTEST) { local({ f <- ppm(cells) m <- rmhmodel(f) f <- ppm(cells ~x) m <- rmhmodel(f) f <- ppm(cells ~1, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) print(m) f <- ppm(cells ~1, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells ~1, DiggleGratton(0.05,0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, Softcore(0.5), correction="isotropic") m <- rmhmodel(f) f <- ppm(cells ~1, Geyer(0.07,2)) m <- rmhmodel(f) f <- ppm(cells ~1, BadGey(c(0.07,0.1,0.13),2)) m <- rmhmodel(f) f <- ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) m <- rmhmodel(f) f <- ppm(cells ~1, AreaInter(r=0.06)) m <- rmhmodel(f) print(m) # multitype r <- matrix(0.07, 2, 2) f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) print(m) h <- matrix(min(nndist(amacrine))/2, 2, 2) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) diag(r) <- NA diag(h) <- NA f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) # multitype data, interaction not dependent on type f <- ppm(amacrine ~marks, Strauss(0.05)) m <- rmhmodel(f) print(m) # trends f <- ppm(cells ~x, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~y, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells ~x+y, Hardcore(0.07)) m <- rmhmodel(f) print(m) f <- ppm(cells ~polynom(x,y,2), Softcore(0.5), correction="isotropic") m <- rmhmodel(f) # covariates Z <- as.im(function(x,y){ x^2+y^2 }, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) print(m) Zim <- as.im(Z, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Zim)) m <- rmhmodel(f) Z <- as.im(function(x,y){ x^2+y }, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, 
covariates=list(z=Z)) m <- rmhmodel(f) print(m) m <- rmhmodel(f, control=list(p=1)) m <- rmhmodel(f, control=list(p=1,fixall=TRUE)) print(m) Zim <- as.im(Z, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Zim)) m <- rmhmodel(f) print(m) }) } # # tests/rmhmodelHybrids.R # # Test that rmhmodel.ppm and rmhmodel.default # work on Hybrid interaction models # # $Revision: 1.6 $ $Date: 2022/10/23 01:17:56 $ # if(ALWAYS) { # involves C code local({ ## ......... rmhmodel.ppm ....................... fit1 <- ppm(redwood ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2), C=Geyer(0.15, 1))) m1 <- rmhmodel(fit1) m1 reach(m1) ## Test of handling 'IsOffset' fit2 <- ppm(cells ~1, Hybrid(H=Hardcore(0.05), G=Geyer(0.15, 2))) m2 <- rmhmodel(fit2) ## also test C code for hybrid interaction with hard core fakecells <- rmh(fit2, nrep=1e4) ## Test of handling Poisson components fit3 <- ppm(cells ~1, Hybrid(P=Poisson(), S=Strauss(0.05))) X3 <- rmh(fit3, control=list(nrep=1e3,expand=1), verbose=FALSE) }) } # # tests/rmh.ppm.R # # $Revision: 1.5 $ $Date: 2020/05/01 05:29:42 $ # # Examples removed from rmh.ppm.Rd # stripped down to minimal tests of validity # local({ op <- spatstat.options() spatstat.options(rmh.nrep=10, npixel=10, ndummy.min=10) spatstat.options(project.fast=TRUE) Nrep <- 10 X <- swedishpines if(FULLTEST) { ## Poisson process fit <- ppm(X ~1, Poisson()) Xsim <- rmh(fit) } if(ALWAYS) { # Gibbs model => C code ## Strauss process fit <- ppm(X ~1, Strauss(r=7)) Xsim <- rmh(fit) ## Strauss process simulated on a larger window ## then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) ## Extension of model to another window (thanks to Tuomas Rajala) Xsim <- rmh(fit, w=square(2)) Xsim <- simulate(fit, w=square(2)) ## Strauss - hard core process ## fit <- ppm(X ~1, StraussHard(r=7,hc=2)) ## Xsim <- rmh(fit, start=list(n.start=X$n)) ## Geyer saturation process ## fit <- ppm(X ~1, Geyer(r=7,sat=2)) ## Xsim <- rmh(fit, start=list(n.start=X$n)) ## Area-interaction process fit <- ppm(X ~1, AreaInter(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) ## Penttinen process fit <- ppm(X ~1, Penttinen(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) ## soft core interaction process ## X <- quadscheme(X, nd=50) ## fit <- ppm(X ~1, Softcore(kappa=0.1), correction="isotropic") ## Xsim <- rmh(fit, start=list(n.start=X$n)) ## Diggle-Gratton pairwise interaction model ## fit <- ppm(cells ~1, DiggleGratton(0.05, 0.1)) ## Xsim <- rmh(fit, start=list(n.start=cells$n)) ## plot(Xsim, main="simulation from fitted Diggle-Gratton model") ## piecewise-constant pairwise interaction function X <- rSSI(0.05, 100) fit <- ppm(X ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) } ## marked point pattern Y <- amacrine if(FULLTEST) { #' marked Poisson models fit <- ppm(Y) Ysim <- rmh(fit) fit <- ppm(Y~marks) Ysim <- rmh(fit) fit <- ppm(Y~x) Ysim <- rmh(fit) fit <- ppm(Y~marks+x) Ysim <- rmh(fit) } if(ALWAYS) { #' multitype Strauss typ <- levels(Y$marks) MS <- MultiStrauss(types = typ, radii=matrix(0.07, ncol=2, nrow=2)) fit <- ppm(Y~marks*x, MS) Ysim <- rmh(fit) #' multitype Hardcore h0 <- minnndist(unmark(Y)) * 0.95 MH <- MultiHard(types = typ, hradii=matrix(h0, ncol=2, nrow=2)) fit <- ppm(Y ~ marks+x, MH) Ysim <- rmh(fit) #' other code blocks Ysim <- rmh(fit, control=list(periodic=TRUE, expand=1)) Ysim <- rmh(fit, control=list(periodic=FALSE, expand=1)) #' multihard core with invalid initial state Ydouble <- superimpose(Y, rjitter(Y, h0/10)) Ysim <- rmh(fit, 
start=list(x.start=Ydouble)) #' Lennard-Jones fut <- ppm(unmark(longleaf) ~ 1, LennardJones(), rbord=1) Ysim <- rmh(fut) Ysim <- rmh(fut, control=list(periodic=TRUE, expand=1)) } spatstat.options(op) }) reset.spatstat.options() #' #' tests/rmhsnoopy.R #' #' Test the rmh interactive debugger #' #' $Revision: 1.11 $ $Date: 2022/10/23 01:19:00 $ if(ALWAYS) { # may depend on platform local({ R <- 0.1 ## fit a model and prepare to simulate model <- ppm(amacrine ~ marks + x, Strauss(R)) siminfo <- rmh(model, preponly=TRUE) Wsim <- siminfo$control$internal$w.sim Wclip <- siminfo$control$internal$w.clip if(is.null(Wclip)) Wclip <- Window(cells) ## determine debugger interface panel geometry Xinit <- runifpoint(ex=amacrine)[1:40] P <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R, xcoords=Xinit$x, ycoords=Xinit$y, mlevels=levels(marks(Xinit)), mcodes=as.integer(marks(Xinit)) - 1L, irep=3L, itype=1L, proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, numerator=42, denominator=24, panel.only=TRUE) boxes <- P$boxes clicknames <- names(P$clicks) boxcentres <- do.call(concatxy, lapply(boxes, centroid.owin)) ## design a sequence of clicks actionsequence <- c("Up", "Down", "Left", "Right", "At Proposal", "Zoom Out", "Zoom In", "Reset", "Accept", "Reject", "Print Info", "Next Iteration", "Next Shift", "Next Death", "Skip 10", "Skip 100", "Skip 1000", "Skip 10,000", "Skip 100,000", "Exit Debugger") actionsequence <- match(actionsequence, clicknames) actionsequence <- actionsequence[!is.na(actionsequence)] xy <- lapply(boxcentres, "[", actionsequence) ## queue the click sequence spatstat.utils::queueSpatstatLocator(xy$x,xy$y) ## go rmh(model, snoop=TRUE) }) } spatstat.model/tests/testsR1.R0000644000176200001440000000645214243551505016061 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/resid.R #' #' Stuff related to residuals and residual diagnostics #' including residual summary functions #' #' $Revision: 1.7 $ $Date: 2022/05/22 08:43:31 $ #' local({ fit <- ppm(cells ~x, Strauss(r=0.15)) rr <- residuals(fit, quad=quadscheme(cells, nd=128)) diagnose.ppm(fit, cumulative=FALSE, type="pearson") if(FULLTEST) { diagnose.ppm(fit, cumulative=FALSE) fitoff <- ppm(cells ~ sin(x) + offset(y)) plot(a <- parres(fitoff, "x")) plot(b <- parres(fitoff, "y")) print(a) print(b) d <- diagnose.ppm(fit, which="marks") plot(d, plot.neg="discrete") plot(d, plot.neg="imagecontour") d <- diagnose.ppm(fit, type="pearson", which="smooth") plot(d, plot.smooth="image") plot(d, plot.smooth="contour") plot(d, plot.smooth="imagecontour") d <- diagnose.ppm(fit, type="pearson", which="x") plot(d) d <- diagnose.ppm(fit, type="pearson", which="y") plot(d) diagnose.ppm(fit, type="pearson", which="x", cumulative=FALSE) diagnose.ppm(fit, type="pearson", which="x", cumulative=FALSE) diagnose.ppm(fit, type="raw", plot.neg="discrete", plot.smooth="image") diagnose.ppm(fit, type="pearson", plot.neg="contour", plot.smooth="contour") diagnose.ppm(fitoff, type="raw", which="smooth", plot.smooth="persp") diagnose.ppm(fitoff, type="pearson", plot.neg="imagecontour") plot(Frame(letterR), main="") ploterodewin(letterR, erosion(letterR, 0.05), main="jeans") W <- as.mask(letterR) plot(Frame(W), main="") ploterodewin(W, erosion(W, 0.05), main="JeAnS") #' entangled terms in model U <- as.im(1, owin()) Z <- as.im(function(x,y) x, owin()) X <- runifpoint(40) fut <- ppm(X ~ Z:U) a <- parres(fut, "Z") futoff <- ppm(X ~ offset(Z*U)) a <- parres(futoff, "Z") #' residual summary functions pt <- psst(cells, interaction=Strauss(0.1), fun=nndcumfun) } }) ## ## tests/rhohat.R ## ## Test all combinations of options for rhohatCalc ## ## $Revision: 1.6 $ $Date: 2022/05/22 08:03:48 $ local({ if(FULLTEST) { X <- rpoispp(function(x,y){exp(3+3*x)}) Z <- as.im(function(x,y) { x }, Window(X)) f <- funxy(function(x,y) { y + 1 }, Window(X)) ## rhohat.ppm fit <- ppm(X ~x) rhofitA <- rhohat(fit, "x") rhofitB <- rhohat(fit, "x", method="reweight") rhofitC <- rhohat(fit, "x", method="transform") rhofitD <- rhohat(fit, Z) rhofitD <- rhohat(fit, Z, positiveCI=TRUE) lam <- predict(fit) ## Horvitz-Thompson rhofitAH <- rhohat(fit, "x", horvitz=TRUE) rhofitBH <- rhohat(fit, "x", method="reweight", horvitz=TRUE) rhofitCH <- rhohat(fit, "x", method="transform", horvitz=TRUE) r2myx <- rho2hat(fit, "y", "x") r2myxw <- rho2hat(fit, "y", "x", method="reweight") plot(r2myx) plot(r2myxw) print(r2myxw) predict(r2myxw) predict(r2myxw, relative=TRUE) } }) spatstat.model/tests/testsP2.R0000644000176200001440000004430414243551505016056 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/ppmBadData.R # # $Revision: 1.6 $ $Date: 2020/04/30 05:23:52 $ # Testing robustness of ppm and support functions # when data are rubbish local({ if(ALWAYS) { ## from Rolf: very large proportion of data is NA SEED <- 42 K <- 101 A <- 500 X <- seq(0, A, length=K) G <- expand.grid(x=X, y=X) FOO <- function(x,y) { sin(x)^2 + cos(y)^2 } M1 <- im(matrix(FOO(G$x, G$y), K, K), xcol=X, yrow=X) M <- im(matrix(FOO(G$x, G$y), K, K)) BAR <- function(x) { exp(-6.618913 + 5.855337 * x - 8.432483 * x^2) } V <- im(BAR(M$v), xcol=X, yrow=X) # V <- eval.im(exp(-6.618913 + 5.855337 * M - 8.432483 * M^2)) set.seed(SEED) Y <- rpoispp(V) fY <- ppm(Y ~cv + I(cv^2), data=list(cv=M), correction="translate") diagnose.ppm(fY) lurking(fY, covariate=as.im(function(x,y){x}, square(A)), type="raw") } if(ALWAYS) { ## from Andrew Bevan: numerical overflow, ill-conditioned Fisher information SEED <- 42 nongranite<- owin(poly = list(x = c(0, 8500, 7000, 6400, 6400, 6700, 7000, 7200, 7300, 8000, 8100, 8800, 9500, 10000, 10000, 0), y = c(0, 0, 2000, 3800, 4000, 5000, 6500, 7400, 7500, 8000, 8100, 9000, 9500, 9600, 10000, 10000))) ## Trend on raster grid rain <- as.im(X=function(x,y) { x^2 + y^2 }, W=nongranite, dimyx=100) ## Generate a point pattern via a Lennard-Jones process set.seed(SEED) mod4<- rmhmodel(cif="lennard", par=list(beta=1, sigma=250, epsilon=2.2), trend=rain, w=nongranite) ljtr<- rmh(mod4, start=list(n.start=80), control=list(p=1, nrep=1e5)) ## Fit a point process model to the pattern with rain as a covariate ## NOTE INCORRECT TREND FORMULA ljtrmod <- ppm(ljtr, trend= ~ Z, interaction=NULL, data=list(Z=rain)) ss <- summary(ljtrmod) } if(FULLTEST) { ## From Ege ## Degenerate but non-null argument 'covariates' xx <- list() names(xx) <- character(0) fit <- ppm(cells ~x, covariates = xx) st <- summary(fit) } }) #' tests/ppmclass.R #' #' Class support for ppm #' #' $Revision: 1.9 $ $Date: 2022/03/07 03:26:09 $ if(FULLTEST) { local({ #' (1) print.ppm, summary.ppm, print.summary.ppm Z <- as.im(function(x,y){x}, Window(cells)) fitZ <- ppm(cells ~ Z) print(fitZ) print(summary(fitZ)) #' logistic fitl <- ppm(swedishpines ~ x+y, method="logi") print(fitl) print(summary(fitl)) #' Model with covariate arguments f <- function(x,y,b) { x+b } fitf <- ppm(cells ~ f, covfunargs=list(b=1)) print(fitf) print(summary(fitf)) #' Invalid model fitN <- ppm(redwood ~ 1, Strauss(0.1)) print(fitN) print(summary(fitN)) #' standard errors in output fat <- ppm(cells ~ x, Strauss(0.12)) op <- spatstat.options(print.ppm.SE='always') print(fat) spatstat.options(print.ppm.SE='never') print(fat) print(fitZ) spatstat.options(op) ## (2) plot.ppm plot(fitZ) plot(fat, trend=FALSE, cif=FALSE, se=FALSE) ## (3) emend.ppm fitZe <- emend(fitZ, trace=TRUE) ZZ <- Z fitZZ <- ppm(cells ~ Z + ZZ) fitZZe <- emend(fitZZ, trace=TRUE) fitOK <- ppm(redwood ~1, Strauss(0.1), emend=TRUE) print(fitOK) fitNot <- ppm(redwood ~1, Strauss(0.1)) fitSlow <- emend(fitNot, trace=TRUE) print(fitSlow) op <- spatstat.options(project.fast=TRUE) fitFast <- emend(fitNot, trace=TRUE) print(fitFast) fitZZe <- emend(fitZZ, trace=TRUE) spatstat.options(op) #' (4) methods for other generics logLik(fitZ, absolute=TRUE) unitname(fitZ) unitname(fat) <- c("metre", "metres") is.expandable(fitf) fit0 <- update(fitZ, 
. ~ 1) anova(fit0, fitZ, override=TRUE) interactionfamilyname(fat) interactionorder(fat) hardcoredist(fat) #' (5) miscellaneous ## example from Robert Aue - handling offsets X <- demohyper$Points[[1]] GH <- Hybrid(G=Geyer(r=0.1, sat=3), H=Hardcore(0.01)) fit <- ppm(X ~ 1, GH) valid.ppm(fit) #' hard core distance of hybrid hardcoredist(fit) #' interaction order of hybrid interactionorder(fit) #' case of boundingbox boundingbox(cells, ppm(cells ~ 1)) }) reset.spatstat.options() } # # tests/ppmgam.R # # Test ppm with use.gam=TRUE # # $Revision: 1.4 $ $Date: 2020/04/30 05:23:52 $ # if(FULLTEST) { local({ fit <- ppm(nztrees ~s(x,y), use.gam=TRUE) mm <- model.matrix(fit) mf <- model.frame(fit) v <- vcov(fit) prd <- predict(fit) }) } #' #' tests/ppmlogi.R #' #' Tests of ppm(method='logi') #' and related code (predict, leverage etc) #' #' $Revision: 1.15 $ $Date: 2020/04/30 05:23:52 $ #' local({ if(FULLTEST) { fit <- ppm(cells ~x, method="logi") f <- fitted(fit) p <- predict(fit) u <- summary(fit) fitS <- ppm(cells ~x, Strauss(0.12), method="logi") fS <- fitted(fitS) pS <- predict(fitS) uS <- summary(fitS) print(uS) plot(leverage(fit)) plot(influence(fit)) plot(dfbetas(fit)) plot(leverage(fitS)) plot(influence(fitS)) plot(dfbetas(fitS)) } if(FULLTEST) { #' same with hard core - A1 is singular fitH <- ppm(cells ~x, Strauss(0.08), method="logi") print(fitH) fH <- fitted(fitH) pH <- predict(fitH) uH <- summary(fitH) print(uH) plot(leverage(fitH)) plot(influence(fitH)) plot(dfbetas(fitH)) } if(FULLTEST) { #' logistic fit to data frame of covariates z <- c(rep(TRUE, 5), rep(FALSE, 5)) df <- data.frame(A=z + 2* runif(10), B=runif(10)) Y <- quadscheme.logi(runifpoint(5), runifpoint(5)) fut <- ppm(Y ~ A+B, data=df, method="logi") sf <- summary(fut) print(sf) } if(FULLTEST) { #' vblogit code, just to check that it runs. fee <- ppm(cells ~ x, method="VBlogi", nd=21) print(fee) summary(fee) logLik(fee) AIC(fee) extractAIC(fee) Z <- predict(fee) summary(Z) print(fee$internal$glmfit) # print.vblogit } }) # # tests/ppmmarkorder.R # # $Revision: 1.4 $ $Date: 2020/04/30 05:23:52 $ # # Test that predict.ppm, plot.ppm and plot.fitin # tolerate marks with levels that are not in alpha order # if(ALWAYS) { # locale-dependent? local({ X <- amacrine levels(marks(X)) <- c("ZZZ", "AAA") fit <- ppm(X ~marks, MultiStrauss(c("ZZZ","AAA"), matrix(0.06, 2, 2))) aa <- predict(fit, type="trend") bb <- predict(fit, type="cif") plot(fit) plot(fitin(fit)) }) } # # tests/ppmscope.R # # Test things that might corrupt the internal format of ppm objects # # $Revision: 1.7 $ $Date: 2022/01/19 09:18:20 $ # if(ALWAYS) { # dependent on R version? local({ ## (1) Scoping problem that can arise when ppm splits the data fit <- ppm(bei ~elev, data=bei.extra) mm <- model.matrix(fit) ## (2) Fast update mechanism fit1 <- ppm(cells ~x+y, Strauss(0.07)) fit2 <- update(fit1, ~y) fit3 <- update(fit2, ~x) ## (3) New formula-based syntax attach(bei.extra) slfit <- ppm(bei ~ grad) sl2fit <- update(slfit, ~grad + I(grad^2)) slfitup <- update(slfit, use.internal=TRUE) sl2fitup <- update(sl2fit, use.internal=TRUE) ## (4) anova.ppm fut1 <- ppm(cells ~ 1, Strauss(0.1)) futx <- ppm(cells ~ x, Strauss(0.1)) anova(fut1, test="Chi") anova(futx, test="Chi") fut1a <- ppm(cells ~ 1, Strauss(0.1), rbord=0) anova(fut1a, futx, test="Chi") fut1d <- ppm(cells ~ 1, Strauss(0.1), nd=23) anova(fut1d, futx, test="Chi") ## This now works! 
futxyg <- ppm(cells ~ x + s(y), Strauss(0.1), use.gam=TRUE) anova(futx, futxyg) ## marked fatP <- ppm(amacrine ~ marks) fatM <- ppm(amacrine ~ marks, MultiStrauss(matrix(0.07, 2, 2))) anova(fatP, fatM, test="Chi") ## (5) expansion of "." in update.ppm fitb <- ppm(bei ~ . , data=bei.extra) step(fitb) }) } # # tests/ppmtricks.R # # Test backdoor exits, hidden options, internals and tricks in ppm # # $Revision: 1.19 $ $Date: 2020/04/30 05:23:52 $ # local({ ## (1) skip.border if(ALWAYS) { # needed below fit <- ppm(cells, ~1, Strauss(0.1), skip.border=TRUE) } ## (2) subset arguments of different kinds if(FULLTEST) { fut <- ppm(cells ~ x, subset=(x > 0.5)) fot <- ppm(cells ~ x, subset=(x > 0.5), method="logi") W <- owin(c(0.4, 0.8), c(0.2, 0.7)) fut <- ppm(cells ~ x, subset=W) fot <- ppm(cells ~ x, subset=W, method="logi") V <- as.im(inside.owin, Window(cells), w=W) fet <- ppm(cells ~ x, subset=V) fet <- ppm(cells ~ x, subset=V, method="logi") } ## (3) profilepl -> ppm ## uses 'skip.border' and 'precomputed' ## also tests scoping for covariates if(FULLTEST) { splants <- split(ants) mess <- splants[["Messor"]] cats <- splants[["Cataglyphis"]] ss <- data.frame(r=seq(60,120,by=20),hc=29/6) dM <- distmap(mess,dimyx=256) mungf <- profilepl(ss, StraussHard, cats ~ dM) mungp <- profilepl(ss, StraussHard, trend=~dM, Q=cats) } ## (4) splitting large quadschemes into blocks if(FULLTEST) { mop <- spatstat.options(maxmatrix=5000) qr <- quadBlockSizes(quadscheme(cells)) pr <- predict(ppm(cells ~ x, AreaInter(0.05))) spatstat.options(mop) qr <- quadBlockSizes(quadscheme(cells)) } ## (5) shortcuts in summary.ppm ## and corresponding behaviour of print.summary.ppm if(FULLTEST) { print(summary(fit, quick=TRUE)) print(summary(fit, quick="entries")) print(summary(fit, quick="no prediction")) print(summary(fit, quick="no variances")) } ## (6) suffstat.R if(ALWAYS) { fitP <- update(fit, Poisson()) suffstat.poisson(fitP, cells) fit0 <- killinteraction(fit) suffstat.poisson(fit0, cells) } ## (7) various support for class ppm if(FULLTEST) { fut <- kppm(redwood ~ x) A <- quad.ppm(fut) Z <- as.im(function(x,y){x}, Window(cells)) fitZ <- ppm(cells ~ Z) U <- getppmOriginalCovariates(fitZ) } ## (8) support for class profilepl if(FULLTEST) { rr <- data.frame(r=seq(0.05, 0.15, by=0.02)) ps <- profilepl(rr, Strauss, cells) ## plot(ps) ## covered in plot.profilepl.Rd simulate(ps, nrep=1e4) parameters(ps) fitin(ps) predict(ps, type="cif") } ## (9) class 'plotppm' if(FULLTEST) { fut <- ppm(amacrine ~ marks + polynom(x,y,2), Strauss(0.07)) p <- plot(fut, plot.it=FALSE) print(p) plot(p, how="contour") plot(p, how="persp") } ## (10) ppm -> mpl.engine -> mpl.prepare if(ALWAYS) { # includes C code fit <- ppm(cells, NULL) fit <- ppm(cells ~ x, clipwin=square(0.7)) fit <- ppm(cells ~ x, subset=square(0.7)) DG <- as.im(function(x,y){x+y < 1}, square(1)) fit <- ppm(cells ~ x, subset=DG) fit <- ppm(cells ~ x, GLM=glm) fit <- ppm(cells ~ x, famille=quasi(link='log', variance='mu')) fit <- ppm(cells ~ x, Hardcore(0.07), skip.border=TRUE, splitInf=TRUE) } ## (11) unidentifiable model (triggers an error in ppm) if(FULLTEST) { Q <- quadscheme(cells) M <- mpl.prepare(Q, cells, as.ppp(Q), trend=~1, covariates=NULL, interaction=Hardcore(0.3), correction="none") } }) reset.spatstat.options() # # tests/prediction.R # # Things that might go wrong with predict() # # $Revision: 1.20 $ $Date: 2020/04/30 05:41:59 $ # local({ if(ALWAYS) { ## test of 'covfunargs' - platform dependent?
f <- function(x,y,a){ y - a } fit <- ppm(cells ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) p <- predict(fit) ## prediction involving 0 * NA qc <- quadscheme(cells, nd=10) r <- minnndist(as.ppp(qc))/10 fit <- ppm(qc ~ 1, Strauss(r)) # model has NA for interaction coefficient p1 <- predict(fit) p2 <- predict(fit, type="cif", ngrid=10) stopifnot(all(is.finite(as.matrix(p1)))) stopifnot(all(is.finite(as.matrix(p2)))) } if(FULLTEST) { ## test of 'new.coef' mechanism fut <- ppm(cells ~ x, Strauss(0.15), rbord=0) p0 <- predict(fut, type="cif") pe <- predict(fut, type="cif", new.coef=coef(fut)) pn <- predict(fut, type="cif", new.coef=unname(coef(fut))) if(max(abs(pe-p0)) > 0.01) stop("new.coef mechanism is broken!") if(max(abs(pn-p0)) > 0.01) stop("new.coef mechanism gives wrong answer, for unnamed vectors") #' adaptcoef a <- c(A=1,B=2,Z=42) b <- c(B=41,A=0) ab <- adaptcoef(a, b, drop=TRUE) } if(FULLTEST) { ## tests of relrisk.ppm fut <- ppm(amacrine ~ x * marks) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) fut <- ppm(sporophores ~ marks + x) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) ## untested cases of predict.ppm fit0 <- ppm(cells) a <- predict(fit0, interval="confidence") a <- predict(fit0, interval="confidence", type="count") fit <- ppm(cells ~ x) b <- predict(fit, se=TRUE, locations=cells) b <- predict(fit, se=TRUE, interval="confidence") b <- predict(fit, type="count", se=TRUE) b <- predict(fit, type="count", window=square(0.5), se=TRUE) b <- predict(fit, type="count", window=quadrats(cells, 3), se=TRUE) d <- predict(fit, type="count", interval="prediction", se=TRUE) d <- predict(fit, type="count", interval="confidence", se=TRUE) d <- predict(fit, interval="confidence", se=TRUE) foot <- ppm(cells ~ x, StraussHard(0.12)) d <- predict(foot, ignore.hardcore=TRUE) dX <- predict(foot, ignore.hardcore=TRUE, locations=cells) ## superseded usages b <- predict(fit, type="se", getoutofjail=TRUE) b <- predict(fit, type="se", locations=cells) # warning b <- predict(fit, total=TRUE) b <- predict(fit, total=square(0.5)) b <- predict(fit, total=quadrats(cells, 3)) ## supporting code u <- model.se.image(fit, square(0.5)) u <- model.se.image(fit, square(0.5), what="cv") u <- model.se.image(fit, square(0.5), what="ce") co <- c(Intercept=5, slope=3, kink=2) re <- c("Intercept", "slope") a <- fill.coefs(co, re) # warning b <- fill.coefs(co, rev(names(co))) d <- fill.coefs(co, letters[1:3]) ## model matrix etc v <- model.frame(ppm(cells)) fut <- ppm(cells ~ x, Strauss(0.1)) v <- model.matrix(fut, subset=(x<0.5), keepNA=FALSE) df <- data.frame(x=runif(10), y=runif(10), Interaction=sample(0:1, 10, TRUE)) m10 <- PPMmodelmatrix(fut, data=df) mmm <- PPMmodelmatrix(fut, Q=quad.ppm(fut)) #' effectfun for Gibbs effectfun(fut, "x") effectfun(fut, "x", se.fit=TRUE) #' implicit covariate when there is only one effectfun(fut) effectfun(fut, se.fit=TRUE) #' given covariate dlin <- distfun(copper$SouthLines) copfit <- ppm(copper$SouthPoints ~ dlin, Geyer(1,1)) effectfun(copfit, "dlin") effectfun(copfit) #' covariate that is not used in model effectfun(fut, "y", x=0) futS <- ppm(cells ~ 1, Strauss(0.1)) effectfun(futS, "x") effectfun(futS, "y") #' factor covariate fot <- ppm(amacrine~x+marks) effectfun(fot, "marks", x=0.5, se.fit=TRUE) #' covariate retained but not used W <- Window(swedishpines) a <- solist(A=funxy(function(x,y){x < 20}, W), B=funxy(function(x,y){factor(x < 20)}, 
W)) fvt <- ppm(swedishpines ~ A, data=a, allcovar=TRUE) effectfun(fvt, "A", se.fit=TRUE) effectfun(fvt, "B", A=TRUE, se.fit=TRUE) ## ppm with covariate values in data frame X <- rpoispp(42) Q <- quadscheme(X) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } Zvalues <- weirdfunction(x.quad(Q), y.quad(Q)) fot <- ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) effectfun(fot, "y", Z=0) effectfun(fot, "Z", y=0) #' multitype modX <- ppm(amacrine ~ polynom(x,2)) effectfun(modX) effectfun(modX, "x") modXM <- ppm(amacrine ~ marks*polynom(x,2)) effectfun(modXM, "x", marks="on") modXYM <- ppm(amacrine ~ marks*polynom(x,y,2)) effectfun(modXYM, "x", y=0, marks="on") df <- as.data.frame(simulate(modXM, drop=TRUE)) df$marks <- as.character(df$marks) dfpr <- predict(modXM, locations=df) } }) # # tests/project.ppm.R # # $Revision: 1.7 $ $Date: 2020/04/30 05:41:59 $ # # Tests of projection mechanism # local({ chk <- function(m) { if(!valid.ppm(m)) stop("Projected model was still not valid") return(invisible(NULL)) } if(FULLTEST) { ## a very unidentifiable model fit <- ppm(cells ~Z, Strauss(1e-06), covariates=list(Z=0)) chk(emend(fit)) ## multitype r <- matrix(1e-06, 2, 2) fit2 <- ppm(amacrine ~1, MultiStrauss(types=c("off", "on"), radii=r)) chk(emend(fit2)) ## complicated multitype fit3 <- ppm(amacrine ~1, MultiStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit3)) #' code coverage op <- spatstat.options(project.fast=TRUE) fut <- emend(fit, trace=TRUE) chk(fut) spatstat.options(op) #' hierarchical ra <- r r[2,1] <- NA fit4 <- ppm(amacrine ~1, HierStrauss(types=c("off", "on"), radii=r)) chk(emend(fit4)) #' complicated hierarchical fit5 <- ppm(amacrine ~1, HierStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit5)) ## hybrids r0 <- min(nndist(redwood)) ra <- 1.25 * r0 rb <- 0.8 * r0 f1 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Geyer(0.1, 2)), project=TRUE) chk(f1) f2 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f2) f3 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Strauss(0.1)), project=TRUE) chk(f3) f4 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Strauss(0.1)), project=TRUE) chk(f4) f5 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Strauss(0.1)), project=TRUE) chk(f5) f6 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f6) f7 <- ppm(redwood ~1, Hybrid(A=Geyer(rb, 1), B=Strauss(0.1)), project=TRUE) chk(f7) } }) reset.spatstat.options() spatstat.model/tests/testsL.R0000644000176200001440000002504314253322675015774 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/legacy.R ## ## Test that current version of spatstat is compatible with outmoded usage ## $Revision: 1.3 $ $Date: 2020/04/29 08:55:17 $ if(FULLTEST) { local({ ## (1) Old syntax of ppm ppm(cells, ~x) ## (2) Old syntax of MultiStrauss etc. 
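## (Added note -- illustrative, not part of the original test file:
## in the outmoded calling convention the first argument of
## MultiStrauss() and relatives was 'types', which could be missing or
## NULL; current code passes 'radii' first or uses named arguments.
## A minimal sketch of the equivalence, assuming a 2-type pattern:)
if(FALSE) {
  rr <- matrix(0.1, 2, 2)
  a.old <- MultiStrauss( , rr)     # outmoded: 'types' missing, positional 'radii'
  a.new <- MultiStrauss(radii=rr)  # current: named argument
}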
r <- matrix(3, 2, 2) a <- MultiStrauss( , r) a <- MultiStrauss(NULL, r) a <- MultiHard(, r) h <- r/2 a <- MultiStraussHard( , r, h) NULL }) } #' #' tests/leverinf.R #' #' leverage and influence for Gibbs models #' #' $Revision: 1.35 $ $Date: 2022/06/18 10:15:17 $ #' if(FULLTEST) { Cells <- cells Amacrine <- amacrine Redwood <- redwood } else { ## reduce number of data + dummy points spatstat.options(npixel=32, ndummy.min=16) Cells <- cells[c(FALSE,TRUE)] Redwood <- redwood[c(FALSE, TRUE)] Amacrine <- amacrine[c(FALSE, TRUE)] } local({ cat("Running non-sparse algorithm...", fill=TRUE) # original non-sparse algorithm Leverage <- function(...) leverage(..., sparseOK=FALSE) Influence <- function(...) influence(..., sparseOK=FALSE) Dfbetas <- function(...) dfbetas(..., sparseOK=FALSE) if(ALWAYS) { ## Strauss()$delta2 fitS <- ppm(Cells ~ x, Strauss(0.12), rbord=0) levS <- Leverage(fitS) infS <- Influence(fitS) dfbS <- Dfbetas(fitS) ## Geyer()$delta2 fitG <- ppm(Redwood ~ 1, Geyer(0.1, 2), rbord=0) levG <- Leverage(fitG) infG <- Influence(fitG) ## AreaInter()$delta2 fitA <- ppm(Cells ~ 1, AreaInter(0.06), rbord=0, nd=11) levA <- Leverage(fitA) infA <- Influence(fitA) ## pairwise.family$delta2 fitD <- ppm(Cells ~ 1, DiggleGatesStibbard(0.12), rbord=0) levD <- Leverage(fitD) infD <- Influence(fitD) ## DiggleGratton() special code fitDG <- ppm(Cells ~ 1, DiggleGratton(0.05, 0.12), rbord=0) levDG <- Leverage(fitDG) infDG <- Influence(fitDG) ## ppmInfluence; offset is present; coefficient vector has length 0 fitH <- ppm(Cells ~ 1, Hardcore(0.07)) levH <- Leverage(fitH) infH <- Influence(fitH) ## ppmInfluence; hard core fitSH <- ppm(Cells ~ 1, StraussHard(0.07, 0.01)) levSH <- Leverage(fitSH) infSH <- Influence(fitSH) ## ppmInfluence; offset is present; coefficient vector has length 1 fitHx <- ppm(Cells ~ x, Hardcore(0.07), rbord=0) levHx <- Leverage(fitHx) infHx <- Influence(fitHx) ## multitype futAm <- ppm(Amacrine ~ x + marks, Strauss(0.07)) levAm <- leverage(futAm) } if(FULLTEST) { ## ......... class support ............................. ## other methods for classes leverage.ppm and influence.ppm ## not elsewhere tested cat("Testing class support...", fill=TRUE) w <- domain(levS) w <- Window(infS) vv <- shift(levS, c(1.2, 1.3)) vv <- shift(infS, c(1.2, 1.3)) A <- quadrats(Window(Cells), 2) a <- integral(levS,domain=A) b <- integral(infS,domain=A) u <- Smooth(levS, sigma=0.07) v <- Smooth(infS, sigma=0.1) ## plot options plot(levS, what="exact") plot(levS, what="nearest") contour(levS, what="nearest") persp(levS, what="nearest") ## plotting for multitype models plot(levAm) contour(levAm) persp(levAm) plot(levAm, multiplot=FALSE) contour(levAm, multiplot=FALSE) } if(ALWAYS) { ## .......... compare algorithms ......................... 
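## (Added note -- illustrative, not from the original file: the
## divide-and-recombine strategy splits the quadrature points into
## blocks whenever the model matrix would exceed the
## spatstat.options('maxmatrix') entry limit, computes leverage and
## influence block by block, and recombines.  The checks below verify
## that the recombined results agree with the single-block results to
## within rounding error; a minimal sketch of that pattern is:)
if(FALSE) {
  fit <- ppm(cells ~ x, Strauss(0.12), rbord=0)
  lev1 <- leverage(fit)                    # single block
  oo <- spatstat.options(maxmatrix=50000)  # force blocking
  lev2 <- leverage(fit)                    # divide and recombine
  spatstat.options(oo)
  stopifnot(max(abs(as.im(lev1) - as.im(lev2))) < 1e-12)
}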
## divide and recombine algorithm cat("Reduce maximum block side to 50,000 ...", fill=TRUE) op <- spatstat.options(maxmatrix=50000) ## non-sparse levSB <- Leverage(fitS) infSB <- Influence(fitS) dfbSB <- Dfbetas(fitS) } chk <- function(x, y, what, from="single-block and multi-block", thresh=1e-12) { if(max(abs(x-y)) > thresh) stop(paste("Different results for", what, "obtained from", from, "algorithms"), call.=FALSE) invisible(NULL) } if(ALWAYS) { cat("Compare single-block to multi-block...", fill=TRUE) chk(marks(as.ppp(infS)), marks(as.ppp(infSB)), "influence") chk(as.im(levS), as.im(levSB), "leverage") chk(dfbS$val, dfbSB$val, "dfbetas$value") chk(dfbS$density, dfbSB$density, "dfbetas$density") } if(FULLTEST) { ## also check case of zero cif cat("Check zero cif cases...", fill=TRUE) levHB <- Leverage(fitH) infHB <- Influence(fitH) dfbHB <- Dfbetas(fitH) levHxB <- Leverage(fitHx) infHxB <- Influence(fitHx) dfbHxB <- Dfbetas(fitHx) } ## run all code segments Everything <- function(model, ...) { ppmInfluence(model, ..., what="all") } if(FULLTEST) { cat("Run full code on AreaInteraction model...", fill=TRUE) pmiA <- Everything(fitA) ## sparse algorithm, with blocks cat("Run sparse algorithm with blocks...", fill=TRUE) pmiSSB <- Everything(fitS, sparseOK=TRUE) ## also check case of zero cif pmiHSB <- Everything(fitH, sparseOK=TRUE) pmiSHSB <- Everything(fitSH, sparseOK=TRUE) pmiHxSB <- Everything(fitHx, sparseOK=TRUE) cat("Reinstate maxmatrix...", fill=TRUE) spatstat.options(op) } if(ALWAYS) { ## sparse algorithm, no blocks cat("Compare sparse and non-sparse results...", fill=TRUE) pmi <- Everything(fitS, sparseOK=TRUE) levSp <- pmi$leverage infSp <- pmi$influence dfbSp <- pmi$dfbetas chks <- function(...) chk(..., from="sparse and non-sparse") chks(marks(as.ppp(infS)), marks(as.ppp(infSp)), "influence") chks(as.im(levS), as.im(levSp), "leverage") chks(dfbS$val, dfbSp$val, "dfbetas$value") chks(dfbS$density, dfbSp$density, "dfbetas$density") } if(ALWAYS) { #' case of zero cif cat("zero cif...", fill=TRUE) pmiH <- Everything(fitH, sparseOK=TRUE) pmiSH <- Everything(fitSH, sparseOK=TRUE) pmiHx <- Everything(fitHx, sparseOK=TRUE) } if(FULLTEST) { #' other code blocks - check execution only cat("other code blocks...", fill=TRUE) a <- Everything(fitS) a <- Everything(fitS, method="interpreted") a <- Everything(fitS, method="interpreted", entrywise=FALSE) a <- Everything(fitS, entrywise=FALSE) #' zero cif b <- Everything(fitSH) b <- Everything(fitSH, method="interpreted") b <- Everything(fitSH, method="interpreted", entrywise=FALSE) b <- Everything(fitSH, entrywise=FALSE) } #' NOTE: code for irregular parameters is tested below, and in 'make bookcheck' ## ........... logistic fits ....................... 
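#' (Added note -- illustrative, not from the original file:
#' method="logi" fits the Gibbs model by logistic composite likelihood
#' (Baddeley et al, 2014) instead of the default maximum
#' pseudolikelihood, and the leverage/influence computations then
#' dispatch to a special 'delta2' algorithm, as noted in the comments
#' below.  Minimal sketch of the two fits, assuming the 'cells' data:)
if(FALSE) {
  fitP <- ppm(cells ~ x, Strauss(0.12), rbord=0)                 # default MPLE
  fitL <- ppm(cells ~ x, Strauss(0.12), rbord=0, method="logi")  # logistic
  rbind(mple=coef(fitP), logi=coef(fitL))  # estimates should be broadly similar
}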
cat("Logistic fits...", fill=TRUE) #' special algorithm for delta2 fitSlogi <- ppm(Cells ~ x, Strauss(0.12), rbord=0, method="logi") if(FULLTEST) { pmiSlogi <- Everything(fitSlogi) #' special algorithm for delta2 fitGlogi <- ppm(Redwood ~ 1, Geyer(0.1, 2), rbord=0, method="logi") pmiGlogi <- Everything(fitGlogi) #' generic algorithm for delta2 fitDlogi <- ppm(Cells ~ 1, DiggleGatesStibbard(0.12), rbord=0, method="logi") pmiDlogi <- Everything(fitDlogi) #' generic algorithm for delta2 : offset; zero-dimensional fitHlogi <- ppm(Cells ~ 1, Hardcore(0.07), method="logi") pmiHlogi <- Everything(fitHlogi) #' generic algorithm for delta2 : offset; 1-dimensional fitHxlogi <- ppm(Cells ~ x, Hardcore(0.07), rbord=0, method="logi") pmiHxlogi <- Everything(fitHxlogi) #' plotting plot(leverage(fitSlogi)) plot(influence(fitSlogi)) plot(dfbetas(fitSlogi)) } if(ALWAYS) { #' other code blocks - check execution only cat("Other code blocks...", fill=TRUE) b <- Everything(fitSlogi) # i.e. full set of results b <- Everything(fitSlogi, method="interpreted") b <- Everything(fitSlogi, method="interpreted", entrywise=FALSE) b <- Everything(fitSlogi, entrywise=FALSE) } #' irregular parameters cat("Irregular parameters...", fill=TRUE) ytoa <- function(x,y, alpha=1) { y^alpha } lam <- function(x,y,alpha=1) { exp(4 + y^alpha) } set.seed(90210) X <- rpoispp(lam, alpha=2) iScor <- list(alpha=function(x,y,alpha) { alpha * y^(alpha-1) } ) iHess <- list(alpha=function(x,y,alpha) { alpha * (alpha-1) * y^(alpha-2) } ) gogo <- function(tag, ..., iS=iScor, iH=iHess) { cat(tag, fill=TRUE) #' compute all leverage+influence terms ppmInfluence(..., what="all", iScore=iS, iHessian=iH) } gogogo <- function(hdr, fit) { cat(hdr, fill=TRUE) force(fit) #' try all code options d <- gogo("a", fit) d <- gogo("b", fit, method="interpreted") d <- gogo("c", fit, method="interpreted", entrywise=FALSE) d <- gogo("d", fit, entrywise=FALSE) invisible(NULL) } gogogo("Offset model...", ippm(X ~ offset(ytoa), start=list(alpha=1), iterlim=40)) gogogo("Offset model (logistic) ...", ippm(X ~ offset(ytoa), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset+x model...", ippm(X ~ x + offset(ytoa), start=list(alpha=1), iterlim=40)) gogogo("Offset+x model (logistic) ...", ippm(X ~ x + offset(ytoa), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset model Strauss ...", ippm(X ~ offset(ytoa), Strauss(0.07), start=list(alpha=1), iterlim=40)) gogogo("Offset model Strauss (logistic) ...", ippm(X ~ offset(ytoa), Strauss(0.07), start=list(alpha=1), method="logi", iterlim=40)) if(FULLTEST) { gogogo("Offset+x model Strauss ...", ippm(X ~ x + offset(ytoa), Strauss(0.07), start=list(alpha=1), iterlim=40)) gogogo("Offset+x model Strauss (logistic)...", ippm(X ~ x + offset(ytoa), Strauss(0.07), start=list(alpha=1), method="logi", iterlim=40)) } #' if(FULLTEST) { set.seed(452) foo <- ppm(Cells ~ 1, Strauss(0.15), improve.type="ho", nsim=5) aa <- Everything(foo) #' Gradient and Hessian obtained by symbolic differentiation f <- deriv(expression((1+x)^a), "a", function.arg=c("x", "y", "a"), hessian=TRUE) #' check they can be extracted fit <- ippm(Cells ~offset(f), start=list(a=0.7)) Everything(fit) } }) reset.spatstat.options() spatstat.model/tests/testsUtoZ.R0000644000176200001440000002211614325154601016470 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/undoc.R # # $Revision: 1.16 $ $Date: 2020/11/02 07:06:49 $ # # Test undocumented hacks, experimental code, etc local({ if(FULLTEST) { ## cases of 'pickoption' aliases <- c(Lenin="Ulyanov", Stalin="Djugashvili", Trotsky="Bronstein") surname <- "Trot" pickoption("leader", surname, aliases) pickoption("leader", surname, aliases, exact=TRUE, die=FALSE) } if(ALWAYS) { ## pixellate.ppp accepts a data frame of weights pixellate(cells, weights=data.frame(a=1:42, b=42:1)) ## test parts of 'rmhsnoop' that don't require interaction with user rmhSnoopEnv(cells, Window(cells), 0.1) } if(FULLTEST) { ## Berman-Turner frame A <- bt.frame(quadscheme(cells), ~x, Strauss(0.07), rbord=0.07) print(A) ## digestCovariates D <- distfun(cells) Z <- distmap(cells) U <- dirichlet(cells) stopifnot(is.scov(D)) stopifnot(is.scov(Z)) stopifnot(is.scov(U)) stopifnot(is.scov("x")) dg <- digestCovariates(D=D,Z=Z,U=U,"x",list(A="x", B=D)) ## a <- getfields(dg, c("A", "D", "niets"), fatal=FALSE) ## util.R gg <- pointgrid(owin(), 7) checkbigmatrix(1000000L, 1000000L, FALSE, TRUE) spatstatDiagnostic("whatever") M <- list(list(a=2, b=FALSE), list(a=2, b=TRUE)) stopifnot(!allElementsIdentical(M)) stopifnot(allElementsIdentical(M, "a")) ## A <- Strauss(0.1) A <- reincarnate.interact(A) ## ## special lists B <- solist(a=cells, b=redwood, c=japanesepines) BB <- as.ppplist(B) BL <- as.layered(B) DB <- as.imlist(lapply(B, density)) is.solist(B) is.ppplist(B) is.imlist(DB) ## case of density.ppplist DEB <- density(BB, se=TRUE) } if(ALWAYS) { ## fft z <- matrix(1:16, 4, 4) a <- fft2D(z, west=FALSE) if(fftwAvailable()) b <- fft2D(z, west=TRUE) } if(ALWAYS) { ## experimental interactions pot <- function(d, par) { d <= 0.1 } A <- Saturated(pot) print(A) A <- update(A, name="something") ppm(amacrine ~ x, A, rbord=0.1) } if(ALWAYS) { # platform dependent #' version-checking now <- Sys.Date() versioncurrency.spatstat(now + 80, FALSE) versioncurrency.spatstat(now + 140, FALSE) versioncurrency.spatstat(now + 400, FALSE) versioncurrency.spatstat(now + 1000) } if(FULLTEST) { #' general Ord interaction gradual <- function(d, pars) { y <- pmax(0, 0.005 - d)/0.005 if(is.matrix(d)) y <- matrix(y, nrow(d), ncol(d)) return(y) } B <- Ord(gradual, "gradual Ord process") } }) ## ## tests/updateppm.R ## ## Check validity of update.ppm ## ## $Revision: 1.8 $ $Date: 2022/10/23 01:19:19 $ local({ if(ALWAYS) { require(spatstat.utils) h <- function(m1, m2) { mc <- short.deparse(sys.call()) cat(paste(mc, "\t... 
")) m1name <- short.deparse(substitute(m1)) m2name <- short.deparse(substitute(m2)) if(!identical(names(coef(m1)), names(coef(m2)))) stop(paste("Differing results for", m1name, "and", m2name, "in updateppm.R"), call.=FALSE) cat("OK\n") } X <- redwood[c(TRUE,FALSE)] Y <- redwood[c(FALSE,TRUE)] fit0f <- ppm(X ~ 1, nd=8) fit0p <- ppm(X, ~1, nd=8) fitxf <- ppm(X ~ x, nd=8) fitxp <- ppm(X, ~x, nd=8) cat("Basic consistency ...\n") h(fit0f, fit0p) h(fitxf, fitxp) cat("\nTest correct handling of model formulas ...\n") h(update(fitxf, Y), fitxf) h(update(fitxf, Q=Y), fitxf) h(update(fitxf, Y~x), fitxf) h(update(fitxf, Q=Y~x), fitxf) h(update(fitxf, ~x), fitxf) } if(FULLTEST) { h(update(fitxf, Y~1), fit0f) h(update(fitxf, ~1), fit0f) h(update(fit0f, Y~x), fitxf) h(update(fit0f, ~x), fitxf) h(update(fitxp, Y), fitxp) h(update(fitxp, Q=Y), fitxp) h(update(fitxp, Y~x), fitxp) h(update(fitxp, Q=Y~x), fitxp) h(update(fitxp, ~x), fitxp) h(update(fitxp, Y~1), fit0p) h(update(fitxp, ~1), fit0p) h(update(fit0p, Y~x), fitxp) h(update(fit0p, ~x), fitxp) } if(ALWAYS) { cat("\nTest scope handling for left hand side ...\n") X <- Y h(update(fitxf), fitxf) } if(ALWAYS) { cat("\nTest scope handling for right hand side ...\n") Z <- distmap(X) fitZf <- ppm(X ~ Z) fitZp <- ppm(X, ~ Z) h(update(fitxf, X ~ Z), fitZf) } if(FULLTEST) { h(update(fitxp, X ~ Z), fitZp) h(update(fitxf, . ~ Z), fitZf) h(update(fitZf, . ~ x), fitxf) h(update(fitZf, . ~ . - Z), fit0f) h(update(fitxp, . ~ Z), fitZp) h(update(fitZp, . ~ . - Z), fit0p) h(update(fit0p, . ~ . + Z), fitZp) h(update(fitZf, . ~ . ), fitZf) h(update(fitZp, . ~ . ), fitZp) } if(ALWAYS) { cat("\nTest use of internal data ...\n") h(update(fitZf, ~ x, use.internal=TRUE), fitxf) fitsin <- update(fitZf, X~sin(Z)) h(update(fitZf, ~ sin(Z), use.internal=TRUE), fitsin) } if(FULLTEST) { cat("\nTest step() ... 
") fut <- ppm(X ~ Z + x + y, nd=8) fut0 <- step(fut, trace=0) cat("OK\n") } }) # # tests/vcovppm.R # # Check validity of vcov.ppm algorithms # # Thanks to Ege Rubak # # $Revision: 1.12 $ $Date: 2020/05/02 01:32:58 $ # local({ set.seed(42) X <- rStrauss(200, .5, .05) model <- ppm(X, inter = Strauss(.05)) if(ALWAYS) { b <- vcov(model, generic = TRUE, algorithm = "basic") v <- vcov(model, generic = TRUE, algorithm = "vector") vc <- vcov(model, generic = TRUE, algorithm = "vectorclip") vn <- vcov(model, generic = FALSE) disagree <- function(x, y, tol=1e-7) { max(abs(x-y)) > tol } asymmetric <- function(x) { disagree(x, t(x)) } if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm") if(asymmetric(vn)) stop("Non-symmetric matrix produced by vcov.ppm Strauss algorithm") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' ") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' ") if(disagree(vn, vc)) stop("Disagreement between vcov.ppm generic and Strauss algorithms") } if(ALWAYS) { # C code ## Geyer code xx <- c(0.7375956, 0.6851697, 0.6399788, 0.6188382) yy <- c(0.5816040, 0.6456319, 0.5150633, 0.6191592) Y <- ppp(xx, yy, window=square(1)) modelY <- ppm(Y ~1, Geyer(0.1, 1)) b <- vcov(modelY, generic = TRUE, algorithm = "basic") v <- vcov(modelY, generic = TRUE, algorithm = "vector") vc <- vcov(modelY, generic = TRUE, algorithm = "vectorclip") if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm for Geyer model") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm for Geyer model") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm for Geyer model") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' for Geyer model") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' for Geyer model") } if(ALWAYS) { # C code ## tests of 'deltasuffstat' code ## Handling of offset terms modelH <- ppm(cells ~x, Hardcore(0.05)) a <- vcov(modelH, generic=TRUE) ## may fall over b <- vcov(modelH, generic=FALSE) if(disagree(a, b)) stop("Disagreement between vcov.ppm algorithms for Hardcore model") ## Correctness of pairwise.family$delta2 modelZ <- ppm(amacrine ~1, MultiStrauss(radii=matrix(0.1, 2, 2))) b <- vcov(modelZ, generic=FALSE) g <- vcov(modelZ, generic=TRUE) if(disagree(b, g)) stop("Disagreement between vcov.ppm algorithms for MultiStrauss model") ## Test that 'deltasuffstat' works for Hybrids modelHyb <- ppm(japanesepines ~ 1, Hybrid(Strauss(0.05), Strauss(0.1))) vHyb <- vcov(modelHyb) } if(FULLTEST) { ## Code blocks for other choices of 'what' model <- ppm(X ~ 1, Strauss(.05)) cG <- vcov(model, what="corr") cP <- vcov(update(model, Poisson()), what="corr") ## outdated usage cX <- vcov(model, A1dummy=TRUE) ## Model with zero-length coefficient vector lam <- intensity(X) f <- function(x,y) { rep(lam, length(x)) } model0 <- ppm(X ~ offset(log(f)) - 1) dd <- vcov(model0) cc <- vcov(model0, what="corr") ## Model with NA coefficients fit <- ppm(X ~ log(f)) vcov(fit) fitE <- emend(fit, trace=TRUE) ## Other weird stuff su <- suffloc(ppm(X ~ x)) } }) spatstat.model/tests/testsQ.R0000644000176200001440000000065614243551505015777 0ustar liggesusers#' #' Header for 
all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) spatstat.model/tests/testsEtoF.R0000644000176200001440000001731314337644655016447 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/envelopes.R # # Test validity of envelope data # # $Revision: 1.28 $ $Date: 2022/11/24 01:35:26 $ # local({ checktheo <- function(fit) { fitname <- deparse(substitute(fit)) en <- envelope(fit, nsim=4, verbose=FALSE, nrep=1e3) nama <- names(en) expecttheo <- is.poisson(fit) && is.stationary(fit) context <- paste("Envelope of", fitname) if(expecttheo) { if(!("theo" %in% nama)) stop(paste(context, "did not contain", sQuote("theo"))) if("mmean" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("mmean"))) } else { if("theo" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("theo"))) if(!("mmean" %in% nama)) stop(paste(context, "did not contain", sQuote("mmean"))) } cat(paste(context, "has correct format\n")) } if(ALWAYS) { checktheo(ppm(cells ~x)) } if(FULLTEST) { checktheo(ppm(cells)) checktheo(ppm(cells ~1, Strauss(0.1))) } #' check savefuns/savepatterns with global fit <- ppm(cells~x) if(ALWAYS) Ef <- envelope(fit, Kest, nsim=4, savefuns=TRUE, global=TRUE) if(FULLTEST) Ep <- envelope(fit, Kest, nsim=4, savepatterns=TRUE, global=TRUE) #' check handling of 'dangerous' cases if(FULLTEST) { fut <- ppm(redwood ~ x) Ek <- envelope(fut, Kinhom, update=FALSE, nsim=4) kfut <- kppm(redwood3 ~ x) Ekk <- envelope(kfut, Kinhom, lambda=density(redwood3), nsim=7) } if(ALWAYS) { # invokes C code fit <- ppm(japanesepines ~ 1, Strauss(0.04)) e6 <- envelope(fit, Kest, nsim=4, fix.n=TRUE) fit2 <- ppm(amacrine ~ 1, Strauss(0.03)) e7 <- envelope(fit2, Gcross, nsim=4, fix.marks=TRUE) } if(FULLTEST) { fit <- ppm(cells ~ 1, Strauss(0.07)) U <- envelope(fit, nsim=3, simulate=expression(runifpoint(20))) kfit <- kppm(redwood3 ~ x) UU <- envelope(kfit, nsim=7, simulate=expression(simulate(kfit, drop=TRUE))) VV <- envelope(kfit, nsim=7, weights=1:7) MM <- envelope(kfit, nsim=7, Kinhom, lambda=density(redwood3)) } if(FULLTEST) { #' envelope computations in other functions P <- lurking(cells, expression(x), envelope=TRUE, nsim=9) print(P) #' re-using envelope objects in other functions A <- envelope(cells, nsim=9, savepatterns=TRUE, savefuns=TRUE) S <- lurking(cells, expression(x), envelope=A, nsim=9) #' envelope.envelope B <- envelope(cells, nsim=5, savepatterns=TRUE, savefuns=FALSE) envelope(B) } ## close 'local' }) #' tests/enveltest.R #' Envelope tests (dclf.test, mad.test) #' and two-stage tests (bits.test, dg.test, bits.envelope, dg.envelope) #' #' $Revision: 1.3 $ $Date: 2020/04/28 12:58:26 $ #' if(FULLTEST) { local({ #' handling of NA function values (due to empty point patterns) set.seed(1234) X <- rThomas(5, 0.05, 10) fit <- kppm(X ~ 1, "Thomas") set.seed(100000) dclf.test(fit) set.seed(909) dg.test(fit, nsim=9) 
#' other code blocks dclf.test(fit, rinterval=c(0, 3), nsim=9) envelopeTest(X, exponent=3, clamp=TRUE, nsim=9) }) } # # tests/fastgeyer.R # # checks validity of fast C implementation of Geyer interaction # # $Revision: 1.4 $ $Date: 2020/04/28 12:58:26 $ # if(FULLTEST) { # depends on hardware local({ X <- redwood Q <- quadscheme(X) U <- union.quad(Q) EP <- equalpairs.quad(Q) G <- Geyer(0.11, 2) # The value r=0.11 is chosen to avoid hardware numerical effects (gcc bug 323). # It avoids being close any value of pairdist(redwood). # The nearest such values are 0.1077.. and 0.1131.. # By contrast if r = 0.1 there are values differing from 0.1 by 3e-17 a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match") # ... # and again for a non-integer value of 'sat' # (spotted by Thordis Linda Thorarinsdottir) G <- Geyer(0.11, 2.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat is not an integer") # and again for sat < 1 # (spotted by Rolf) G <- Geyer(0.11, 0.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat < 1") }) } #' tests/formuli.R #' #' Test machinery for manipulating formulae #' #' $Revision: 1.7 $ $Date: 2020/04/28 12:58:26 $ local({ ff <- function(A, deletevar, B) { D <- reduceformula(A, deletevar) if(!spatstat.utils::identical.formulae(D, B)) { AD <- as.expression(substitute(reduceformula(A,d), list(A=A, d=deletevar))) stop(paste(AD, "\n\tyields ", spatstat.utils::pasteFormula(D), " instead of ", spatstat.utils::pasteFormula(B)), call.=FALSE) } invisible(NULL) } ff(~ x + z, "x", ~z) ff(y ~ x + z, "x", y~z) ff(~ I(x^2) + z, "x", ~z) ff(y ~ poly(x,2) + poly(z,3), "x", y ~poly(z,3)) ff(y ~ x + z, "g", y ~ x + z) reduceformula(y ~ x+z, "g", verbose=TRUE) reduceformula(y ~ sin(x-z), "z", verbose=TRUE) illegal.iformula(~str*g, itags="str", dfvarnames=c("marks", "g", "x", "y")) }) ## ## tests/funnymarks.R ## ## tests involving strange mark values ## $Revision: 1.7 $ $Date: 2020/04/28 12:58:26 $ if(ALWAYS) { # depends on locale local({ ## ppm() where mark levels contain illegal characters hyphenated <- c("a", "not-a") spaced <- c("U", "non U") suffixed <- c("a+", "a*") charred <- c("+", "*") irad <- matrix(0.1, 2,2) hrad <- matrix(0.005, 2, 2) tryit <- function(types, X, irad, hrad) { levels(marks(X)) <- types fit <- ppm(X ~marks + polynom(x,y,2), MultiStraussHard(types=types,iradii=irad,hradii=hrad)) print(fit) print(coef(fit)) val <- fitted(fit) pred <- predict(fit) return(invisible(NULL)) } tryit(hyphenated, amacrine, irad, hrad) tryit(spaced, amacrine, irad, hrad) tryit(suffixed, amacrine, irad, hrad) tryit(charred, amacrine, irad, hrad) ## marks which are dates X <- cells n <- npoints(X) endoftime <- rep(ISOdate(2001,1,1), n) eotDate <- rep(as.Date("2001-01-01"), n) markformat(endoftime) markformat(eotDate) marks(X) <- endoftime print(X) Y <- X %mark% data.frame(id=1:42, date=endoftime, dd=eotDate) print(Y) md <- markformat(endoftime) ## mark formats Z <- Y marks(Z) <- marks(Z)[1,,drop=FALSE] ms <- markformat(solist(cells, redwood)) marks(Z) <- factor(1:npoints(Z)) marks(Z)[12] <- NA mz <- is.multitype(Z) cZ <- coerce.marks.numeric(Z) marks(Z) <- data.frame(n=1:npoints(Z), 
a=factor(sample(letters, npoints(Z), replace=TRUE))) cZ <- coerce.marks.numeric(Z) stopifnot(is.multitype(cells %mark% data.frame(a=factor(1:npoints(cells))))) a <- numeric.columns(finpines) b1 <- numeric.columns(amacrine) b2 <- coerce.marks.numeric(amacrine) d <- numeric.columns(cells) f <- numeric.columns(longleaf) ff <- data.frame(a=factor(letters[1:10]), y=factor(sample(letters, 10))) numeric.columns(ff) ## mark operations df <- data.frame(x=1:2, y=sample(letters, 2)) h <- hyperframe(z=1:2, p=solist(cells, cells)) a <- NULL %mrep% 3 a <- 1:4 %mrep% 3 a <- df %mrep% 3 a <- h %mrep% 3 b <- markcbind(df, h) b <- markcbind(h, df) }) } spatstat.model/tests/testsS.R0000644000176200001440000000271614243551505016000 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/slrm.R # # $Revision: 1.3 $ $Date: 2020/05/01 09:59:59 $ # # Test slrm fitting and prediction when there are NA's # if(ALWAYS) { local({ X <- copper$SouthPoints W <- owin(poly=list(x=c(0,35,35,1),y=c(1,1,150,150))) Y <- X[W] fit <- slrm(Y ~ x+y) pred <- predict(fit) extractAIC(fit) fitx <- update(fit, . ~ x) simulate(fitx, seed=42) if(FULLTEST) { unitname(fitx) unitname(fitx) <- "km" mur <- solapply(murchison,rescale, 1000, "km") mur$dfault <- distfun(mur$faults) fut <- slrm(gold ~ dfault, data=mur, splitby="greenstone") A <- model.images(fut) } }) } # # tests/step.R # # $Revision: 1.5 $ $Date: 2020/05/01 09:59:59 $ # # test for step() operation # if(FULLTEST) { local({ Z <- as.im(function(x,y){ x^3 - y^2 }, nztrees$window) fitP <- ppm(nztrees ~x+y+Z, covariates=list(Z=Z)) step(fitP) fitS <- update(fitP, Strauss(7)) step(fitS) fitM <- ppm(amacrine ~ marks*(x+y), MultiStrauss(types=levels(marks(amacrine)), radii=matrix(0.04, 2, 2))) step(fitM) }) } spatstat.model/tests/testsP1.R0000644000176200001440000000252214325156533016054 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/percy.R ## ## Tests of Percus-Yevick approximations ## ## $Revision: 1.3 $ $Date: 2020/04/30 05:23:52 $ if(FULLTEST) { local({ fit <- ppm(swedishpines ~1, DiggleGatesStibbard(6)) K <- Kmodel(fit) }) } ## ## tests/pixelgripes.R ## Problems related to pixellation of windows ## ## $Revision: 1.8 $ $Date: 2022/10/23 06:21:10 $ if(FULLTEST) { local({ ## From Philipp Hunziker: bug in rNeymanScott (etc) ## Create an irregular window PM <- matrix(c(1,0,0.5,1,0,0), 3, 2, byrow=TRUE) P <- owin(poly=PM) ## Generate Matern points X <- rMatClust(50, 0.05, 5, win=P) ## Some distance function as a covariate distorigin <- function(x, y) { sqrt(x^2 + y^2) } ## No covariates: works fine fit0 <- kppm(X ~ 1, clusters="MatClust") Y0 <- simulate(fit0, retry=0) ## Covariates: Simulation fails fit1 <- kppm(X ~ distorigin, clusters="MatClust") Y1 <- simulate(fit1, retry=0) }) } spatstat.model/tests/testsAtoC.R0000644000176200001440000000366114243551505016424 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/aucroc.R #' #' AUC and ROC code #' #' $Revision: 1.6 $ $Date: 2020/11/02 06:26:45 $ local({ if(FULLTEST) { fit <- kppm(redwood ~ I(y-x)) a <- roc(fit) b <- auc(fit) fet <- ppm(amacrine~x+y+marks) d <- roc(fet) e <- auc(fet) } }) ## tests/cdf.test.R local({ NSIM <- 9 op <- spatstat.options(ndummy.min=16, npixel=32) if(FULLTEST) { ## Monte Carlo test for Gibbs model fit <- ppm(cells ~ 1, Strauss(0.07)) cdf.test(fit, "x", nsim=NSIM) ## cdf.test.slrm fut <- slrm(japanesepines ~ x + y) Z <- distmap(japanesepines) cdf.test(fut, Z) } reset.spatstat.options() }) #' #' tests/contrib.R #' #' Tests for user-contributed code in spatstat #' #' $Revision: 1.4 $ $Date: 2021/04/17 02:32:24 $ local({ #' Jinhom #' Marie-Colette van Lieshout and Ottmar Cronie X <- redwood3 if(FULLTEST) { fit <- ppm(X ~ polynom(x,y,2)) } else { X <- X[c(TRUE,FALSE)] spatstat.options(npixel=32, ndummy.min=16) fit <- ppm(X ~ x) } lam <- predict(fit) lamX <- fitted(fit, dataonly=TRUE) lmin <- 0.9 * min(lam) g1 <- Ginhom(X, lambda=fit, update=TRUE) if(FULLTEST) { g2 <- Ginhom(X, lambda=fit, update=FALSE, lmin = lmin) g3 <- Ginhom(X, lambda=lam, lmin=lmin) g4 <- Ginhom(X, lambda=lamX, lmin=lmin) } if(ALWAYS) { f2 <- Finhom(X, lambda=fit, update=FALSE) } if(FULLTEST) { f1 <- Finhom(X, lambda=fit, update=TRUE) f3 <- Finhom(X, lambda=lam, lmin=lmin) } if(!FULLTEST) reset.spatstat.options() }) spatstat.model/tests/testsT.R0000644000176200001440000001117214243551505015775 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/testaddvar.R # # test addvar options # # $Revision: 1.3 $ $Date: 2020/05/02 01:32:58 $ if(FULLTEST) { local({ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X ~y) addvar(model, "x", crosscheck=TRUE) addvar(model, "x", bw.input="quad") w <- square(0.5) addvar(model, "x", subregion=w) addvar(model, "x", subregion=w, bw.input="points") Z <- as.im(function(x,y) { x }, Window(X)) addvar(model, Z) }) } # # tests/testparres.R # # additional test of parres # # $Revision: 1.8 $ $Date: 2022/05/20 06:59:59 $ # if(FULLTEST) { local({ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) # options in parres (and code blocks in print.parres) parres(model, "x") parres(model, "x", smooth.effect=TRUE) parres(model, "x", bw.input="quad") w <- square(0.5) parres(model, "x", subregion=w) parres(model, "x", subregion=w, bw.input="quad") f <- function(x,y) { x + y } parres(model, f) # check whether 'update.ppm' has messed up internals mod2 <- update(model, ~x) parres(mod2, "x") #' other kinds of covariates mod3 <- ppm(X ~ x + offset(y)) parres(mod3, "offset(y)") Z <- distmap(runifpoint(3)) parres(mod3, Z) mod4 <- ppm(X ~ sin(x), data=solist(B=Z)) parres(mod4, "sin(x)") parres(mod4, "B") #' models with interaction mod5 <- ppm(cells ~ x, AreaInter(0.06)) parres(mod5, "x") dlin <- distfun(copper$SouthLines) copfit <- ppm(copper$SouthPoints ~ dlin, Geyer(1,1)) parres(copfit, "dlin") #' covariate need not be specified if there is only one. parres(mod5) parres(copfit) #' infrastructure ltuae <- evaluateCovariate(42, cells) LTUAE <- evaluateCovariate(ltuae, cells) fit <- ppm(amacrine ~ x * marks, nd=16) dmat <- model.depends(fit) check.separable(dmat, "x", c(x=FALSE, marks=FALSE), FALSE) check.separable(dmat, "x", c(FALSE, FALSE), FALSE) check.separable(dmat, "x", c(x=FALSE, marks=TRUE), FALSE) }) } #' #' tests/threedee.R #' #' Tests of 3D code #' #' $Revision: 1.8 $ $Date: 2020/05/02 01:32:58 $ #' local({ X <- runifpoint3(30) Y <- runifpoint3(20) if(FULLTEST) { A <- runifpoint3(10, nsim=2) Z <- ppsubset(X, 2:4) } ## if(ALWAYS) { # includes C code d <- pairdist(X, periodic=TRUE, squared=TRUE) d <- crossdist(X, Y, squared=TRUE) d <- crossdist(X, Y, squared=TRUE, periodic=TRUE) #' h <- has.close(X, 0.2) h <- has.close(X, 0.2, periodic=TRUE) h <- has.close(X, 0.2, Y=Y) h <- has.close(X, 0.2, Y=Y, periodic=TRUE) #' code blocks not otherwise reached rmax <- 0.6 * max(nndist(X)) g <- G3est(X, rmax=rmax, correction="rs") g <- G3est(X, rmax=rmax, correction="km") g <- G3est(X, rmax=rmax, correction="Hanisch") g <- G3est(X, rmax=rmax, sphere="ideal") g <- G3est(X, rmax=rmax, sphere="digital") v <- sphere.volume() v <- digital.volume() #' older code co <- coords(X) xx <- co$x yy <- co$y zz <- co$z gg1 <- g3engine(xx, yy, zz, correction="Hanisch G3") gg2 <- g3engine(xx, yy, zz, correction="minus sampling") ff1 <- f3engine(xx, yy, zz, correction="no") ff2 <- f3engine(xx, yy, zz, correction="minus sampling") } ## if(ALWAYS) { #'class support X <- runifpoint3(10) print(X) print(X %mark% runif(10)) print(X %mark% factor(letters[c(1:5,5:1)])) print(X %mark% data.frame(a=1:10, b=runif(10))) da <- as.Date(paste0("2020-01-0", c(1:5,5:1))) print(X %mark% da) print(X %mark% data.frame(a=1:10, b=da)) } }) # # tests/triplets.R # # test code for 
triplet interaction # # $Revision: 1.9 $ $Date: 2022/05/22 08:45:38 $ # if(ALWAYS) { # C code, platform dependence local({ #' valid model fit <- ppm(cells ~1, Triplets(0.1)) fit suffstat(fit) #' invalid model fitR <- ppm(redwood ~1, Triplets(0.1)) fitR suffstat(fitR) #' hard core (zero triangles, coefficient is NA) fit0 <- ppm(cells ~1, Triplets(0.05)) fit0 suffstat(fit0) #' bug case (1 triangle in data) fit1 <- ppm(cells ~1, Triplets(0.15)) fit1 suffstat(fit1) #' simulation X <- simulate(fit) mod <- list(cif="triplets",par=list(beta=50,gamma=0.2,r=0.07), w=square(1)) Xm <- rmh(model=mod,start=list(n.start=5), control=list(nrep=1e5)) #' hard core mod$par$gamma <- 0 XmHard <- rmh(model=mod,start=list(n.start=5), control=list(nrep=1e5)) }) } spatstat.model/tests/testsD.R0000644000176200001440000001565514514520744015771 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/deltasuffstat.R #' #' Explicit tests of 'deltasuffstat' #' #' $Revision: 1.4 $ $Date: 2021/01/22 08:08:48 $ if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) if(ALWAYS) { # depends on C code local({ disagree <- function(x, y, tol=1e-7) { !is.null(x) && !is.null(y) && max(abs(x-y)) > tol } flydelta <- function(model, modelname="") { ## Check execution of different algorithms for 'deltasuffstat' dSS <- deltasuffstat(model, sparseOK=TRUE) dBS <- deltasuffstat(model, sparseOK=TRUE, use.special=FALSE, force=TRUE) dBF <- deltasuffstat(model, sparseOK=FALSE, use.special=FALSE, force=TRUE) ## Compare results if(disagree(dBS, dSS)) stop(paste(modelname, "model: Brute force algorithm disagrees with special algorithm")) if(disagree(dBF, dBS)) stop(paste(modelname, "model: Sparse and full versions of brute force algorithm disagree")) return(invisible(NULL)) } modelS <- ppm(cells ~ x, Strauss(0.13), nd=10) flydelta(modelS, "Strauss") antsub <- ants[c(FALSE,TRUE,FALSE)] rmat <- matrix(c(130, 90, 90, 60), 2, 2) modelM <- ppm(antsub ~ 1, MultiStrauss(rmat), nd=16) flydelta(modelM, "MultiStrauss") modelA <- ppm(antsub ~ 1, HierStrauss(rmat, archy=c(2,1)), nd=16) flydelta(modelA, "HierStrauss") }) } reset.spatstat.options() #' #' tests/density.R #' #' Test behaviour of density() methods, #' relrisk(), Smooth() #' and inhomogeneous summary functions #' and idw, adaptive.density, intensity #' #' $Revision: 1.62 $ $Date: 2022/05/22 11:14:51 $ #' if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ ## likewise 'relrisk.ppm' fit <- ppm(ants ~ x) rants <- function(..., model=fit) { a <- relrisk(model, sigma=100, se=TRUE, ...) 
return(TRUE) } if(ALWAYS) { rants() rants(diggle=TRUE) rants(edge=FALSE) rants(at="points") rants(casecontrol=FALSE) rants(relative=TRUE) } if(FULLTEST) { rants(diggle=TRUE, at="points") rants(edge=FALSE, at="points") rants(casecontrol=FALSE, relative=TRUE) rants(casecontrol=FALSE,at="points") rants(relative=TRUE,at="points") rants(casecontrol=FALSE, relative=TRUE,at="points") rants(relative=TRUE, control="Cataglyphis", case="Messor") rants(relative=TRUE, control="Cataglyphis", case="Messor", at="points") } ## more than 2 types fut <- ppm(sporophores ~ x) if(ALWAYS) { rants(model=fut) } if(FULLTEST) { rants(model=fut, at="points") rants(model=fut, relative=TRUE, at="points") } if(FULLTEST) { ## cases of 'intensity' etc a <- intensity(ppm(amacrine ~ 1)) } }) reset.spatstat.options() #' #' tests/diagnostique.R #' #' Diagnostic tools such as diagnose.ppm, qqplot.ppm #' #' $Revision: 1.6 $ $Date: 2020/04/28 12:58:26 $ #' if(FULLTEST) { local({ fit <- ppm(cells ~ x) diagE <- diagnose.ppm(fit, type="eem") diagI <- diagnose.ppm(fit, type="inverse") diagP <- diagnose.ppm(fit, type="Pearson") plot(diagE, which="all") plot(diagI, which="smooth") plot(diagP, which="x") plot(diagP, which="marks", plot.neg="discrete") plot(diagP, which="marks", plot.neg="contour") plot(diagP, which="smooth", srange=c(-5,5)) plot(diagP, which="smooth", plot.smooth="contour") plot(diagP, which="smooth", plot.smooth="image") fitS <- ppm(cells ~ x, Strauss(0.08)) diagES <- diagnose.ppm(fitS, type="eem", clip=FALSE) diagIS <- diagnose.ppm(fitS, type="inverse", clip=FALSE) diagPS <- diagnose.ppm(fitS, type="Pearson", clip=FALSE) plot(diagES, which="marks", plot.neg="imagecontour") plot(diagPS, which="marks", plot.neg="discrete") plot(diagPS, which="marks", plot.neg="contour") plot(diagPS, which="smooth", plot.smooth="image") plot(diagPS, which="smooth", plot.smooth="contour") plot(diagPS, which="smooth", plot.smooth="persp") #' infinite reach, not border-corrected fut <- ppm(cells ~ x, Softcore(0.5), correction="isotropic") diagnose.ppm(fut) #' diagPX <- diagnose.ppm(fit, type="Pearson", cumulative=FALSE) plot(diagPX, which="y") #' simulation based e <- envelope(cells, nsim=4, savepatterns=TRUE, savefuns=TRUE) Plist <- rpoispp(40, nsim=5) qf <- qqplot.ppm(fit, nsim=4, expr=e, plot.it=FALSE) print(qf) qp <- qqplot.ppm(fit, nsim=5, expr=Plist, fast=FALSE) print(qp) qp <- qqplot.ppm(fit, nsim=5, expr=expression(rpoispp(40)), plot.it=FALSE) print(qp) qg <- qqplot.ppm(fit, nsim=5, style="classical", plot.it=FALSE) print(qg) #' lurking.ppm #' covariate is numeric vector fitx <- ppm(cells ~ x) yvals <- coords(as.ppp(quad.ppm(fitx)))[,"y"] lurking(fitx, yvals) #' covariate is stored but is not used in model Z <- as.im(function(x,y){ x+y }, Window(cells)) fitxx <- ppm(cells ~ x, data=solist(Zed=Z), allcovar=TRUE) lurking(fitxx, expression(Zed)) #' envelope is a ppplist; length < nsim; glmdata=NULL fit <- ppm(cells ~ 1) stuff <- lurking(fit, expression(x), envelope=Plist, plot.sd=FALSE) #' plot.lurk plot(stuff, shade=NULL) }) } #' #' tests/deepeepee.R #' #' Tests for determinantal point process models #' #' $Revision: 1.9 $ $Date: 2022/04/24 09:14:46 $ local({ if(ALWAYS) { #' simulate.dppm jpines <- residualspaper$Fig1 fit <- dppm(jpines ~ 1, dppGauss) set.seed(10981) simulate(fit, W=square(5)) } if(FULLTEST) { #' simulate.detpointprocfamily - code blocks model <- dppGauss(lambda=100, alpha=.05, d=2) simulate(model, seed=1999, correction="border") u <- is.stationary(model) #' other methods for dppm kay <- Kmodel(fit) gee <- 
pcfmodel(fit) lam <- intensity(fit) arr <- reach(fit) pah <- parameters(fit) #' a user bug report - matrix dimension error set.seed(256) dat <- simulate( dppGauss(lambda = 8.5, alpha = 0.1, d = 2), nsim = 1) } if(FULLTEST) { ## cover print.summary.dppm jpines <- japanesepines[c(TRUE,FALSE,FALSE,FALSE)] print(summary(dppm(jpines ~ 1, dppGauss))) print(summary(dppm(jpines ~ 1, dppGauss, method="c"))) print(summary(dppm(jpines ~ 1, dppGauss, method="p"))) print(summary(dppm(jpines ~ 1, dppGauss, method="a"))) } #' dppeigen code blocks if(ALWAYS) { mod <- dppMatern(lambda=2, alpha=0.01, nu=1, d=2) uT <- dppeigen(mod, trunc=1.1, Wscale=c(1,1), stationary=TRUE) } if(FULLTEST) { uF <- dppeigen(mod, trunc=1.1, Wscale=c(1,1), stationary=FALSE) vT <- dppeigen(mod, trunc=0.98, Wscale=c(1,1), stationary=TRUE) vF <- dppeigen(mod, trunc=0.98, Wscale=c(1,1), stationary=FALSE) } }) spatstat.model/tests/testsM.R0000644000176200001440000002342014261734266015774 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/marcelino.R ## ## $Revision: 1.4 $ $Date: 2020/04/30 02:18:23 $ ## local({ if(FULLTEST) { Y <- split(urkiola) B <- Y$birch O <- Y$oak B.lam <- predict (ppm(B ~polynom(x,y,2)), type="trend") O.lam <- predict (ppm(O ~polynom(x,y,2)), type="trend") Kinhom(B, lambda=B.lam, correction="iso") Kinhom(B, lambda=B.lam, correction="border") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam) Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "iso") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "border") } }) #' tests/mctests.R #' Monte Carlo tests #' (mad.test, dclf.test, envelopeTest, hasenvelope) #' $Revision: 1.5 $ $Date: 2022/05/23 04:09:49 $ local({ if(FULLTEST) { fitx <- ppm(redwood~x) ax <- envelopeTest(fitx, exponent=2, nsim=9, savefuns=TRUE) print(ax) } }) # # tests/mppm.R # # Basic tests of mppm # # $Revision: 1.23 $ $Date: 2022/07/08 04:55:13 $ # if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ ## test interaction formulae and subfits fit1 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~ifelse(group=="control", po, str)) fit2 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id) fit2w <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id, weights=runif(nrow(simba))) # currently invalid # fit3 <- mppm(Points ~ group, simba, # hyperframe(po=Poisson(), pie=PairPiece(c(0.05,0.1))), # iformula=~I((group=="control") * po) + I((group=="treatment") * pie)) fit1 fit2 fit2w # fit3 if(FULLTEST) { ## run summary.mppm which currently sits in spatstat-internal.Rd summary(fit1) summary(fit2) summary(fit2w) # summary(fit3) } ## test vcov algorithm vcov(fit1) vcov(fit2) # vcov(fit3) if(FULLTEST) { fit4 <- mppm(Points ~ group, simba, hyperframe(str=Strauss(0.1)), iformula=~str/group) fit4 summary(fit4) vcov(fit4) fit0 <- mppm(Points ~ group, simba) anova(fit0, fit4, test="Chi") ## [bug from Fernando Milesi] fit5 <- mppm(Wat ~ id, data=hyperframe(Wat=waterstriders), interaction=StraussHard(4.5, 1.5), iformula=~Interaction:id) fit5 summary(fit5) vcov(fit5) } ## test subfits 
algorithm if(FULLTEST) { s1 <- subfits(fit1) s2 <- subfits(fit2) # s3 <- subfits(fit3) s4 <- subfits(fit4) s5 <- subfits(fit5) ## validity of results of subfits() p1 <- solapply(s1, predict) p2 <- solapply(s2, predict) # p3 <- solapply(s3, predict) p4 <- solapply(s4, predict) p5 <- solapply(s5, predict) } }) local({ if(FULLTEST) { ## cases of predict.mppm W <- solapply(waterstriders, Window) Fakes <- solapply(W, runifpoint, n=30) FakeDist <- solapply(Fakes, distfun) H <- hyperframe(Bugs=waterstriders, D=FakeDist) fit <- mppm(Bugs ~ D, data=H) p1 <- predict(fit) p2 <- predict(fit, locations=Fakes) p3 <- predict(fit, locations=solapply(W, erosion, r=4)) locn <- as.data.frame(do.call(cbind, lapply(Fakes, coords))) df <- data.frame(id=sample(1:3, nrow(locn), replace=TRUE), D=runif(nrow(locn))) p4 <- predict(fit, locations=locn, newdata=df) fitG <- mppm(Bugs ~ D, data=H, use.gam=TRUE) p1G <- predict(fitG) p2G <- predict(fitG, locations=Fakes) p3G <- predict(fitG, locations=solapply(W, erosion, r=4)) p4G <- predict(fitG, locations=locn, newdata=df) } }) local({ ## [thanks to Sven Wagner] ## factor covariate, with some levels unused in some rows if(FULLTEST) { set.seed(14921788) H <- hyperframe(X=replicate(3, runifpoint(20), simplify=FALSE), Z=solist(as.im(function(x,y){x}, owin()), as.im(function(x,y){y}, owin()), as.im(function(x,y){x+y}, owin()))) H$Z <- solapply(H$Z, cut, breaks=(0:4)/2) fit6 <- mppm(X ~ Z, H) v6 <- vcov(fit6) s6 <- subfits(fit6) p6 <- solapply(s6, predict) ## random effects fit7 <- mppm(X ~ Z, H, random=~1|id) v7 <- vcov(fit7) s7 <- subfits(fit7) p7 <- solapply(s7, predict) fit7a <- mppm(X ~ Z, H, random=~x|id) v7a <- vcov(fit7a) s7a <- subfits(fit7a) p7a <- solapply(s7a, predict) ## multitype: collisions in vcov.ppm, predict.ppm H$X <- lapply(H$X, rlabel, labels=factor(c("a","b")), permute=FALSE) M <- MultiStrauss(matrix(0.1, 2, 2), c("a","b")) fit8 <- mppm(X ~ Z, H, M) v8 <- vcov(fit8, fine=TRUE) s8 <- subfits(fit8) p8 <- lapply(s8, predict) c8 <- lapply(s8, predict, type="cif") fit9 <- mppm(X ~ Z, H, M, iformula=~Interaction * id) v9 <- vcov(fit9, fine=TRUE) s9 <- subfits(fit9) p9 <- lapply(s9, predict) c9 <- lapply(s9, predict, type="cif") ## and a simple error in recognising 'marks' fit10 <- mppm(X ~ marks, H) } }) local({ if(FULLTEST) { ## test handling of offsets and zero cif values in mppm H <- hyperframe(Y = waterstriders) (fit1 <- mppm(Y ~ 1, data=H, Hardcore(1.5))) (fit2 <- mppm(Y ~ 1, data=H, StraussHard(7, 1.5))) (fit3 <- mppm(Y ~ 1, data=H, Hybrid(S=Strauss(7), H=Hardcore(1.5)))) s1 <- subfits(fit1) s2 <- subfits(fit2) s3 <- subfits(fit3) ## prediction, in training/testing context ## (example from Markus Herrmann and Ege Rubak) X <- waterstriders dist <- solapply(waterstriders, function(z) distfun(runifpoint(1, Window(z)))) i <- 3 train <- hyperframe(pattern = X[-i], dist = dist[-i]) test <- hyperframe(pattern = X[i], dist = dist[i]) fit <- mppm(pattern ~ dist, data = train) pred <- predict(fit, type="cif", newdata=test, verbose=TRUE) ## examples from Robert Aue GH <- Hybrid(G=Geyer(r=0.1, sat=3), H=Hardcore(0.01)) res <- mppm(Points ~ 1, interaction = GH, data=demohyper) print(summary(res)) sub <- subfits(res, verbose=TRUE) print(sub) } }) local({ if(FULLTEST) { ## test handling of interaction coefficients in multitype case set.seed(42) XX <- as.solist(replicate(3, rthin(amacrine, 0.8), simplify=FALSE)) H <- hyperframe(X=XX) M <- MultiStrauss(matrix(0.1, 2, 2), levels(marks(amacrine))) fit <- mppm(X ~ 1, H, M) co <- coef(fit) subco <- sapply(subfits(fit), 
coef) if(max(abs(subco - co)) > 0.001) stop("Wrong coefficient values in subfits, for multitype interaction") } }) local({ if(FULLTEST) { ## test lurking.mppm ## example from 'mppm' n <- 7 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~ Z + U + V, data=H) lurking(fit, expression(Z), type="P") lurking(fit, expression(V), type="raw") # design covariate lurking(fit, expression(U), type="raw") # image, constant in each row lurking(fit, H$Z, type="P") # list of images } }) local({ if(FULLTEST) { ## test anova.mppm code blocks and scoping problem H <- hyperframe(X=waterstriders) mod0 <- mppm(X~1, data=H, Poisson()) modxy <- mppm(X~x+y, data=H, Poisson()) mod0S <- mppm(X~1, data=H, Strauss(2)) modxyS <- mppm(X~x+y, data=H, Strauss(2)) anova(mod0, modxy, test="Chi") anova(mod0S, modxyS, test="Chi") anova(modxy, test="Chi") anova(modxyS, test="Chi") #' models with random effects (example from Marcelino de la Cruz) mod0r <- mppm(X~1, data=H, Poisson(), random = ~1|id) modxr <- mppm(X~x, data=H, Poisson(), random = ~1|id) anova(mod0r, modxr, test="Chi") } }) local({ if(FULLTEST) { ## test multitype stuff foo <- flu[1:3,] msh <- MultiStraussHard(iradii=matrix(100, 2, 2), hradii=matrix(10,2,2), types=levels(marks(foo$pattern[[1]]))) msh0 <- MultiStraussHard(iradii=matrix(100, 2, 2), hradii=matrix(10,2,2)) fit <- mppm(pattern ~ 1, data=foo, interaction=msh0) print(fit) print(summary(fit)) v <- vcov(fit) } }) reset.spatstat.options() #' #' tests/msr.R #' #' $Revision: 1.5 $ $Date: 2020/11/30 07:27:44 $ #' #' Tests of code for measures #' if(FULLTEST) { local({ ## cases of 'msr' Q <- quadscheme(cells) nQ <- n.quad(Q) nX <- npoints(cells) A <- matrix(nX * 3, nX, 3) B <- matrix(nQ * 3, nQ, 3) m <- msr(Q, A, B) M <- msr(Q, A, 1) M <- msr(Q, 1, B) M <- msr(Q, A, B[,1]) M <- msr(Q, A[,1], B) M <- msr(Q, A, B[,1,drop=FALSE]) M <- msr(Q, A[,1,drop=FALSE], B) ## methods a <- summary(m) b <- is.marked(m) w <- as.owin(m) z <- domain(m) ss <- scalardilate(m, 2) tt <- rescale(m, 2) ee <- rotate(m, pi/4) aa <- affine(m, mat=diag(c(1,2)), vec=c(0,1)) ff <- flipxy(m) am <- augment.msr(m, sigma=0.08) ua <- update(am) rr <- residuals(ppm(cells ~ x)) mm <- residuals(ppm(amacrine ~ x)) ss <- residuals(ppm(amacrine ~ x), type="score") gg <- rescale(ss, 1/662, c("micron", "microns")) plot(mm) plot(mm, multiplot=FALSE) plot(mm, equal.markscale=TRUE, equal.ribbon=TRUE) plot(ss) plot(ss, multiplot=FALSE) }) } spatstat.model/tests/testsNtoO.R0000644000176200001440000000170114243551505016446 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.model #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.model) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/NAinCov.R # # Testing the response to the presence of NA's in covariates # # $Revision: 1.8 $ $Date: 2022/05/22 08:37:38 $ if(FULLTEST) { local({ X <- runifpoint(42) Y <- as.im(function(x,y) { x+y }, owin()) Y[owin(c(0.2,0.4),c(0.2,0.4))] <- NA # fit model: should produce a warning but no failure misfit <- ppm(X ~Y, covariates=list(Y=Y)) # prediction Z <- predict(misfit, type="trend", se=TRUE) # covariance matrix: all should be silent v <- vcov(misfit) ss <- vcov(misfit, what="internals") }) } spatstat.model/src/0000755000176200001440000000000014243551505014007 5ustar liggesusersspatstat.model/src/Ediggatsti.c0000755000176200001440000000353414325154601016244 0ustar liggesusers#include #include #include #include "chunkloop.h" #include "looptest.h" #include "constants.h" /* Ediggatsti.c $Revision: 1.5 $ $Date: 2022/10/21 10:43:01 $ C implementation of 'eval' for DiggleGatesStibbard interaction Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Ediggatsti( /* inputs */ int *nnsource, /* query points */ double *xsource, double *ysource, int *idsource, int *nntarget, /* data points */ double *xtarget, double *ytarget, int *idtarget, double *rrho, /* model parameter */ /* output */ double *values ) { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rho, rho2, rho2pluseps, coef, product; nsource = *nnsource; ntarget = *nntarget; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; coef = M_PI_2/rho; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting position */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rho */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) product *= sin(sqrt(d2) * coef); } } values[j] = log(product * product); } } } spatstat.model/src/constants.h0000755000176200001440000000074714325154601016204 0ustar liggesusers/* constants.h Ensure that required constants are defined (Insurance against flaky installations) $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef M_PI #define M_PI 3.141592653589793 #endif #ifndef M_PI_2 #define M_PI_2 1.570796326794897 #endif #ifndef M_2_PI #define M_2_PI (2.0/M_PI) #endif #ifndef M_2PI #define M_2PI 6.283185307179586 #endif spatstat.model/src/Efiksel.c0000755000176200001440000000331314325154601015535 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Efiksel.c $Revision: 1.6 $ $Date: 2022/10/22 10:09:51 $ C implementation of 'eval' for Fiksel interaction (non-hardcore part) Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf 
Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double sqrt(double x); double exp(double x); void Efiksel( /* inputs */ int *nnsource, double *xsource, double *ysource, int *nntarget, double *xtarget, double *ytarget, double *rrmax, double *kkappa, /* output */ double *values ) { int nsource, ntarget, maxchunk, j, i, ileft; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rmax, r2max, r2maxpluseps, kappa, total; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; kappa = *kkappa; if(nsource == 0 || ntarget == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 16384) { total = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rmax */ for(i=ileft; i < ntarget; i++) { /* squared interpoint distance */ dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) total += exp(- kappa * sqrt(d2)); } values[j] = total; } } } spatstat.model/src/raster.c0000755000176200001440000000200414325154601015447 0ustar liggesusers/* raster.c shape_raster() initialise a Raster structure $Revision: 1.2 $ $Date: 2022/10/22 02:32:10 $ */ #include #include "raster.h" void shape_raster( /* the raster structure to be initialised */ Raster *ras, /* pointer to data storage for pixel values */ void *data, /* range of GRID COORDS excluding margin */ double xmin, double ymin, double xmax, double ymax, /* absolute dimensions of storage array */ int nrow, int ncol, /* margins for working */ int mrow, int mcol ) { ras->data = data; ras->nrow = nrow; ras->ncol = ncol; ras->length = nrow * ncol; ras->rmin = mrow; ras->rmax = nrow - mrow - 1; ras->cmin = mcol; ras->cmax = ncol - mcol - 1; ras->x0 = ras->xmin = xmin; ras->x1 = ras->xmax = xmax; ras->y0 = ras->ymin = ymin; ras->y1 = ras->ymax = ymax; ras->xstep = (xmax-xmin)/(ncol - 2 * mcol - 1); ras->ystep = (ymax-ymin)/(nrow - 2 * mrow - 1); /* Rprintf("xstep,ystep = %lf,%lf\n", ras->xstep,ras->ystep); */ } spatstat.model/src/crossloop.h0000755000176200001440000000356214325154601016211 0ustar liggesusers/* crossloop.h Generic code template for loop for cross-close-pairs operations collecting contributions to point x_i from all points y_j such that ||x_i - y_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n1, n2, maxchunk, jleft; double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; double *x1, *y1, *x2, *y2; $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define CROSSLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n1, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n1, maxchunk, 65536) { \ \ x1i = x1[i]; \ y1i = y1[i]; \ \ INITIAL_I; \ \ jleft = 0; \ \ /* \ adjust starting point jleft \ */ \ xleft = x1i - rmax; \ while((x2[jleft] < xleft) && (jleft+1 < n2)) \ ++jleft; \ \ /* \ process from j = jleft until dx > rmax \ */ \ for(j=jleft; j < n2; j++) { \ dx = x2[j] - x1i; \ if(dx > rmax) \ break; \ dy = y2[j] - y1i; \ d2 = dx * dx + dy * dy; \ if(d2 <= r2max) 
          {                                             \
            /* add this (i, j) pair to output */        \
            CONTRIBUTE_IJ;                              \
          }                                             \
      }                                                 \
      COMMIT_I;                                         \
    }                                                   \
  }
spatstat.model/src/init.c0000644000176200001440000000174514514520743015126 0ustar liggesusers
/*
  Native symbol registration table for spatstat.model package

  Automatically generated - do not edit this file!
*/

#include "proto.h"
#include <stddef.h>            // for NULL
#include <R_ext/Rdynload.h>    // for R_registerRoutines etc.

/* See proto.h for declarations for the native routines registered below. */

static const R_CMethodDef CEntries[] = {
    {"Cclosepaircounts", (DL_FUNC) &Cclosepaircounts,  5},
    {"Ccrosspaircounts", (DL_FUNC) &Ccrosspaircounts,  8},
    {"delta2area",       (DL_FUNC) &delta2area,       10},
    {"Ediggatsti",       (DL_FUNC) &Ediggatsti,       10},
    {"Ediggra",          (DL_FUNC) &Ediggra,          11},
    {"Efiksel",          (DL_FUNC) &Efiksel,           9},
    {"Egeyer",           (DL_FUNC) &Egeyer,           11},
    {"ESdiggra",         (DL_FUNC) &ESdiggra,         12},
    {NULL, NULL, 0}
};

void R_init_spatstat_model(DllInfo *dll)
{
  R_registerRoutines(dll, CEntries, NULL, NULL, NULL);
  R_useDynamicSymbols(dll, FALSE);
  R_forceSymbols(dll, TRUE);
}
spatstat.model/src/areapair.c0000755000176200001440000000370314325154601015742 0ustar liggesusers/*
  areapair.c

  $Revision: 1.8 $ $Date: 2022/10/20 10:57:43 $

  Specialised code for the second order conditional intensity
  of the area-interaction process

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2
*/

#include <R.h>      /* include targets garbled in this copy; <R.h> and <math.h> assumed */
#include <math.h>
#include "yesno.h"

/* computes area of b(A, r) \cap b(B, r) \setminus \bigcup_i b(X[i], r) */

void delta2area(
  double *xa, double *ya,
  double *xb, double *yb,
  int *nother, double *xother, double *yother,
  double *radius, double *epsilon,
  int *pixcount
) {
  int Ni, Nj, Nk, i, j, k, count, covered;
  double xA, yA, xB, yB, r, eps, r2;
  double xmin, xmax, ymin, ymax, xi, yj;
  double dxA, dyA;
  double dxB, dyB;
  double dx, dy;

  Nk = *nother;
  xA = *xa;
  yA = *ya;
  xB = *xb;
  yB = *yb;
  r = *radius;
  eps = *epsilon;
  r2 = r * r;

  /* find intersection of squares centred on A and B */
  if(xA < xB) {
    xmin = xB - r;
    xmax = xA + r;
  } else {
    xmin = xA - r;
    xmax = xB + r;
  }
  if(xmin > xmax) return;
  if(yA < yB) {
    ymin = yB - r;
    ymax = yA + r;
  } else {
    ymin = yA - r;
    ymax = yB + r;
  }
  if(ymin > ymax) return;

  /* set up grid */
  Ni = (int) ceil((xmax - xmin)/eps) + 1;
  Nj = (int) ceil((ymax - ymin)/eps) + 1;

  count = 0;
  for(i = 0, xi = xmin; i < Ni; i++, xi += eps) {
    dxA = xi - xA;
    for(j = 0, yj = ymin; j < Nj; j++, yj += eps) {
      dyA = yj - yA;
      if(dxA * dxA + dyA * dyA <= r2) {
        /* grid point belongs to b(A, r) */
        dxB = xi - xB;
        dyB = yj - yB;
        if(dxB * dxB + dyB * dyB <= r2) {
          /* grid point belongs to b(A,r) \cap b(B,r) */
          covered = NO;
          /* test whether it is covered by another b(X[k], r) */
          for(k = 0; k < Nk; k++) {
            dx = xi - xother[k];
            dy = yj - yother[k];
            if(dx * dx + dy * dy <= r2) {
              covered = YES;
              break;
            }
          }
          if(!covered) {
            ++count;
          }
        }
      }
    }
  }
  *pixcount = count;
}
spatstat.model/src/pairloop.h0000755000176200001440000000344714325154601016013 0ustar liggesusers/*
  pairloop.h

  Generic code template for loop
  collecting contributions to point x_i
  from all points x_j such that ||x_i - x_j|| <= r

  cpp variables used:
     INITIAL_I        code executed at start of 'i' loop
     CONTRIBUTE_IJ    code executed to compute contribution from j to i
     COMMIT_I         code executed to save total contribution to i

  C variables used:
     int i, j, n, maxchunk;
     double xi, yi, dx, dy, dx2, d2, r2max;
     double *x, *y;

  $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2
*/

#ifndef CHUNKLOOP_H
#include "chunkloop.h"
#endif

#define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, \
                 COMMIT_I)                      \
  OUTERCHUNKLOOP(i, n, maxchunk, 65536) {       \
    R_CheckUserInterrupt();                     \
    INNERCHUNKLOOP(i, n, maxchunk, 65536) {     \
      xi = x[i];                                \
      yi = y[i];                                \
      INITIAL_I;                                \
      if(i > 0) {                               \
        for(j=i-1; j >= 0; j--) {               \
          dx = x[j] - xi;                       \
          dx2 = dx * dx;                        \
          if(dx2 > r2max)                       \
            break;                              \
          dy = y[j] - yi;                       \
          d2 = dx2 + dy * dy;                   \
          if(d2 <= r2max) {                     \
            CONTRIBUTE_IJ;                      \
          }                                     \
        }                                       \
      }                                         \
      if(i+1 < n) {                             \
        for(j=i+1; j < n; j++) {                \
          dx = x[j] - xi;                       \
          dx2 = dx * dx;                        \
          if(dx2 > r2max)                       \
            break;                              \
          dy = y[j] - yi;                       \
          d2 = dx2 + dy * dy;                   \
          if(d2 <= r2max) {                     \
            CONTRIBUTE_IJ;                      \
          }                                     \
        }                                       \
      }                                         \
      COMMIT_I;                                 \
    }                                           \
  }
spatstat.model/src/proto.h0000644000176200001440000000225714514520744015327 0ustar liggesusers#include <R.h>            /* include targets garbled in this copy; <R.h> and <Rinternals.h> assumed */
#include <Rinternals.h>

/*
  Prototype declarations for all native routines in spatstat.model package

  Automatically generated - do not edit!
*/

/* Functions invoked by .C */

void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *);
void Ediggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *);
void ESdiggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *, int *);
void Ediggatsti(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *);
void Efiksel(int *, double *, double *, int *, double *, double *, double *, double *, double *);
void Egeyer(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *);
void Cclosepaircounts(int *, double *, double *, double *, int *);
void Ccrosspaircounts(int *, double *, double *, int *, double *, double *, double *, int *);

/* Functions invoked by .Call */
spatstat.model/src/yesno.h0000755000176200001440000000011614243551505015316 0ustar liggesusers/* yesno.h */

#ifndef YES
#define YES (0 == 0)
#define NO  (!YES)
#endif
spatstat.model/src/looptest.h0000755000176200001440000000030214325154601016026 0ustar liggesusers/*
  looptest.h

  Utilities for looping

  $Revision: 1.1 $ $Date: 2014/09/19 00:47:34 $
*/

/* a small value relative to threshold X, for loop exit test */
#define EPSILON(X) ((X)/64)
spatstat.model/src/chunkloop.h0000755000176200001440000000161514325154601016165 0ustar liggesusers/*
  chunkloop.h

  Divide a loop into chunks.
  Convenient for divide-and-recombine,
  and for reducing calls to R_CheckUserInterrupt, etc.
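  Typical usage (a sketch, modelled on Efiksel() above, which uses a
  chunk size of 16384; most files in this package use 65536):

      OUTERCHUNKLOOP(i, n, maxchunk, 65536) {
        R_CheckUserInterrupt();
        INNERCHUNKLOOP(i, n, maxchunk, 65536) {
          ... process item i ...
        }
      }

  The outer loop advances in chunks of CHUNKSIZE iterations, so the
  interrupt check runs once per chunk rather than once per item.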
  $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2
*/

#define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
  IVAR = 0;                                                 \
  ICHUNK = 0;                                               \
  while(IVAR < LOOPLENGTH)

#define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
  ICHUNK += CHUNKSIZE;                                      \
  if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH;              \
  for(; IVAR < ICHUNK; IVAR++)

#define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
  IVAR = ISTART;                                               \
  ICHUNK = 0;                                                  \
  while(IVAR <= IEND)

#define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
  ICHUNK += CHUNKSIZE;                                         \
  if(ICHUNK > IEND) ICHUNK = IEND;                             \
  for(; IVAR <= IEND; IVAR++)

#define CHUNKLOOP_H
spatstat.model/src/Estrauss.c0000755000176200001440000000565514325154601015773 0ustar liggesusers#include <R.h>      /* include targets garbled in this copy; <R.h> and <math.h> assumed */
#include <math.h>
#include "chunkloop.h"
#include "looptest.h"

/*
  Estrauss.c

  $Revision: 1.8 $ $Date: 2022/10/22 10:09:51 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

  C implementation of 'eval' for Strauss interaction

  Calculates number of data points within distance r of each quadrature point
  (when 'source' = quadrature points, 'target' = data points)

  Assumes point patterns are sorted in increasing order of x coordinate

  Additionally: 'Cclosepaircounts' for a single point pattern
*/

double sqrt(double x);

void Ccrosspaircounts(
  /* inputs */
  int *nnsource, double *xsource, double *ysource,
  int *nntarget, double *xtarget, double *ytarget,
  double *rrmax,
  /* output */
  int *counts
) {
  int nsource, ntarget, maxchunk, j, i, ileft, counted;
  double xsourcej, ysourcej, rmax, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2;

  nsource = *nnsource;
  ntarget = *nntarget;
  rmax = *rrmax;
  r2max = rmax * rmax;
  r2maxpluseps = r2max + EPSILON(r2max);

  if(nsource == 0 || ntarget == 0) return;

  ileft = 0;
  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
      counted = 0;
      xsourcej = xsource[j];
      ysourcej = ysource[j];
      /* adjust starting point */
      xleft = xsourcej - rmax;
      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
        ++ileft;
      /* process from ileft to iright */
      for(i=ileft; i < ntarget; i++) {
        dx = xtarget[i] - xsourcej;
        dx2 = dx * dx;
        if(dx2 > r2maxpluseps)
          break;
        dy = ytarget[i] - ysourcej;
        d2 = dx2 + dy * dy;
        if(d2 <= r2max)
          ++counted;
      }
      counts[j] = counted;
    }
  }
}

/* count for each i the number of j closer than distance r */

void Cclosepaircounts(
  /* inputs */
  int *nxy,              /* number of (x,y) points */
  double *x, double *y,  /* (x,y) coordinates */
  double *rmaxi,         /* maximum distance */
  /* output VECTOR, assumed initialised to 0 */
  int *counts
) {
  int n, maxchunk, i, j;
  double xi, yi, rmax, r2max, dx, dy, a;

  n = *nxy;
  rmax = *rmaxi;
  r2max = rmax * rmax;

  if(n == 0) return;

  /* loop in chunks of 2^16 */
  i = 0; maxchunk = 0;
  while(i < n) {
    R_CheckUserInterrupt();
    maxchunk += 65536;
    if(maxchunk > n) maxchunk = n;
    for(; i < maxchunk; i++) {
      xi = x[i];
      yi = y[i];
      if(i > 0) {
        /* scan backwards from i */
        for(j = i - 1; j >= 0; j--) {
          dx = x[j] - xi;
          a = r2max - dx * dx;
          if(a < 0)
            break;
          dy = y[j] - yi;
          a -= dy * dy;
          if(a >= 0)
            (counts[i])++;
        }
      }
      if(i + 1 < n) {
        /* scan forwards from i */
        for(j = i + 1; j < n; j++) {
          dx = x[j] - xi;
          a = r2max - dx * dx;
          if(a < 0)
            break;
          dy = y[j] - yi;
          a -= dy * dy;
          if(a >= 0)
            (counts[i])++;
        }
      }
      /* end loop over i */
    }
  }
}
spatstat.model/src/Egeyer.c0000755000176200001440000000460214325154601015375 0ustar liggesusers#include <R.h>      /* include targets garbled in this copy; <R.h> and <math.h> assumed */
#include <math.h>
#include "chunkloop.h"
#include "looptest.h"

/*
  Egeyer.c
  $Revision: 1.9 $ $Date: 2022/10/22 10:09:51 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

  Part of C implementation of 'eval' for Geyer interaction

  Calculates change in saturated count

  (xquad, yquad): quadscheme
  (xdata, ydata): data
  tdata: unsaturated pair counts for data pattern
  quadtodata[j] = i   if quad[j] == data[i]   (indices start from ZERO)

  Assumes point patterns are sorted in increasing order of x coordinate
*/

double sqrt(double x);

void Egeyer(
  /* inputs */
  int *nnquad, double *xquad, double *yquad, int *quadtodata,
  int *nndata, double *xdata, double *ydata, int *tdata,
  double *rrmax, double *ssat,
  /* output */
  double *result
) {
  int nquad, ndata, maxchunk, j, i, ileft, dataindex, isdata;
  double xquadj, yquadj, rmax, sat, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2;
  double tbefore, tafter, satbefore, satafter, delta, totalchange;

  nquad = *nnquad;
  ndata = *nndata;
  rmax = *rrmax;
  sat = *ssat;

  if(nquad == 0 || ndata == 0) return;

  r2max = rmax * rmax;
  r2maxpluseps = r2max + EPSILON(r2max);

  ileft = 0;
  OUTERCHUNKLOOP(j, nquad, maxchunk, 65536) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, nquad, maxchunk, 65536) {
      totalchange = 0.0;
      xquadj = xquad[j];
      yquadj = yquad[j];
      dataindex = quadtodata[j];
      isdata = (dataindex >= 0);
      /* adjust starting point */
      xleft = xquadj - rmax;
      while((xdata[ileft] < xleft) && (ileft+1 < ndata))
        ++ileft;
      /* process until dx > rmax */
      for(i=ileft; i < ndata; i++) {
        dx = xdata[i] - xquadj;
        dx2 = dx * dx;
        if(dx2 > r2maxpluseps)
          break;
        if(i != dataindex) {
          dy = ydata[i] - yquadj;
          d2 = dx2 + dy * dy;
          if(d2 <= r2max) {
            /* effect of adding dummy point j
               or negative effect of removing data point */
            tbefore = tdata[i];
            tafter = tbefore + ((isdata) ? -1 : 1);
            /* effect on saturated values */
            satbefore = (double) ((tbefore < sat) ? tbefore : sat);
            satafter  = (double) ((tafter  < sat) ? tafter  : sat);
            /* sum changes over all i */
            delta = satafter - satbefore;
            totalchange += ((isdata) ? -delta : delta);
          }
        }
      }
      result[j] = totalchange;
    }
  }
}
spatstat.model/src/raster.h0000755000176200001440000000523014325154601015460 0ustar liggesusers/*
  raster.h

  Definition of raster structures & operations

  requires <math.h> (for floor())

  $Revision: 1.6 $ $Date: 2022/03/15 02:19:08 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2
*/

typedef struct Raster{
  /* array of data */
  char *data;       /* coerced to appropriate type */
  int nrow;         /* dimensions of entire array */
  int ncol;
  int length;
  int rmin;         /* position of valid subrectangle */
  int rmax;
  int cmin;
  int cmax;
  /* definition of mapping into continuous space */
  double x0;        /* position of entry (rmin,cmin) */
  double y0;
  double x1;        /* position of entry (rmax,cmax) */
  double y1;
  double xstep;     /* x increment for each column step */
  double ystep;     /* y increment for each row step */
  /*
     xstep = (x1 - x0)/(cmax - cmin)
           = (x1 - x0)/(number of valid columns - 1)
     CAN BE POSITIVE OR NEGATIVE
  */
  /* ranges of grid coordinates */
  double xmin;      /* = min{x0,x1} */
  double xmax;
  double ymin;
  double ymax;
  /* limits of enclosing frame are xmin-xstep/2, xmax+xstep/2 etc.
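     Worked example (illustrative numbers only): with cmin = 0,
     cmax = 99 (100 valid columns), x0 = 0 and x1 = 1, we get
     xstep = (x1 - x0)/(cmax - cmin) = 1/99, so column c maps to
     x = c/99 and the enclosing frame runs from -1/198 to 1 + 1/198.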
  */
} Raster;

/* how to clear the data */
#define Clear(ARRAY,TYPE,VALUE) \
  { unsigned int i; TYPE *p; \
    for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \
      *p = VALUE; }

/* how to index a rectangular array stored sequentially in row-major order */
#define Entry(ARRAY,ROW,COL,TYPE) \
  ((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)]

/* test for indices inside subrectangle */
#define Inside(ARRAY,ROW,COL) \
  ( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \
    (COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax))

/* how to compute the position in R^2 corresponding to a raster entry */
#define Xpos(ARRAY,COL) \
  ((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin))
#define Ypos(ARRAY,ROW) \
  ((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin))

#define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY))

#define DistanceTo(X,Y,ARRAY,ROW,COL)\
  Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW))

#define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY))

#define DistanceToSquared(X,Y,ARRAY,ROW,COL)\
  DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW))

/* how to map a point (x,y) in R^2 to a raster entry */
/* (x,y) is guaranteed to lie in the rectangle bounded by the images
   of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1)
   where r = RowIndex(..) and c = ColIndex(..). */
#define RowIndex(ARRAY,Y) \
  ((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep))
#define ColIndex(ARRAY,X) \
  ((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep))
spatstat.model/src/Ediggra.c0000755000176200001440000000714714325154601015522 0ustar liggesusers#include <R.h>      /* include targets garbled in this copy; <R.h> and <math.h> assumed */
#include <math.h>
#include "chunkloop.h"
#include "looptest.h"

/*
  Ediggra.c

  $Revision: 1.10 $ $Date: 2022/10/22 10:09:51 $

  C implementation of 'eval' for DiggleGratton interaction (exponentiated)

  Assumes point patterns are sorted in increasing order of x coordinate

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2
*/

double sqrt(double x);

void Ediggra(
  /* inputs */
  /* query points */
  int *nnsource, double *xsource, double *ysource, int *idsource,
  /* data points */
  int *nntarget, double *xtarget, double *ytarget, int *idtarget,
  /* model parameters */
  double *ddelta, double *rrho,
  /* output */
  double *values
) {
  int nsource, ntarget, maxchunk, j, i, ileft, idsourcej;
  double xsourcej, ysourcej, xleft, dx, dy, dx2, d2;
  double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta;
  double product;

  nsource = *nnsource;
  ntarget = *nntarget;
  delta = *ddelta;
  rho = *rrho;

  if(nsource == 0 || ntarget == 0) return;

  rho2 = rho * rho;
  delta2 = delta * delta;
  rhominusdelta = rho - delta;
  rho2pluseps = rho2 + EPSILON(rho2);

  ileft = 0;
  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
      product = 1;
      xsourcej = xsource[j];
      ysourcej = ysource[j];
      idsourcej = idsource[j];
      /* adjust starting point */
      xleft = xsourcej - rho;
      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
        ++ileft;
      /* process until dx > rho (or until product is zero) */
      for(i=ileft; i < ntarget; i++) {
        dx = xtarget[i] - xsourcej;
        dx2 = dx * dx;
        if(dx2 > rho2pluseps)
          break;
        if(idtarget[i] != idsourcej) {
          dy = ytarget[i] - ysourcej;
          d2 = dx2 + dy * dy;
          if(d2 <= rho2) {
            if(d2 <= delta2) {
              product = 0;
              break;
            } else
              product *= (sqrt(d2) - delta)/rhominusdelta;
          }
        }
      }
      values[j] = product;
    }
  }
}

/* 'split' version separating hard core terms from others */

void ESdiggra(
  /* inputs */
  int *nnsource, double *xsource, double *ysource, int *idsource,
  int *nntarget,
  double *xtarget, double *ytarget, int *idtarget,
  double *ddelta, double *rrho,
  /* output */
  double *positive, int *hardcore
) {
  int nsource, ntarget, maxchunk, j, i, ileft, idsourcej;
  double xsourcej, ysourcej, xleft, dx, dy, dx2, d2;
  double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta;
  double product;

  nsource = *nnsource;
  ntarget = *nntarget;
  delta = *ddelta;
  rho = *rrho;

  if(nsource == 0 || ntarget == 0) return;

  rho2 = rho * rho;
  delta2 = delta * delta;
  rhominusdelta = rho - delta;
  rho2pluseps = rho2 + EPSILON(rho2);

  ileft = 0;
  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
      product = 1;
      xsourcej = xsource[j];
      ysourcej = ysource[j];
      idsourcej = idsource[j];
      /* adjust starting point */
      xleft = xsourcej - rho;
      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
        ++ileft;
      /* process until dx > rho */
      for(i=ileft; i < ntarget; i++) {
        dx = xtarget[i] - xsourcej;
        dx2 = dx * dx;
        if(dx2 > rho2pluseps)
          break;
        if(idtarget[i] != idsourcej) {
          dy = ytarget[i] - ysourcej;
          d2 = dx2 + dy * dy;
          if(d2 <= rho2) {
            if(d2 <= delta2) {
              hardcore[j] = 1;
            } else {
              product *= (sqrt(d2) - delta)/rhominusdelta;
            }
          }
        }
      }
      positive[j] = product;
    }
  }
}
spatstat.model/NEWS0000644000176200001440000006427614515336521013727 0ustar liggesusers
        CHANGES IN spatstat.model VERSION 3.2-8

OVERVIEW

    o spatstat.model no longer suggests package 'maptools' or 'RandomFields'.

    o code for fitting log-Gaussian Cox models has changed.

    o minor improvements to help files.

SIGNIFICANT USER-VISIBLE CHANGES

    o kppm
    The code for fitting log-Gaussian Cox process models (clusters="LGCP")
    has been re-implemented without using the package 'RandomFields'.
    The current code supports the 'exponential', 'gauss', 'stable',
    'gencauchy' and 'matern' covariance models.

    o lgcp.estK, lgcp.estpcf
    This code for fitting log-Gaussian Cox process models has been
    re-implemented without using the package 'RandomFields'.
    The current code supports the 'exponential', 'gauss', 'stable',
    'gencauchy' and 'matern' covariance models.

    o simulate.kppm
    For log-Gaussian Cox process models (clusters='LGCP') the simulation
    algorithm has been completely re-implemented to avoid dependence on
    the defunct package 'RandomFields'. For details, see the help for
    'rLGCP' in the package 'spatstat.random'.

        CHANGES IN spatstat.model VERSION 3.2-6

OVERVIEW

    o Internal improvements.

SIGNIFICANT USER-VISIBLE CHANGES

    o ppm
    Internal changes to support improvements in spatstat.linnet::lppm.

        CHANGES IN spatstat.model VERSION 3.2-4

OVERVIEW

    o Minor improvements.

SIGNIFICANT USER-VISIBLE CHANGES

    o cdf.test.ppm
    Recognises argument 'rule.eps' passed to 'as.mask'.

    o dfbetas.ppm
    Recognises argument 'rule.eps' passed to 'as.mask'.

    o leverage.ppm
    Recognises argument 'rule.eps' passed to 'as.mask'.

    o rhohat.ppm
    New argument 'rule.eps' passed to 'as.mask'.

        CHANGES IN spatstat.model VERSION 3.2-3

OVERVIEW

    o Tweak to satisfy package checker.

        CHANGES IN spatstat.model VERSION 3.2-2

OVERVIEW

    o Accelerated some code.

    o Standard errors are now available for 'ppm' models fitted using 'gam'.

    o Internal reorganisation.

    o Minor changes to documentation.

    o Bug fixes.

SIGNIFICANT USER-VISIBLE CHANGES

    o plot.mppm
    New argument 'main'.

BUG FIXES

    o predict.ppm
    Standard error calculation ('se=TRUE') crashed if the fitted model
    was a generalised additive model (fitted with 'use.gam=TRUE'). Fixed.

    o effectfun
    Standard error calculation ('se.fit=TRUE') crashed if the fitted model
    was a generalised additive model (fitted with 'use.gam=TRUE'). Fixed.
    o parres
    If 'model' was a large object, computation was extremely slow
    or terminated with a message about 'deparse'. Fixed.

    o plot.mppm
    If the fitted model 'x' was a large object, computation was extremely
    slow or terminated with a message about 'deparse'. Fixed.

    o predict.ppm
    If 'new.coef' was given and the fitted model 'object' was a large
    object, computation was extremely slow, or terminated with message
    about 'deparse'. Fixed.

        CHANGES IN spatstat.model VERSION 3.2-1

OVERVIEW

    o Minor changes to citation file, to satisfy CRAN.

        CHANGES IN spatstat.model VERSION 3.2-0

OVERVIEW

    o Improvements to 'update' methods for point process models.

    o New 'update' methods for classes 'dppm' and 'rppm'.

    o Minor improvements and bug fixes.

NEW FUNCTIONS

    o update.dppm
    Update method for determinantal point process models.

    o update.rppm
    Update method for recursively partitioned point process models.

SIGNIFICANT USER-VISIBLE CHANGES

    o update.slrm
    Now accepts the idiom 'update(object, X)' where X is a point pattern.

    o update.ppm, update.kppm, update.dppm, update.slrm, update.rppm
    All of these methods now accept the idiom 'update(object, X)'
    where X is a point pattern.

    o print.ppm, summary.ppm
    Prints the name of the point pattern dataset to which the model
    was fitted.

    o update.ppm
    Internal improvements.

        CHANGES IN spatstat.model VERSION 3.1-2

OVERVIEW

    o Minor changes to satisfy CRAN package checker.

        CHANGES IN spatstat.model VERSION 3.1-1

OVERVIEW

    o We thank Bethany Macdonald for contributions.

    o kppm has been accelerated when method='palm' or 'clik2'.

    o kppm can save the history of the optimisation algorithm.

    o Bug fixes and internal tweaks.

SIGNIFICANT USER-VISIBLE CHANGES

    o kppm
    New argument 'trajectory' specifies whether to save the history of
    function evaluations performed by the optimization algorithm.

    o kppm
    Computation accelerated when 'method="palm"' or 'method="clik2"'.
    [Kindly contributed by Bethany Macdonald.]

BUG FIXES

    o simulate.kppm
    If the model was very close to a Poisson process, and if
    saveLambda=TRUE was selected, the attribute "Lambda" was incorrectly
    labelled "lambda". Fixed.

    o simulate.kppm
    Simulation of the variance-gamma model terminated with an error
    about the value of 'nu' (with recent versions of spatstat.random).
    Fixed.

    o kppm
    Terminated with an error about missing argument 'A',
    if penalised=TRUE. Fixed.

    o summary.kppm
    Did not correctly recognise when a model was fitted using a penalty.
    Fixed.

        CHANGES IN spatstat.model VERSION 3.1-0

OVERVIEW

    o We thank Tilman Davies and Martin Hazelton for contributions.

    o Penalised model-fitting for Neyman-Scott cluster process models.

    o Index of the strength of clustering in a Neyman-Scott cluster
    process model.

    o Probability of having any siblings.

    o More information is printed about Neyman-Scott cluster process models.

    o Minor improvements.

NEW CLASSES

    o traj
    Trajectory (history of function evaluations) in a model
    that was fitted by optimisation.

NEW FUNCTIONS

    o panysib
    Probability that a point in a cluster process has any siblings.

    o is.poissonclusterprocess
    Detects whether a given model is a Poisson cluster process
    (which includes Neyman-Scott processes).

    o traj, print.traj, plot.traj, lines.traj
    Extract, print and plot the trajectory of function evaluations.

SIGNIFICANT USER-VISIBLE CHANGES

    o kppm
    New argument 'penalised' supports penalised model-fitting with a
    penalty against extremely large or small values of the cluster scale.
    o print.kppm, summary.kppm
    Additional characteristics of the fitted model are reported,
    including the cluster strength 'phi' and the sibling probability.

    o varcount
    New argument 'relative' (supports calculation of the overdispersion
    index).

    o plot.palmdiag
    Improved calculation of y axis limits.

        CHANGES IN spatstat.model VERSION 3.0-3

OVERVIEW

    o Palm intensity diagnostic.

NEW FUNCTIONS

    o palmdiagnose, plot.palmdiag
    Palm intensity diagnostic plot for cluster process models
    proposed by Tanaka, Ogata and Stoyan.

        CHANGES IN spatstat.model VERSION 3.0-2

OVERVIEW

    o Minor changes to placate the package checker.

        CHANGES IN spatstat.model VERSION 3.0-1

OVERVIEW

    o Minor changes to placate the package checker.

        CHANGES IN spatstat.model VERSION 3.0-0

OVERVIEW

    o New package.

    o We thank Achmad Choiruddin and Suman Rakshit for contributions.

    o Regularized model-fitting in 'ppm' and 'kppm'.

    o integral.msr accepts a weight function.

    o Weighted version of a measure.

    o Residuals for recursively-partitioned models.

    o Residuals for any estimate of intensity.

    o U-shaped curves in 'rhohat'.

    o Bug fixes and minor improvements.

NEW FUNCTIONS

    o measureWeighted
    Weighted version of a measure.

    o residuals.rppm
    Residual measure for a recursively-partitioned point process model.

    o residualMeasure
    Residual measure given an observed point pattern and an estimate
    of its intensity.

SIGNIFICANT USER-VISIBLE CHANGES

    o Package structure
    The package 'spatstat.core' has been split into two packages called
    'spatstat.explore' (for exploratory data analysis) and
    'spatstat.model' (for modelling and formal inference).

    o spatstat.model
    The new package 'spatstat.model' contains the code for model-fitting,
    model diagnostics, and formal inference. Examples include 'ppm',
    'kppm', 'mppm', 'dppm', 'slrm', 'simulate.ppm', 'anova.ppm',
    'diagnose.ppm', 'residuals.ppm', 'leverage.ppm', 'addvar', 'parres',

    o NEWS
    The NEWS file for the new package 'spatstat.model' contains older
    news items from the defunct package 'spatstat.core'
    (for functions which are now in 'spatstat.model').

    o ppm
    New argument 'improve.type'.

    o ppm
    Now supports regularized model-fitting when 'improve.type="enet"'.

    o ppm
    Option 'method="ho"' is replaced by 'improve.type="ho"'.

    o ppm
    Huang-Ogata approximate maximum likelihood can be applied to logistic
    fits by setting 'method="logi"' and 'improve.type="ho"'.

    o kppm
    New argument 'ppm.improve.type'.

    o kppm
    Now supports regularized model-fitting of the first order trend
    when 'ppm.improve.type="enet"'.

    o integral.msr
    New argument 'weight' specifies a weight (integrand)
    for the integration.

    o rhohat.ppm, rhohat.slrm
    New options 'smoother="mountain"' and 'smoother="valley"'
    for estimating a unimodal function (U-shaped curve).

    o rhohat.ppm, rhohat.slrm
    If the unit of length is a 'distfun', the name of the unit of length
    is saved and displayed on the plot.

    o rhohat.ppm, rhohat.slrm
    New arguments 'jitter', 'jitterfactor', 'interpolate' allow greater
    control over the calculation.

    o rhohat.ppm, rhohat.slrm
    New argument 'do.CI' specifies whether to calculate confidence bands.

BUG FIXES

    o predict.ppm
    Argument 'new.coef' was ignored in the calculation of the standard
    error when 'se=TRUE'. Fixed.

    o predict.ppm
    Argument 'new.coef' was ignored in calculating the standard error
    (and therefore the width of the interval) when 'type="count"' and
    'interval="confidence"' or 'interval="prediction"'. Fixed.

    o vcov.mppm
    Crashed with a message about 'subscript out of bounds',
    for some models. Fixed.
        CHANGES IN spatstat.core VERSION 2.4-4.010

OVERVIEW

    o Internal improvements.

        CHANGES IN spatstat.core VERSION 2.4-4

OVERVIEW

    o Bug fixes and minor improvements.

        CHANGES IN spatstat.core VERSION 2.4-3

OVERVIEW

    o We thank Art Stock for contributions.

    o Bug fixes and minor improvements.

        CHANGES IN spatstat.core VERSION 2.4-2

OVERVIEW

    o Internal bug fixes.

        CHANGES IN spatstat.core VERSION 2.4-1

OVERVIEW

    o We thank Frederic Lavancier, Sebastian Meyer and Sven Wagner
    for contributions.

    o Improved approximation of intensity of Gibbs models.

    o Experimental code to represent (theoretical) point process models.

    o Extract more information about a point process model.

    o Internal improvements and bug fixes.

NEW CLASSES

    o zgibbsmodel
    Experimental. An object of class 'zgibbsmodel' represents a Gibbs
    point process model with specified parameter values
    (whereas 'ppm' represents a model fitted to data).

NEW FUNCTIONS

    o hardcoredist
    Extract the hard core distance of a point process model.

    o interactionorder
    Extract the order of interpoint interaction of a point process model.

    o zgibbsmodel
    Experimental. Create an object of class 'zgibbsmodel'.

    o print.zgibbsmodel
    Experimental. Print an object of class 'zgibbsmodel'.

    o is.poisson.zgibbsmodel, is.stationary.zgibbsmodel
    Experimental. Methods for class 'zgibbsmodel'.

SIGNIFICANT USER-VISIBLE CHANGES

    o intensity.ppm
    Can now calculate the Coeurjolly-Lavancier DPP approximation
    of intensity. [Code kindly contributed by Frederic Lavancier.]
    New argument 'approx' specifies the choice of approximation.

BUG FIXES

    o vcov.ppm
    Crashed in some cases, with message 'object lamdel not found'.
    [Spotted by Sven Wagner.] Fixed.

        CHANGES IN spatstat.core VERSION 2.4-0

OVERVIEW

    o We thank Sriram Ramamurthy for contributions.

    o spatstat.core now depends on the new package 'spatstat.random'.

    o Functions for generating random patterns have been removed.

    o Important bug fixes in anova.mppm and vcov.mppm.

    o Minor improvements and bug fixes.

SIGNIFICANT USER-VISIBLE CHANGES

    o package structure
    The code for generating random spatial patterns (including 'rpoispp',
    'rMatClust', 'rThomas', 'rNeymanScott', 'rStrauss', 'rmh') has been
    removed from 'spatstat.core' and placed in a new package
    'spatstat.random'. This new package is required by 'spatstat.core'.

    o anova.mppm
    Improved labelling of models in output.

    o qqplot.ppm, plot.qqppm
    Improved the text label indicating the type of residuals.

BUG FIXES

    o vcov.mppm
    For Gibbs (non-Poisson) models, the variance matrix was calculated
    incorrectly in some cases. Fixed.

    o anova.mppm
    Results were sometimes incorrect if the two models had different
    interactions (e.g. Strauss vs Poisson). Fixed.

    o anova.mppm
    Crashed for some models with a message about
    'coefficient missing from new.coef'. Fixed.

    o anova.mppm
    Gave a warning for some models about "Internal error: unable to map
    submodels to full model". Fixed.

    o addvar
    If the covariate contained NA, NaN or Infinite values,
    the calculations were sometimes incorrect. Fixed.

    o pcfmodel.ppm
    Refused to handle an inhomogeneous Poisson process. Fixed.

    o fitted.ppm
    Crashed if leaveoneout=TRUE and the model had no fitted coefficients.
    Fixed.

        CHANGES IN spatstat.core VERSION 2.3-2

OVERVIEW

    o More diagnostics for spatial logistic regression models.

    o Important bug fix in kppm.

    o Increased numerical stability in kppm.

    o Minor improvements and bug fixes.

    o We thank Jonas Brehmer for contributions.

NEW FUNCTIONS

    o lurking.slrm
    Lurking variable plot for spatial logistic regression models.
    o eem.slrm
    Exponential energy marks for spatial logistic regression models.

    o eem.ppm
    Exponential energy marks for Gibbs and Poisson point process models
    (this function was previously called 'eem').

SIGNIFICANT USER-VISIBLE CHANGES

    o eem
    The function 'eem' is now generic, with methods for 'ppm' and 'slrm'.
    The function previously named 'eem' is now called 'eem.ppm'.

    o objsurf.dppm, objsurf.kppm, objsurf.mincontrast
    New arguments 'xlim', 'ylim' determine the range of parameter values
    to be considered.

    o Hybrid
    Printed output from hybrid models has been improved slightly.

    o kppm
    New default settings ensure greater numerical stability of the
    optimization algorithm against the effects of the scale of the
    spatial coordinates. New argument 'stabilize' specifies whether
    the optimization algorithm should be numerically stabilized.

    o pcf
    Improved error message.

BUG FIXES

    o kppm
    Results were sometimes incorrect for method='clik2' and method='palm'
    because the log composite likelihood was erroneously truncated to
    positive values. Any fitted model for which logLik(model) = 2.2e-16
    should be suspected of being incorrect. Fixed.

    o MultiHard
    A hybrid of 'MultiHard' with another multitype interaction
    caused an error. Fixed.

    o simulate.dppm, simulate.detpointprocfamily
    Crashed, rarely, with error message 'The dimension differs from
    the number of columns in index'. Fixed.

    o as.fv.kppm, as.fv.dppm
    The default plot labels in 'as.fv(x)' implied that the model 'x'
    was inhomogeneous. Fixed.

        CHANGES IN spatstat.core VERSION 2.3-1

OVERVIEW

    o Covariates in ppm and mppm may be functions that depend on the
    marks as well as the spatial coordinates.

    o Automatic selection of threshold for defining a binary predictor.

    o Random perturbation of line segments.

    o Minor extensions, performance improvements, and bug fixes.

NEW FUNCTIONS

    o coef<-.fii
    Changes the coefficients of a fitted interaction object
    (a method for the generic "coef<-").

SIGNIFICANT USER-VISIBLE CHANGES

    o ppm
    Covariates which are functions may now depend on the marks
    as well as the spatial coordinates: function(x,y,marks).

    o mppm
    Covariates which are functions may now depend on the marks
    as well as the spatial coordinates: function(x,y,marks).

    o reach.kppm
    Now returns a result for LGCP models as well as cluster models.

    o update.kppm
    New argument 'envir'.

    o clusterfit
    Minor changes to the argument list.

    o mincontrast
    Minor changes to the argument list.

    o mincontrast
    Improved algorithm for handling NA, NaN or infinite values.

    o varcount
    Argument B has a sensible default.

BUG FIXES

    o subfits
    The fitted interaction coefficients were garbled. If 'mfit' is the
    mppm object and 'a <- subfits(mfit)[[i]]' is one of the sub-models,
    then coef(a) was correct, but coef(fitin(a)) was incorrect.
    The fitted interaction was shown correctly by printing 'mfit'
    but incorrectly by printing 'a'. Fixed.

    o varcount
    The result was very inaccurate if the cluster radius was small
    compared to the size of the window 'B', due to discretisation error.
    Fixed.

        CHANGES IN spatstat.core VERSION 2.3-0

OVERVIEW

    o We thank Chiara Fend for contributions.

    o Extensive support for spatial logistic regression models.

    o New fitting method in kppm and dppm.

    o Summary method for 'objsurf'.

    o Minor bug fixes.

NEW FUNCTIONS

    o response
    Generic function which extracts the values of the response
    in a fitted model. There are methods for lm, glm
    (which extract the numeric vector of responses), and
    ppm, kppm, slrm, lppm and mppm
    (which extract the original data point pattern).
    o summary.objsurf, print.summary.objsurf
    Summary method for 'objsurf'.

    o residuals.slrm
    Residuals for spatial logistic regression models.

    o leverage.slrm, influence.slrm, dfbetas.slrm, dffit.slrm
    Leverage and influence diagnostics for spatial logistic
    regression models.

    o rhohat.slrm
    Method for 'rhohat' for spatial logistic regression models.

    o envelope.slrm
    Method for 'envelope' for spatial logistic regression models.

    o intensity.slrm
    Method for 'intensity' for spatial logistic regression models.

    o deviance.slrm
    Method for 'deviance' for spatial logistic regression models.

    o pseudoR2.slrm
    Method for 'pseudoR2' for spatial logistic regression models.

    o quadrat.test.slrm
    Method for 'quadrat.test' for spatial logistic regression models.

    o parameters.slrm
    Method for 'parameters' for spatial logistic regression models.

    o valid.slrm
    Method for 'valid' for spatial logistic regression models.

    o emend.slrm
    Method for 'emend' for spatial logistic regression models.

    o roc.slrm
    Method for 'roc' for spatial logistic regression models.

    o auc.slrm
    Method for 'auc' for spatial logistic regression models.

    o Window.slrm, as.owin.slrm
    Methods for 'Window' and 'as.owin' for spatial logistic
    regression models.

SIGNIFICANT USER-VISIBLE CHANGES

    o kppm, dppm
    New option 'method="adapcl"' performs adaptive composite
    likelihood fitting. [Contributed by Chiara Fend.]

BUG FIXES

    o clusterfield
    Values of the cluster field were slightly incorrect (slightly higher
    than the correct values) near the edge of the window, because an
    'edge correction' was mistakenly applied. Fixed.

        CHANGES IN spatstat.core VERSION 2.2-0

OVERVIEW

    o Summary method for spatial logistic regression models.

NEW FUNCTIONS

    o summary.slrm, print.summary.slrm
    Summary method for spatial logistic regression models.

    o coef.summary.slrm
    Print the fitted coefficients, confidence interval and p-values
    for a spatial logistic regression model.

        CHANGES IN spatstat.core VERSION 2.1-2

OVERVIEW

    o Reduced CRAN check time.

        CHANGES IN spatstat.core VERSION 2.1-1

OVERVIEW

    o Minor bug fix.

BUG FIXES

    o simulate.kppm
    Conditional simulation crashed on rare occasions,
    with an error about negative probabilities. Fixed.

        CHANGES IN spatstat.core VERSION 2.1-0

OVERVIEW

    o We thank Achmad Choiruddin, Jean-Francois Coeurjolly and
    Rasmus Waagepetersen for contributions.

    o Conditional simulation in kppm.

    o Information criteria for model selection in kppm.

    o Modified handling of covariates in slrm.

    o Improved output in summary.mppm.

    o Minor improvements and bug fixes.

NEW FUNCTIONS

    o ic
    Information criteria for model selection in ppm and kppm.
    Kindly contributed by Achmad Choiruddin, Jean-Francois Coeurjolly
    and Rasmus Waagepetersen.

SIGNIFICANT USER-VISIBLE CHANGES

    o simulate.kppm
    Conditional simulation of the model, given a fixed number of points,
    is now supported using the new arguments 'n.cond' and 'w.cond'.

    o slrm
    In the default case (where dataAtPoints is not given) all spatial
    covariates, including the spatial coordinates x and y, are now
    evaluated at the centre of each pixel. This improves consistency
    with other implementations of spatial logistic regression.

    o slrm
    Silently ignores any arguments '...' that are not recognised
    by 'as.mask'.

    o summary.mppm
    Improved summary of the dependence of the interpoint interaction
    on the covariates.

    o pairs.im
    New argument 'drop'.

BUG FIXES

    o model.matrix.mppm
    If the model was fitted using 'gam', the resulting matrix
    did not have an 'assign' attribute. Fixed.

    o model.depends
    Crashed for models fitted using 'gam'. Fixed.
    o predict.slrm, fitted.slrm
    Crashed if the model was fitted using split pixels
    (argument 'splitby'). Fixed.

    o predict.slrm, fitted.slrm
    Crashed in some cases when 'window' was given. Fixed.

    o update.slrm
    Failed to find covariates that were provided in 'env'. Fixed.

        CHANGES IN spatstat.core VERSION 2.0-0

OVERVIEW

    o We thank Corey Anderson, Michael Chirico, Andy Craig,
    Marcelino de la Cruz, Tilman Davies, Pavel Fibich, Kurt Hornik,
    Gopalan Nair, Yonatan Rosen and Rasmus Waagepetersen
    for contributions.

    o More support for spatial logistic regression models.

    o predict.mppm now works for multitype point process models.

    o Improved handling of 'newdata' in predict.mppm.

    o More support for multi-dimensional patterns.

SIGNIFICANT USER-VISIBLE CHANGES

    o slrm
    'step' can now be applied to models fitted using 'slrm'.

    o predict.mppm
    Now supports multitype point process models.

    o predict.mppm
    Improved handling of argument 'newdata'.

    o simulate.ppm
    Now recognises the argument 'window' as an alternative to 'w'.

    o kppm
    Improved numerical robustness.

    o anova.mppm
    Issues a warning when applied to random-effects models
    (models fitted using the argument 'random').

    o mincontrast
    New argument 'action.bad.values' specifies what action is taken
    when the summary function produces NA or NaN or infinite values.

BUG FIXES

    o rMatClust, rThomas, rCauchy, rVarGamma
    If the fitted model was effectively a Poisson process, the result
    did not have attributes 'Lambda' and 'parents' even when the user
    requested them. Fixed.

    o model.matrix.mppm
    Crashed with random-effects models. Fixed.

    o anova.mppm
    Crashed with random-effects models. Fixed.

    o objsurf.kppm
    Crashed if the model was fitted by Palm likelihood (method="palm")
    or second order composite likelihood (method="clik2"). Fixed.

        CHANGES IN spatstat.core VERSION 1.65-11

OVERVIEW

    o Internal tweaks.

        CHANGES IN spatstat.core VERSION 1.65-10

OVERVIEW

    o Minor corrections to documentation.

        CHANGES IN spatstat.core VERSION 1.65-9

OVERVIEW

    o We thank Ian Buller for a suggestion.

        CHANGES IN spatstat.core VERSION 1.65-8

OVERVIEW

    o Minor changes to appease the compiler.

        CHANGES IN spatstat.core VERSION 1.65-7

OVERVIEW

    o We thank Michael Chirico for a contribution.

    o Minor changes to appease the compiler.

        CHANGES IN spatstat.core VERSION 1.65-6

OVERVIEW

    o We thank Tilman Davies and Pavel Fibich for contributions.

    o Important bug fix in simulation of log-Gaussian Cox processes.

    o Increased speed for large datasets.

    o Variance calculations handle larger datasets.

SIGNIFICANT USER-VISIBLE CHANGES

    o vcov.ppm, summary.ppm
    Variance calculations now handle larger datasets
    (using sparse arrays).

    o overall speed
    Changes have been made to the internal code of spatstat
    which should accelerate computations involving large datasets.

    o localpcf, localpcfinhom
    New argument 'rvalue'.

BUG FIXES

    o simulate.kppm
    Simulation results for log-Gaussian Cox processes were incorrect
    unless the pixel dimensions and pixel spacings were identical
    on the horizontal and vertical axes. (If pixel dimensions were
    not specified, then the results were incorrect whenever the
    Frame of the simulation window was not a square.)
    [Spotted by Tilman Davies.] Fixed.

    o print.ppm
    Crashed sometimes when applied to the result of subfits(). Fixed.

        CHANGES IN spatstat.core VERSION 1.65-5

OVERVIEW

    o Minor changes required by CRAN.

        CHANGES IN spatstat.core VERSION 1.65-1

OVERVIEW

    o Added NEWS file.
        CHANGES IN spatstat.core VERSION 1.65-0

OVERVIEW

    o Package initialised at version 1.65-0.

SIGNIFICANT USER-VISIBLE CHANGES

    o spatstat.core
    The package 'spatstat.core' has been created from a subset of the
    code in the original 'spatstat' package version 1.65-0. It contains
    the core functionality for statistical analysis of spatial data.
    For an overview, see help("spatstat.core-package").

    o Execution
    The 'spatstat.core' package is slightly faster than the corresponding
    code in the 'spatstat' package, because the procedure for calling
    internal C functions has been streamlined.
spatstat.model/R/0000755000176200001440000000000014334115572013422 5ustar liggesusersspatstat.model/R/polynom.R0000644000176200001440000000450514331173073015243 0ustar liggesusers#'
#' polynom.R
#'
#' $Revision: 1.1 $ $Date: 2017/01/02 09:48:36 $
#'

polynom <- function(x, ...) {
  rest <- list(...)
  # degree not given
  if(length(rest) == 0)
    stop("degree of polynomial must be given")
  # call with single variable and degree
  if(length(rest) == 1) {
    degree <- ..1
    if((degree %% 1) != 0 || length(degree) != 1 || degree < 1)
      stop("degree of polynomial should be a positive integer")
    # compute values
    result <- outer(x, 1:degree, "^")
    # compute column names - the hard part !
    namex <- deparse(substitute(x))
    # check whether it needs to be parenthesised
    if(!is.name(substitute(x)))
      namex <- paste("(", namex, ")", sep="")
    # column names
    namepowers <- if(degree == 1) namex else
                  c(namex, paste(namex, "^", 2:degree, sep=""))
    namepowers <- paste("[", namepowers, "]", sep="")
    # stick them on
    dimnames(result) <- list(NULL, namepowers)
    return(result)
  }
  # call with two variables and degree
  if(length(rest) == 2) {
    y <- ..1
    degree <- ..2
    # list of exponents of x and y, in nice order
    xexp <- yexp <- numeric()
    for(i in 1:degree) {
      xexp <- c(xexp, i:0)
      yexp <- c(yexp, 0:i)
    }
    nterms <- length(xexp)
    # compute
    result <- matrix(, nrow=length(x), ncol=nterms)
    for(i in 1:nterms)
      result[, i] <- x^xexp[i] * y^yexp[i]
    # names of these terms
    namex <- deparse(substitute(x))
    # namey <- deparse(substitute(..1))   ### seems not to work in R
    zzz <- as.list(match.call())
    namey <- deparse(zzz[[3]])
    # check whether they need to be parenthesised
    # if so, add parentheses
    if(!is.name(substitute(x)))
      namex <- paste("(", namex, ")", sep="")
    if(!is.name(zzz[[3]]))
      namey <- paste("(", namey, ")", sep="")
    nameXexp <- c("", namex, paste(namex, "^", 2:degree, sep=""))
    nameYexp <- c("", namey, paste(namey, "^", 2:degree, sep=""))
    # make the term names
    termnames <- paste(nameXexp[xexp + 1],
                       ifelse(xexp > 0 & yexp > 0, ".", ""),
                       nameYexp[yexp + 1],
                       sep="")
    termnames <- paste("[", termnames, "]", sep="")
    dimnames(result) <- list(NULL, termnames)
    #
    return(result)
  }
  stop("Can't deal with more than 2 variables yet")
}
spatstat.model/R/kppm.R0000644000176200001440000025773014420371141014522 0ustar liggesusers#
# kppm.R
#
# kluster/kox point process models
#
# $Revision: 1.230 $ $Date: 2023/03/26 10:08:44 $
#

kppm <- function(X, ...) {
  UseMethod("kppm")
}

kppm.formula <-
  function(X,
           clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
           ...,
           data=NULL) {
  ## remember call
  callstring <- short.deparse(sys.call())
  cl <- match.call()

  ########### INTERPRET FORMULA ##############################

  if(!inherits(X, "formula"))
    stop(paste("Argument 'X' should be a formula"))
  formula <- X

  if(spatstat.options("expand.polynom"))
    formula <- expand.polynom(formula)

  ## check formula has LHS and RHS.
  ## Extract them
  if(length(formula) < 3)
    stop(paste("Formula must have a left hand side"))
  Yexpr <- formula[[2L]]
  trend <- formula[c(1L,3L)]

  ## FIT #######################################
  thecall <- call("kppm", X=Yexpr, trend=trend,
                  data=data, clusters=clusters)
  ncall <- length(thecall)
  argh <- list(...)
  nargh <- length(argh)
  if(nargh > 0) {
    thecall[ncall + 1:nargh] <- argh
    names(thecall)[ncall + 1:nargh] <- names(argh)
  }
  ## result <- eval(thecall,
  ##                envir=if(!is.null(data)) data else parent.frame(),
  ##                enclos=if(!is.null(data)) parent.frame() else baseenv())
  callenv <- list2env(as.list(data), parent=parent.frame())
  result <- eval(thecall, envir=callenv, enclos=baseenv())
  result$call <- cl
  result$callframe <- parent.frame()
  if(!("callstring" %in% names(list(...))))
    result$callstring <- callstring
  return(result)
}

kppm.ppp <- kppm.quad <-
  function(X, trend = ~1,
           clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
           data=NULL,
           ...,
           covariates = data,
           subset,
           method = c("mincon", "clik2", "palm", "adapcl"),
           penalised = FALSE,
           improve.type = c("none", "clik1", "wclik1", "quasi"),
           improve.args = list(),
           weightfun=NULL,
           control=list(),
           stabilize=TRUE,
           algorithm,
           trajectory=FALSE,
           statistic="K",
           statargs=list(),
           rmax = NULL,
           epsilon=0.01,
           covfunargs=NULL,
           use.gam=FALSE,
           nd=NULL, eps=NULL,
           ppm.improve.type = c("none", "ho", "enet"),
           ppm.improve.args = list()) {
  cl <- match.call()
  callstring <- paste(short.deparse(sys.call()), collapse="")
  Xname <- short.deparse(substitute(X))
  clusters <- match.arg(clusters)
  improve.type <- match.arg(improve.type)
  method <- match.arg(method)
  if(method == "mincon")
    statistic <- pickoption("summary statistic", statistic,
                            c(K="K", g="pcf", pcf="pcf"))
  if(missing(algorithm)) {
    algorithm <- if(method == "adapcl") "Broyden" else "Nelder-Mead"
  } else check.1.string(algorithm)
  if(isTRUE(trajectory) && method == "adapcl")
    warning("trajectory=TRUE is not supported for method 'adapcl'",
            call.=FALSE)
  ClusterArgs <- list(method = method,
                      improve.type = improve.type,
                      improve.args = improve.args,
                      weightfun=weightfun,
                      control=control,
                      stabilize=stabilize,
                      algorithm=algorithm,
                      statistic=statistic,
                      statargs=statargs,
                      rmax = rmax)
  Xenv <- list2env(as.list(covariates), parent=parent.frame())
  X <- eval(substitute(X), envir=Xenv, enclos=parent.frame())
  isquad <- is.quad(X)
  if(!is.ppp(X) && !isquad)
    stop("X should be a point pattern (ppp) or quadrature scheme (quad)")
  if(is.marked(X))
    stop("Sorry, cannot handle marked point patterns")
  if(!missing(subset)) {
    W <- eval(subset, covariates, parent.frame())
    if(!is.null(W)) {
      if(is.im(W)) {
        W <- solutionset(W)
      } else if(!is.owin(W)) {
        stop("Argument 'subset' should yield a window or logical image",
             call.=FALSE)
      }
      X <- X[W]
    }
  }
  po <- ppm(Q=X, trend=trend, covariates=covariates,
            forcefit=TRUE, rename.intercept=FALSE,
            covfunargs=covfunargs, use.gam=use.gam, nd=nd, eps=eps,
            improve.type=ppm.improve.type, improve.args=ppm.improve.args)
  XX <- if(isquad) X$data else X
  if(is.character(weightfun)) {
    RmaxW <- (rmax %orifnull% rmax.rule("K", Window(XX), intensity(XX))) / 2
    switch(weightfun,
           threshold = {
             weightfun <- function(d) { as.integer(d <= RmaxW) }
             attr(weightfun, "selfprint") <-
               paste0("Indicator(distance <= ", RmaxW, ")")
           },
           taper = {
             weightfun <- function(d) { pmin(1, RmaxW/d)^2 }
             attr(weightfun, "selfprint") <-
               paste0("min(1, ", RmaxW, "/d)^2")
           },
           stop(paste("Unrecognised option", sQuote(weightfun),
                      "for weightfun")))
  }
  ## default weight function
  if(is.null(weightfun))
    switch(method,
           adapcl = {
             weightfun <- function(d) { as.integer(abs(d)
                                                   <= 1)*exp(1/(d^2-1)) }
             attr(weightfun, "selfprint") <-
               "Indicator(-1 <= distance <= 1) * exp(1/(distance^2-1))"
           },
           mincon = { },
           {
             RmaxW <- (rmax %orifnull%
                       rmax.rule("K", Window(XX), intensity(XX))) / 2
             weightfun <- function(d) { as.integer(d <= RmaxW) }
             attr(weightfun, "selfprint") <-
               paste0("Indicator(distance <= ", RmaxW, ")")
           })
  ## fit
  out <- switch(method,
                mincon = kppmMinCon(X=XX, Xname=Xname, po=po,
                                    clusters=clusters,
                                    control=control, stabilize=stabilize,
                                    statistic=statistic, statargs=statargs,
                                    rmax=rmax, algorithm=algorithm,
                                    penalised = penalised,
                                    trajectory = trajectory,
                                    ...),
                clik2 = kppmComLik(X=XX, Xname=Xname, po=po,
                                   clusters=clusters,
                                   control=control, stabilize=stabilize,
                                   weightfun=weightfun, rmax=rmax,
                                   algorithm=algorithm,
                                   penalised = penalised,
                                   trajectory = trajectory,
                                   ...),
                palm = kppmPalmLik(X=XX, Xname=Xname, po=po,
                                   clusters=clusters,
                                   control=control, stabilize=stabilize,
                                   weightfun=weightfun, rmax=rmax,
                                   algorithm=algorithm,
                                   penalised = penalised,
                                   trajectory = trajectory,
                                   ...),
                adapcl = kppmCLadap(X=XX, Xname=Xname, po=po,
                                    clusters=clusters,
                                    control=control, epsilon=epsilon,
                                    weightfun=weightfun, rmax=rmax,
                                    algorithm=algorithm,
                                    ...))
  ##
  h <- attr(out, "h")
  out <- append(out, list(ClusterArgs=ClusterArgs,
                          call=cl,
                          callframe=parent.frame(),
                          callstring=callstring))
  # Detect DPPs
  DPP <- list(...)$DPP
  class(out) <- c(ifelse(is.null(DPP), "kppm", "dppm"), class(out))
  # Update intensity estimate with improve.kppm if necessary:
  if(improve.type != "none")
    out <- do.call(improve.kppm,
                   append(list(object = out, type = improve.type),
                          improve.args))
  if(!is.null(h)) class(h) <- unique(c("traj", class(h)))
  attr(out, "h") <- h
  return(out)
}

## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
##            M i n i m u m    C o n t r a s t
## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

kppmMinCon <- function(X, Xname, po, clusters, control=list(),
                       stabilize=TRUE, statistic, statargs,
                       algorithm="Nelder-Mead", DPP=NULL, ...,
                       pspace=NULL) {
  # Minimum contrast fit
  stationary <- is.stationary(po)
  pspace <- do.call(make.pspace,
                    resolve.defaults(
                      list(fitmethod="mincon", clusters=clusters),
                      list(...),  ## ellipsis arguments override pspace
                      as.list(pspace),
                      .MatchNull=FALSE))
  # compute intensity
  if(stationary) {
    lambda <- summary(po)$trend$value
  } else {
    # compute intensity at high resolution if available
    w <- as.owin(po, from="covariates")
    if(!is.mask(w)) w <- NULL
    lambda <- predict(po, locations=w)
  }
  # Detect DPP model and change clusters and intensity correspondingly
  if(!is.null(DPP)){
    tmp <- dppmFixIntensity(DPP, lambda, po)
    clusters <- tmp$clusters
    lambda <- tmp$lambda
    po <- tmp$po
  }
  mcfit <- clusterfit(X, clusters, lambda = lambda,
                      dataname = Xname,
                      control = control, stabilize=stabilize,
                      statistic = statistic, statargs = statargs,
                      algorithm=algorithm,
                      pspace=pspace,
                      ...)
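  ## Illustrative user-level call that reaches this code path (a sketch;
  ## 'redwood' is a point pattern dataset from spatstat.data):
  ##     fit <- kppm(redwood ~ 1, clusters = "Thomas", method = "mincon")
  ##     fit$clustpar    # fitted cluster parameters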
  fitinfo <- attr(mcfit, "info")
  attr(mcfit, "info") <- NULL
  # all info that depends on the fitting method:
  Fit <- list(method    = "mincon",
              statistic = statistic,
              Stat      = fitinfo$Stat,
              StatFun   = fitinfo$StatFun,
              StatName  = fitinfo$StatName,
              FitFun    = fitinfo$FitFun,
              statargs  = statargs,
              pspace.given = pspace,
              pspace.used  = fitinfo$pspace.used,
              mcfit     = mcfit,
              maxlogcl  = NULL)
  # results
  if(!is.null(DPP)){
    clusters <- update(clusters, as.list(mcfit$par))
    out <- list(Xname      = Xname,
                X          = X,
                stationary = stationary,
                fitted     = clusters,
                po         = po,
                Fit        = Fit)
  } else{
    out <- list(Xname      = Xname,
                X          = X,
                stationary = stationary,
                clusters   = clusters,
                modelname  = fitinfo$modelname,
                isPCP      = fitinfo$isPCP,
                po         = po,
                lambda     = lambda,
                mu         = mcfit$mu,
                par        = mcfit$par,
                par.canon  = mcfit$par.canon,
                clustpar   = mcfit$clustpar,
                clustargs  = mcfit$clustargs,
                modelpar   = mcfit$modelpar,
                covmodel   = mcfit$covmodel,
                Fit        = Fit)
  }
  h <- attr(mcfit, "h")
  if(!is.null(h)) class(h) <- unique(c("traj", class(h)))
  attr(out, "h") <- h
  return(out)
}

clusterfit <- function(X, clusters, lambda = NULL, startpar = NULL,
                       ...,
                       q=1/4, p=2, rmin=NULL, rmax=NULL,
                       ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax),
                       statistic = NULL, statargs = NULL,
                       algorithm="Nelder-Mead",
                       verbose=FALSE, pspace=NULL){
  if(verbose) splat("Fitting cluster model")
  ## If possible get dataname from dots
  dataname <- list(...)$dataname
  ## Cluster info:
  info <- spatstatClusterModelInfo(clusters)
  if(verbose) splat("Retrieved cluster model information")
  ## Determine model type
  isPCP <- isTRUE(info$isPCP)
  isDPP <- inherits(clusters, "detpointprocfamily")
  ## resolve algorithm parameters
  default.ctrl <- list(q=if(isDPP) 1/2 else 1/4, p=2, rmin=NULL, rmax=NULL)
  given.ctrl <- if(missing(ctrl)) list() else ctrl[names(default.ctrl)]
  given.args <- c(if(missing(q)) NULL else list(q=q),
                  if(missing(p)) NULL else list(p=p),
                  if(missing(rmin)) NULL else list(rmin=rmin),
                  if(missing(rmax)) NULL else list(rmax=rmax))
  ctrl <- resolve.defaults(given.args, given.ctrl, default.ctrl)
  if(verbose) {
    splat("Algorithm parameters:")
    print(ctrl)
  }
  ##
  if(inherits(X, "ppp")){
    if(verbose) splat("Using point pattern data")
    if(is.null(dataname))
      dataname <- getdataname(short.deparse(substitute(X), 20), ...)
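    ## Note: below, a NULL or scalar 'lambda' is taken to mean a
    ## stationary model, so the homogeneous summary statistic
    ## (Kest or pcf) is computed; otherwise the inhomogeneous version
    ## (Kinhom or pcfinhom) is used with the supplied intensity 'lambda'.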
    if(is.null(statistic))
      statistic <- "K"
    # Startpar:
    if(is.null(startpar))
      startpar <- info$selfstart(X)
    stationary <- is.null(lambda) ||
                  (is.numeric(lambda) && length(lambda)==1)
    if(verbose) {
      splat("Starting parameters:")
      print(startpar)
      cat("Calculating summary function...")
    }
    # compute summary function
    if(stationary) {
      if(is.null(lambda)) lambda <- intensity(X)
      StatFun <- if(statistic == "K") "Kest" else "pcf"
      StatName <- if(statistic == "K") "K-function" else
                  "pair correlation function"
      Stat <- do.call(StatFun,
                      resolve.defaults(list(X=quote(X)),
                                       statargs,
                                       list(correction="best")))
    } else {
      StatFun <- if(statistic == "K") "Kinhom" else "pcfinhom"
      StatName <- if(statistic == "K") "inhomogeneous K-function" else
                  "inhomogeneous pair correlation function"
      Stat <- do.call(StatFun,
                      resolve.defaults(list(X=quote(X), lambda=lambda),
                                       statargs,
                                       list(correction="best")))
    }
    if(verbose) splat("Done.")
  } else if(inherits(X, "fv")){
    if(verbose) splat("Using the given summary function")
    Stat <- X
    ## Get statistic type
    stattype <- attr(Stat, "fname")
    StatFun <- paste0(stattype)
    StatName <- NULL
    if(is.null(statistic)){
      if(is.null(stattype) || !is.element(stattype[1L], c("K", "pcf")))
        stop("Cannot infer the type of summary statistic from argument ",
             sQuote("X"), " please specify this via argument ",
             sQuote("statistic"))
      statistic <- stattype[1L]
    }
    if(stattype[1L]!=statistic)
      stop("Statistic inferred from ", sQuote("X"),
           " not equal to supplied argument ", sQuote("statistic"))
    # Startpar:
    if(is.null(startpar)){
      if(isDPP)
        stop("No rule for starting parameters in this case. Please set ",
             sQuote("startpar"), " explicitly.")
      startpar <- info$checkpar(startpar, native=FALSE)
      startpar[["scale"]] <- mean(range(Stat[[fvnames(Stat, ".x")]]))
    }
  } else{
    stop("Unrecognised format for argument X")
  }
  ## avoid using g(0) as it may be infinite
  if(statistic=="pcf"){
    if(verbose) splat("Checking g(0)")
    argu <- fvnames(Stat, ".x")
    rvals <- Stat[[argu]]
    if(rvals[1L] == 0 && (is.null(rmin) || rmin == 0)) {
      if(verbose) splat("Ignoring g(0)")
      rmin <- rvals[2L]
    }
  }
  ## DPP resolving algorithm and checking startpar
  changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead"
  if(isDPP){
    if(verbose) splat("Invoking dppmFixAlgorithm")
    alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar)
    algorithm <- alg$algorithm
  }
  #' determine initial values of parameters
  startpar <- info$checkpar(startpar, native=TRUE)
  #' code to compute the theoretical summary function of the model
  theoret <- info[[statistic]]
  #' explanatory text
  desc <- paste("minimum contrast fit of", info$descname)
  #' determine shape parameters if any
  dots <- info$resolveshape(...)
  margs <- dots$margs
  #' ............ permit negative parameter values .........................
  strict <- !isFALSE(pspace$strict)
  sargs <- if(strict) list() else list(strict=FALSE)
  #' ............ adjust K function or pair correlation function ...........
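  #' (When pspace$adjusted is TRUE, the theoretical statistic is divided
  #' by a normalising constant obtained by integrating the model pair
  #' correlation function against the cdf of interpoint distance in the
  #' window, computed via distcdf(); see 'adjfun' below.)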
  do.adjust <- isTRUE(pspace$adjusted)
  if(do.adjust) {
    if(verbose) splat("Applying kppm adjustment")
    W <- Window(X)
    delta <- if(is.null(rmax)) NULL else rmax/4096
    ## pack up precomputed information needed for adjustment
    adjdata <- list(paircorr = info[["pcf"]],
                    pairWcdf = distcdf(W, delta=delta),
                    tohuman = NULL)
    adjfun <- function(theo, par, auxdata, ..., margs=NULL) {
      with(auxdata, {
        if(!is.null(tohuman))
          par <- tohuman(par, ..., margs=margs)
        a <- as.numeric(stieltjes(paircorr, pairWcdf,
                                  par=par, ..., margs=margs))
        return(theo/a)
      })
    }
    pspace$adjustment <- list(fun=adjfun, auxdata=adjdata)
  }
  #' parameter vector corresponding to Poisson process
  if(isDPP) {
    poispar <- NULL
  } else if(isPCP) {
    if(!("kappa" %in% names(startpar)))
      stop("Internal error: startpar does not include 'kappa'")
    poispar <- startpar
    poispar[["kappa"]] <- Inf
  } else {
    #' LGCP
    if(!("sigma2" %in% names(startpar)))
      stop("Internal error: startpar does not include 'sigma2'")
    poispar <- startpar
    poispar[["sigma2"]] <- .Machine$double.eps # i.e. 0
  }
  #' ............ use canonical parameters .........................
  usecanonical <- isTRUE(pspace$canonical)
  if(usecanonical) {
    if(verbose) splat("Converting to canonical parameters")
    tocanonical <- info$tocanonical
    tohuman <- info$tohuman
    if(is.null(tocanonical) || is.null(tohuman)) {
      warning("Canonical parameters are not yet supported for this model")
      usecanonical <- FALSE
    }
  }
  startpar.human <- startpar
  poispar.human <- poispar
  if(usecanonical) {
    htheo <- theoret
    startpar <- tocanonical(startpar, margs=margs)
    if(!is.null(poispar)) poispar <- tocanonical(poispar, margs=margs)
    theoret <- function(par, ...) { htheo(tohuman(par, ...), ...) }
    if(do.adjust)
      pspace$adjustment$auxdata$tohuman <- tohuman
  }
  #' ............ penalty .........................
  penalty <- pspace$penalty
  penal.args <- pspace$penal.args
  tau <- pspace$tau %orifnull% 1
  if(is.function(penalty)) {
    # penalised optimisation
    if(usecanonical) {
      penalty.human <- penalty
      penalty <- function(par, ...) { penalty.human(tohuman(par, ...), ...) }
    }
    ## data-dependent arguments in penalty
    if(is.function(penal.args)) penal.args <- penal.args(X)
    ## exchange rate (defer evaluation if it is a function)
    if(!is.function(tau)) check.1.real(tau)
    ## reinsert in 'pspace' to pass to 'mincontrast'
    pspace$penalty <- penalty
    pspace$penal.args <- penal.args
    pspace$tau <- tau
    ## unpenalised version
    pspace.unpen <- pspace
    pspace.unpen[c("penalty", "penal.args", "tau")] <- NULL
  }
  #' ...................................................
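  #' A user-supplied 'penalty' is a function of the parameter vector.
  #' Hypothetical example (not from the package): a penalty discouraging
  #' extreme cluster scales relative to a reference value s0,
  #'     penalty <- function(par, ..., s0 = 0.1) (log(par[["scale"]]/s0))^2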
#' mcargs <- resolve.defaults(list(observed=Stat, theoretical=theoret, startpar=startpar, ctrl=ctrl, method=algorithm, fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=statistic, modelname=info$modelname), margs=dots$margs, model=dots$model, pspace=pspace), ## As modified above list(...), sargs) if(isDPP && algorithm=="Brent" && changealgorithm) mcargs <- resolve.defaults(mcargs, list(lower=alg$lower, upper=alg$upper)) if(is.function(penalty) && is.function(tau)) { ## data-dependent exchange rate 'tau': evaluate now if("poisval" %in% names(formals(tau))) { ## New style: requires value of (unpenalised) objective function at Poisson process mcargs.unpen <- mcargs mcargs.unpen$pspace <- pspace.unpen ## Evaluate using undocumented argument 'evalpar' to mincontrast mcargs.unpen$evalpar <- poispar poisval <- do.call(mincontrast, mcargs.unpen) tau <- tau(X, poisval=poisval) } else { ## old style tau <- tau(X) } check.1.real(tau) ## update 'tau' in argument list pspace$tau <- tau mcargs$pspace$tau <- tau } ## .............. FIT ....................... if(verbose) splat("Starting minimum contrast fit") mcfit <- do.call(mincontrast, mcargs) if(verbose) splat("Returned from minimum contrast fit") ## .......................................... ## extract fitted parameters and reshape if(!usecanonical) { optpar.canon <- NULL optpar.human <- mcfit$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- mcfit$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon, margs=margs) names(optpar.human) <- names(startpar.human) } mcfit$par <- optpar.human mcfit$par.canon <- optpar.canon ## extract fitted parameters optpar <- mcfit$par names(optpar) <- names(startpar) mcfit$par <- optpar # Return results for DPPs if(isDPP){ extra <- list(Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, lambda = lambda) attr(mcfit, "info") <- extra if(verbose) splat("Returning from clusterfit (DPP case)") return(mcfit) } ## Extra stuff for ordinary cluster/lgcp models ## imbue with meaning ## infer model parameters mcfit$modelpar <- info$interpret(optpar.human, lambda) mcfit$internal <- list(model=ifelse(isPCP, clusters, "lgcp")) mcfit$covmodel <- dots$covmodel if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- mcfit$par[["kappa"]] # mu = mean cluster size mu <- lambda/kappa } else { # LGCP: extract variance parameter sigma2 sigma2 <- mcfit$par[["sigma2"]] # mu = mean of log intensity mu <- log(lambda) - sigma2/2 } ## Parameter values (new format) mcfit$mu <- mu mcfit$clustpar <- info$checkpar(mcfit$par, native=FALSE, strict=strict) mcfit$clustargs <- info$outputshape(dots$margs) ## The old fit fun that would have been used (DO WE NEED THIS?) 
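## 'FitFun' records the name of the legacy fitting interface that this
## code supersedes, e.g. "thomas.estK" or "lgcp.estpcf"; it is apparently
## retained for backward compatibility (see the question in the comment
## above). The modern user-level entry point is kppm(); a minimal sketch,
## assuming the spatstat packages are attached and the 'redwood' dataset
## is available:
##   fit <- kppm(redwood ~ 1, clusters = "Thomas",
##               method = "mincon", statistic = "K")
##   print(fit)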
FitFun <- paste0(tolower(clusters), ".est", statistic) extra <- list(FitFun = FitFun, Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, isPCP = isPCP, lambda = lambda, pspace.used = pspace) # Modified from call to 'clusterfit' attr(mcfit, "info") <- extra if(verbose) splat("Returning from clusterfit") return(mcfit) } ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ## C o m p o s i t e L i k e l i h o o d ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< kppmComLik <- function(X, Xname, po, clusters, control=list(), stabilize=TRUE, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ..., pspace=NULL) { pspace <- do.call(make.pspace, resolve.defaults( list(fitmethod="clik2", clusters=clusters), list(...), ## ellipsis arguments override pspace as.list(pspace), .MatchNull=FALSE)) W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) ## identify unordered pairs of points that contribute cl <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE) dIJ <- cl$d # compute weights for unordered pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) sumweight <- safePositiveValue(sum(wIJ)) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at pairs of data points # and c.d.f. of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) # lambdaIJ <- lambda^2 # compute cdf of distance between two uniform random points in W g <- distcdf(W, delta=rmax/4096) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window # lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # lambda(x_i) * lambda(x_j) # lambdaIJ <- lambdaX[I] * lambdaX[J] # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM, delta=rmax/4096) # scaling constant is (integral of intensity)^2 gscale <- safePositiveValue(integral.im(lambdaM)^2, default=npoints(X)^2) } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf selfstart <- info$selfstart isPCP <- info$isPCP resolveshape <- info$resolveshape modelname <- info$modelname # Assemble information required for computing pair correlation if(is.function(resolveshape)) { # Additional 'shape' parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs shapemodel <- do.call(resolveshape, clustargs)$covmodel } else shapemodel <- NULL pcfunargs <- shapemodel margs <- pcfunargs$margs # determine starting parameter values startpar <- selfstart(X) pspace.given <- pspace #' ............ permit negative parameter values ................... 
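#' (When pspace$strict is FALSE, the extra argument strict=FALSE is
#' passed through to the theoretical pair correlation, so the optimiser
#' may step outside the theoretically valid parameter range without
#' triggering errors; the final estimates are still converted and
#' validated by info$checkpar at the end.)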
strict <- !isFALSE(pspace$strict) if(!strict) pcfunargs <- append(pcfunargs, list(strict=FALSE)) #' ............ parameter corresponding to Poisson process ......... if(isDPP) { poispar <- NULL } else if(isPCP) { if(!("kappa" %in% names(startpar))) stop("Internal error: startpar does not include 'kappa'") poispar <- startpar poispar[["kappa"]] <- Inf } else { ## LGCP if(!("sigma2" %in% names(startpar))) stop("Internal error: startpar does not include 'sigma2'") poispar <- startpar poispar[["sigma2"]] <- .Machine$double.eps # i.e. 0 } #' ............ use canonical parameters ......................... usecanonical <- isTRUE(pspace$canonical) if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar poispar.human <- poispar if(usecanonical) { pcftheo <- pcfun startpar <- tocanonical(startpar, margs=margs) if(!is.null(poispar)) poispar <- tocanonical(poispar, margs=margs) pcfun <- function(par, ...) { pcftheo(tohuman(par, ...), ...) } } #' ............ penalty ...................................... penalty <- pspace$penalty penal.args <- pspace$penal.args tau <- pspace$tau %orifnull% 1 if(is.function(penalty)) { ## penalised optimisation if(usecanonical) { penalty.human <- penalty penalty <- function(par, ...) { penalty.human(tohuman(par, ...), ...) } } ## data-dependent arguments in penalty if(is.function(penal.args)) penal.args <- penal.args(X) ## exchange rate (defer evaluation if it is a function) if(!is.function(tau)) check.1.real(tau) ## reinsert in 'pspace' for insurance pspace$penalty <- penalty pspace$penal.args <- penal.args pspace$tau <- tau } #' ............ debugger ..................................... TRACE <- isTRUE(pspace$trace) if(SAVE <- isTRUE(pspace$save)) { saveplace <- new.env() assign("h", NULL, envir=saveplace) } else saveplace <- NULL # ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } #' .......... define objective function ...................... if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(paco), ## The following variables are temporarily omitted ## in order to calculate the objective function ## without using them, or their side effects. 
penalty=NULL, # updated below penal.args=NULL, # updated below tau=NULL, # updated below TRACE=FALSE, # updated below saveplace=NULL, # updated below BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco' in its environment) # This is the log composite likelihood minus the constant term # 2 * (sum(log(lambdaIJ)) - npairs * log(gscale)) obj <- function(par, objargs) { with(objargs, { logprod <- sum(log(safePositiveValue(paco(dIJ, par)))) integ <- unlist(stieltjes(paco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logcl <- 2*(logprod - sumweight * log(integ)) logcl <- safeFiniteValue(logcl, default=-BIGVALUE) ## penalty if(hasPenalty <- is.function(penalty)) { straf <- do.call(penalty, append(list(par), penal.args)) logclPEN <- logcl - tau * straf } ## debugger if(isTRUE(TRACE)) { cat("Parameters:", fill=TRUE) print(par) splat("\tlogprod:", logprod) splat("\tinteg:", integ) splat("log composite likelihood:", logcl) if(hasPenalty) splat("penalised log composite likelihood:", logclPEN) } ## save state if(is.environment(saveplace)) { h <- get("h", envir=saveplace) value <- list(logcl=logcl) if(hasPenalty) value <- append(value, list(logclPEN=logclPEN)) hplus <- as.data.frame(append(par, value)) h <- rbind(h, hplus) assign("h", h, envir=saveplace) } return(if(hasPenalty) logclPEN else logcl) }, enclos=objargs$envir) } ## Determine the values of some parameters ## (1) Determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) ## (2) Evaluate exchange rate 'tau' if(is.function(penalty) && is.function(tau)) { ## data-dependent exchange rate 'tau': evaluate now if("poisval" %in% names(formals(tau))) { ## New style: requires value of (unpenalised) objective function at Poisson process poisval <- obj(poispar, objargs) tau <- tau(X, poisval=poisval) } else { tau <- tau(X) } check.1.real(tau) } ## Now insert the penalty, etc objargs <- resolve.defaults(list(penalty = penalty, penal.args = penal.args, tau = tau, saveplace = saveplace, TRACE = TRACE), objargs) } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(wpaco), penalty=NULL, # updated below penal.args=NULL, # updated below tau=NULL, # updated below TRACE=FALSE, # updated below saveplace=NULL, # updated below BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log composite likelihood minus the constant term # 2 * (sum(wIJ * log(lambdaIJ)) - sumweight * log(gscale)) obj <- function(par, objargs) { with(objargs, { integ <- unlist(stieltjes(wpaco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logcl <- safeFiniteValue( 2*(sum(wIJ * log(safePositiveValue(paco(dIJ, par)))) - sumweight * log(integ)), default=-BIGVALUE) ## penalty if(hasPenalty <- is.function(penalty)) { straf <- do.call(penalty, append(list(par), penal.args)) logclPEN <- logcl - tau * straf } ## debugger if(isTRUE(TRACE)) { cat("Parameters:", fill=TRUE) print(par) splat("\tinteg:", integ) splat("log composite likelihood:", logcl) if(hasPenalty) splat("penalised log composite likelihood:", logclPEN) } if(is.environment(saveplace)) { h <- get("h", 
envir=saveplace) value <- list(logcl=logcl) if(hasPenalty) value <- append(value, list(logclPEN=logclPEN)) hplus <- as.data.frame(append(par, value)) h <- rbind(h, hplus) assign("h", h, envir=saveplace) } return(if(hasPenalty) logclPEN else logcl) }, enclos=objargs$envir) } ## Determine the values of some parameters ## (1) Determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) ## (2) Evaluate exchange rate 'tau' if(is.function(penalty) && is.function(tau)) { ## data-dependent exchange rate 'tau': evaluate now if("poisval" %in% names(formals(tau))) { ## New style: requires value of (unpenalised) objective function at Poisson process poisval <- obj(poispar, objargs) tau <- tau(X, poisval=poisval) } else { tau <- tau(X) } check.1.real(tau) } ## Now insert the penalty, etc objargs <- resolve.defaults(list(penalty = penalty, penal.args = penal.args, tau = tau, saveplace = saveplace, TRACE = TRACE), objargs) } ## ...................... Optimization settings ........................ if(stabilize) { ## Numerical stabilisation ## evaluate objective at starting state startval <- obj(startpar, objargs) ## use to determine appropriate global scale smallscale <- sqrt(.Machine$double.eps) fnscale <- -max(abs(startval), smallscale) parscale <- pmax(abs(startpar), smallscale) scaling <- list(fnscale=fnscale, parscale=parscale) } else { scaling <- list(fnscale=-1) } ## Update list of algorithm control arguments control.updated <- resolve.defaults(control, scaling, list(trace=0)) ## Initialise list of all arguments to 'optim' optargs <- list(par=startpar, fn=obj, objargs=objargs, control=control.updated, method=algorithm) ## DPP case: check startpar and modify algorithm changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar.human) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } if(isTRUE(pspace$debug)) { splat("About to optimize... Objective function arguments:") print(objargs) } ## .......... optimize it .............................. opt <- do.call(optim, optargs) ## raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) ## .......... extract fitted parameters ..................... 
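## (If optimisation was performed on the canonical scale -- an
## unconstrained reparametrisation intended to be better conditioned --
## the optimum is mapped back to the interpretable scale by tohuman();
## both versions are retained below, in opt$par and opt$par.canon.)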
if(!usecanonical) { optpar.canon <- NULL optpar.human <- opt$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- opt$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon, margs=margs) names(optpar.human) <- names(startpar.human) } opt$par <- optpar.human opt$par.canon <- optpar.canon ## save starting values in 'opt' for consistency with mincontrast() opt$startpar <- startpar.human ## Finish in DPP case if(!is.null(DPP)){ ## all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace.given = pspace.given, pspace.used = pspace) # pack up clusters <- update(clusters, as.list(opt$par)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) if(SAVE) { h <- get("h", envir=saveplace) if(!is.null(h)) class(h) <- unique(c("traj", class(h))) attr(result, "h") <- h } return(result) } ## meaningful model parameters modelpar <- info$interpret(optpar.human, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar.human[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar.human[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace.given = pspace.given, pspace.used = pspace) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar.human, par.canon = optpar.canon, clustpar = info$checkpar(par=optpar.human, native=FALSE, strict=strict), clustargs = info$outputshape(shapemodel$margs), modelpar = modelpar, covmodel = shapemodel, Fit = Fit) if(SAVE) { h <- get("h", envir=saveplace) if(!is.null(h)) class(h) <- unique(c("traj", class(h))) attr(result, "h") <- h } return(result) } ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ## P a l m L i k e l i h o o d ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< kppmPalmLik <- function(X, Xname, po, clusters, control=list(), stabilize=TRUE, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ..., pspace=NULL) { pspace <- do.call(make.pspace, resolve.defaults( list(fitmethod="palm", clusters=clusters), list(...), ## ellipsis arguments override pspace as.list(pspace), .MatchNull=FALSE)) W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) ## identify unordered pairs of points that contribute cl <- closepairs(X, rmax, twice=FALSE, neat=FALSE) dIJ <- cl$d ## compute weights for unordered pairs of points. Must be symmetric. 
if(is.function(weightfun)) { wIJ <- weightfun(dIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) } ## first point in each *ordered* pair J <- c(cl$i, cl$j) ## convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at data points # and c.d.f. of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) lambdaJ <- rep(lambda, length(J)) # compute cdf of distance between a randomly-selected point in X # and a uniform random point in W g <- distcdf(X, M, delta=rmax/4096) # scaling constant is (integral of intensity) * (number of points) gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) lambdaJ <- lambdaX[J] # compute cdf of distance between a randomly-selected point in X # and a random point in W with density proportional to intensity function g <- distcdf(X, M, dV=lambdaM, delta=rmax/4096) # scaling constant is (integral of intensity) * (number of points) gscale <- safePositiveValue(integral.im(lambdaM) * npoints(X), default=npoints(X)^2) } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf selfstart <- info$selfstart isPCP <- info$isPCP resolveshape <- info$resolveshape modelname <- info$modelname # Assemble information required for computing pair correlation if(is.function(resolveshape)) { # Additional 'shape' parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs shapemodel <- do.call(resolveshape, clustargs)$covmodel } else shapemodel <- NULL pcfunargs <- shapemodel margs <- pcfunargs$margs # determine starting parameter values startpar <- selfstart(X) pspace.given <- pspace #' ............ permit negative parameter values ................ strict <- !isFALSE(pspace$strict) if(!strict) pcfunargs <- append(pcfunargs, list(strict=strict)) #' ............ parameter corresponding to Poisson process ...... if(isDPP) { poispar <- NULL } else if(isPCP) { if(!("kappa" %in% names(startpar))) stop("Internal error: startpar does not include 'kappa'") poispar <- startpar poispar[["kappa"]] <- Inf } else { #' LGCP if(!("sigma2" %in% names(startpar))) stop("Internal error: startpar does not include 'sigma2'") poispar <- startpar poispar[["sigma2"]] <- .Machine$double.eps # i.e. 0 } #' ............ use canonical parameters ......................... usecanonical <- isTRUE(pspace$canonical) if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar poispar.human <- poispar if(usecanonical) { pcftheo <- pcfun startpar <- tocanonical(startpar, margs=margs) if(!is.null(poispar)) poispar <- tocanonical(poispar, margs=margs) pcfun <- function(par, ...) 
{ pcftheo(tohuman(par, ...), ...) } } #' ............ penalty ....................................... penalty <- pspace$penalty penal.args <- pspace$penal.args tau <- pspace$tau %orifnull% 1 if(is.function(penalty)) { ## penalised optimisation if(usecanonical) { penalty.human <- penalty penalty <- function(par, ...) { penalty.human(tohuman(par, ...), ...) } } ## data-dependent arguments in penalty if(is.function(penal.args)) penal.args <- penal.args(X) ## exchange rate (defer evaluation if it is a function) if(!is.function(tau)) check.1.real(tau) ## reinsert in 'pspace' for insurance pspace$penalty <- penalty pspace$penal.args <- penal.args pspace$tau <- tau } #' ............ debugger ...................................... TRACE <- isTRUE(pspace$trace) if(SAVE <- isTRUE(pspace$save)) { saveplace <- new.env() assign("h", NULL, envir=saveplace) } else saveplace <- NULL ## ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, g=g, gscale=gscale, sumloglam=safeFiniteValue(sum(log(lambdaJ))), envir=environment(paco), ## The following variables are temporarily omitted ## in order to calculate the objective function ## without using them, or their side effects. penalty=NULL, # updated below penal.args=NULL, # updated below tau=NULL, # updated below TRACE=FALSE, # updated below saveplace=NULL, # updated below BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, { integ <- unlist(stieltjes(paco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logplik <- safeFiniteValue( sumloglam + 2 * sum(log(safePositiveValue(paco(dIJ, par)))) - gscale * integ, default=-BIGVALUE) ## penalty if(hasPenalty <- is.function(penalty)) { straf <- do.call(penalty, append(list(par), penal.args)) logplikPEN <- logplik - tau * straf } ## debugger if(isTRUE(TRACE)) { cat("Parameters:", fill=TRUE) print(par) splat("integral:", integ) splat("log Palm likelihood:", logplik) if(hasPenalty) splat("penalised log Palm likelihood:", logplikPEN) } if(is.environment(saveplace)) { h <- get("h", envir=saveplace) value <- list(logplik=logplik) if(hasPenalty) value <- append(value, list(logplikPEN=logplikPEN)) hplus <- as.data.frame(append(par, value)) h <- rbind(h, hplus) assign("h", h, envir=saveplace) } return(if(hasPenalty) logplikPEN else logplik) }, enclos=objargs$envir) } ## Determine the values of some parameters ## (1) Determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) ## (2) Evaluate exchange rate 'tau' if(is.function(penalty) && is.function(tau)) { ## data-dependent exchange rate 'tau': evaluate now if("poisval" %in% names(formals(tau))) { ## New style: requires value of (unpenalised) objective function at Poisson process poisval <- obj(poispar, objargs) tau <- tau(X, poisval=poisval) } else { tau <- tau(X) } check.1.real(tau) } objargs <- resolve.defaults(list(penalty = penalty, penal.args = penal.args, tau = tau, saveplace = saveplace, TRACE = TRACE), objargs) } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) 
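# (wpaco(d) = pcf(d; theta) * weight(d) is the integrand whose Stieltjes
# integral against the interpoint-distance cdf 'g' gives the weighted
# integral term of the log Palm likelihood, exactly as paco() does in
# the unweighted branch above.)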
force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, g=g, gscale=gscale, wsumloglam=safeFiniteValue( sum(c(wIJ,wIJ) * safeFiniteValue(log(lambdaJ))) ), envir=environment(wpaco), penalty=NULL, # updated below penal.args=NULL, # updated below tau=NULL, # updated below TRACE=FALSE, # updated below saveplace=NULL, # updated below BIGVALUE=1, # updated below SMALLVALUE=.Machine$double.eps) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, { integ <- unlist(stieltjes(wpaco, g, par=par)) integ <- pmax(SMALLVALUE, integ) logplik <- safeFiniteValue(wsumloglam + 2 * sum(wIJ * log(safePositiveValue(paco(dIJ, par)))) - gscale * integ, default=-BIGVALUE) ## penalty if(hasPenalty <- is.function(penalty)) { straf <- do.call(penalty, append(list(par), penal.args)) logplikPEN <- logplik - tau * straf } ## debugger if(isTRUE(TRACE)) { cat("Parameters:", fill=TRUE) print(par) splat("integral:", integ) splat("log Palm likelihood:", logplik) if(hasPenalty) splat("penalised log Palm likelihood:", logplikPEN) } if(is.environment(saveplace)) { h <- get("h", envir=saveplace) value <- list(logplik=logplik) if(hasPenalty) value <- append(value, list(logplikPEN=logplikPEN)) hplus <- as.data.frame(append(par, value)) h <- rbind(h, hplus) assign("h", h, envir=saveplace) } return(if(hasPenalty) logplikPEN else logplik) }, enclos=objargs$envir) } ## Determine the values of some parameters ## (1) Determine a suitable large number to replace Inf objargs$BIGVALUE <- bigvaluerule(obj, objargs, startpar) ## (2) Evaluate exchange rate 'tau' if(is.function(penalty) && is.function(tau)) { ## data-dependent exchange rate 'tau': evaluate now if("poisval" %in% names(formals(tau))) { ## New style: requires value of (unpenalised) objective function at Poisson process poisval <- obj(poispar, objargs) tau <- tau(X, poisval=poisval) } else { tau <- tau(X) } check.1.real(tau) } ## Now insert penalty, etc. objargs <- resolve.defaults(list(penalty = penalty, penal.args = penal.args, tau = tau, saveplace = saveplace, TRACE = TRACE), objargs) } ## ...................... Optimization settings ........................ if(stabilize) { ## Numerical stabilisation ## evaluate objective at starting state startval <- obj(startpar, objargs) ## use to determine appropriate global scale smallscale <- sqrt(.Machine$double.eps) fnscale <- -max(abs(startval), smallscale) parscale <- pmax(abs(startpar), smallscale) scaling <- list(fnscale=fnscale, parscale=parscale) } else { scaling <- list(fnscale=-1) } ## Update list of algorithm control arguments control.updated <- resolve.defaults(control, scaling, list(trace=0)) ## Initialise list of all arguments to 'optim' optargs <- list(par=startpar, fn=obj, objargs=objargs, control=control.updated, method=algorithm) ## DPP case: check startpar and modify algorithm changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar.human) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } ## ....................................................................... if(isTRUE(pspace$debug)) { splat("About to optimize... 
Objective function arguments:") print(objargs) } # optimize it opt <- do.call(optim, optargs) # raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) ## Extract optimal values of parameters if(!usecanonical) { optpar.canon <- NULL optpar.human <- opt$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- opt$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon, margs=margs) names(optpar.human) <- names(startpar.human) } opt$par <- optpar.human opt$par.canon <- optpar.canon ## save starting values in 'opt' for consistency with minconfit() opt$startpar <- startpar.human ## Finish in DPP case if(!is.null(DPP)){ ## all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace.given = pspace.given, pspace.used = pspace) # pack up clusters <- update(clusters, as.list(optpar.human)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) if(SAVE) { h <- get("h", envir=saveplace) if(!is.null(h)) class(h) <- unique(c("traj", class(h))) attr(result, "h") <- h } return(result) } # meaningful model parameters modelpar <- info$interpret(optpar.human, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar.human[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar.human[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value, pspace.given = pspace.given, pspace.used = pspace) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar.human, par.canon = optpar.canon, clustpar = info$checkpar(par=optpar.human, native=FALSE, strict=strict), clustargs = info$outputshape(shapemodel$margs), modelpar = modelpar, covmodel = shapemodel, Fit = Fit) if(SAVE) { h <- get("h", envir=saveplace) if(!is.null(h)) class(h) <- unique(c("traj", class(h))) attr(result, "h") <- h } return(result) } ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ## A d a p t i v e C o m p o s i t e L i k e l i h o o d ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ## ........... contributed by Chiara Fend ................... 
## needs nonlinear equation solver nleqslv kppmCLadap <- function(X, Xname, po, clusters, control, weightfun, rmax=NULL, epsilon=0.01, DPP=NULL, algorithm="Broyden", ..., startpar=NULL, globStrat="dbldog") { if(!requireNamespace("nleqslv", quietly=TRUE)) stop(paste("The package", sQuote("nleqslv"), "is required"), call.=FALSE) W <- as.owin(X) if(is.null(rmax)) # specified for numerical stability rmax <- shortside(Frame(W)) # identify pairs of points that might contribute cl <- closepairs(X, rmax) dIJ <- cl$d #pairwise distances Rmin <- min(dIJ) indexmin <- which(dIJ==Rmin) #for later use # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs #' penalised estimation is not supported if(any(m <- (c("penalised", "pspace") %in% names(otherargs)))) warning(paste(ngettext(sum(m), "Argument", "Arguments"), commasep(sQuote(c("penalised", "pspace")[m])), ngettext(sum(m), "is", "are"), "not supported for adaptive composite likelihood"), call.=FALSE) # compute intensity at pairs of data points # and c.d.f. of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) # compute cdf of distance between two uniform random points in W g <- distcdf(W, delta=rmax/4096) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window # lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM, delta=rmax/4096) # scaling constant is (integral of intensity)^2 gscale <- safePositiveValue(integral.im(lambdaM)^2, default=npoints(X)^2) } isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf dpcfun <- info$Dpcf selfstart <- info$selfstart isPCP <- info$isPCP resolveshape <- info$resolveshape modelname <- info$modelname # Assemble information required for computing pair correlation if(is.function(resolveshape)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs shapemodel <- do.call(resolveshape, clustargs)$covmodel } else shapemodel <- NULL pcfunargs <- shapemodel ## determine starting parameter values if(is.null(startpar)) { startpar <- selfstart(X) } else if(!isDPP){ startpar <- info$checkpar(startpar, native=TRUE) } ## optimization will be over the logarithms of the parameters startparLog <- log(startpar) pcfunLog <- function(par, ...) { pcfun(exp(par), ...) } dpcfunLog <- function(par, ...) { dpcfun(exp(par), ...) } # create local functions to evaluate pair correlation and its gradient # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfunLog, append(list(par=par, rvals=d), pcfunargs)) } dpaco <- function(d, par) { do.call(dpcfunLog, append(list(par=par, rvals=d), pcfunargs)) } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] #' .......... define objective function ...................... 
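#' Unlike methods 'clik2' and 'palm', which maximise an objective with
#' optim(), this method solves the weighted composite-score equations
#' u(theta) = 0 with the root finder nleqslv(); wlogcl2score() below
#' returns the score vector, and the weight
#' weightfun(epsilon * M / (g(d) - 1)), where M = |g(0; theta) - 1| for
#' cluster models, adaptively downweights pairs whose distances carry
#' little information at the current parameter value.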
# create local function to evaluate weight(epsilon*M/(pcf(d)-1)) weight <- function(d, par) { y <- paco(d=d, par=par) # calculate M (only needs to be calculated for cluster models) M <- 1 if(!isDPP){ M <- abs(paco(d=0, par=par)-1) } return(weightfun(epsilon*M/(y-1))) } wlogcl2score <- function(par, paco, dpaco, dIJ, gscale, epsilon, cdf=g){ p <- length(par) temp <- rep(0, p) # check if current parameter is valid, if not return inf if(isDPP){ if(length(par)==1 && is.null(names(par))) names(par) <- clusters$freepar mod <- update(clusters, as.list(exp(par))) if(!valid(mod)){ return(rep(Inf, p)) } } # everything can be computed wdIJ <- weight(d=dIJ, par=par) index <- unique(c(which(wdIJ!=0), indexmin)) dIJcurrent <- dIJ[index] for(i in 1:p){ parname <- names(par)[i] # weighted derivatives wrt log of parname dpcfweighted <- function(d, par){ y <- dpaco(d = d, par = par)[parname,]*exp(par[i]) return(y*weight(d = d, par = par)) } temp[i] <- sum(dpcfweighted(d = dIJcurrent, par=par)/paco(d = dIJcurrent, par = par)) - gscale * stieltjes(dpcfweighted,cdf, par=par)$f } return(temp) } ## ................. optimize it .............................. opt <- nleqslv::nleqslv(x = startparLog, fn = wlogcl2score, method = algorithm, global = globStrat, control = control, paco=paco, dpaco=dpaco, dIJ=dIJ, gscale=gscale, epsilon=epsilon) ## .......... extract fitted parameters on original scale ............... optpar <- exp(opt$x) names(optpar) <- names(startpar) ## insert entries expected in 'opt' opt$par <- optpar opt$startpar <- startpar ## Finish in DPP case if(isDPP){ # all info that depends on the fitting method: Fit <- list(method = "adapcl", cladapfit = opt, weightfun = weightfun, rmax = rmax, epsilon = epsilon, objfun = wlogcl2score, objargs = control, estfunc = opt$fvec) # pack up clusters <- update(clusters, as.list(exp(opt$x))) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } ## meaningful model parameters modelpar <- info$interpret(optpar, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "adapcl", cladapfit = opt, weightfun = weightfun, rmax = rmax, epsilon = epsilon, objfun = wlogcl2score, objargs = control, estfunc = opt$fvec) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar, clustpar = info$checkpar(par=optpar, native=FALSE), clustargs = info$outputshape(shapemodel$margs), modelpar = modelpar, covmodel = shapemodel, Fit = Fit) return(result) } improve.kppm <- local({ fnc <- function(r, eps, g){ (g(r) - 1)/(g(0) - 1) - eps} improve.kppm <- function(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) { verifyclass(object, "kppm") type <- match.arg(type) gfun <- pcfmodel(object) X <- object$X win <- as.owin(X) ## simple (rectangular) grid quadrature scheme ## (using pixels with centers inside 
owin only) mask <- as.mask(win, dimyx = dimyx) wt <- pixellate(win, W = mask) wt <- wt[mask] Uxy <- rasterxy.mask(mask) U <- ppp(Uxy$x, Uxy$y, window = win, check=FALSE) U <- U[mask] # nU <- npoints(U) Yu <- pixellate(X, W = mask) Yu <- Yu[mask] ## covariates at quadrature points po <- object$po Z <- model.images(po, mask) Z <- sapply(Z, "[", i=U) ##obtain initial beta estimate using composite likelihood beta0 <- coef(po) ## determining the dependence range if (type != "clik1" && is.null(rmax)) { diamwin <- diameter(win) rmax <- if(fnc(diamwin, eps.rmax, gfun) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diameter(win), eps=eps.rmax, g=gfun)$root if(verbose) splat(paste0("type: ", type, ", ", "dependence range: ", rmax, ", ", "dimyx: ", dimyx, ", g(0) - 1:", gfun(0) -1)) } ## preparing the WCL case if (type == "wclik1") Kmax <- 2*pi * integrate(function(r){r * (gfun(r) - 1)}, lower=0, upper=rmax)$value * exp(c(Z %*% beta0)) ## the g()-1 matrix without tapering if (!fast || (vcov && !fast.vcov)){ if (verbose) cat("computing the g(u_i,u_j)-1 matrix ...") gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) if (verbose) cat("..Done.\n") } if ( (fast && type == "quasi") | fast.vcov ){ if (verbose) cat("computing the sparse G-1 matrix ...\n") ## Non-zero gminus1 entries (when using tapering) cp <- crosspairs(U,U,rmax,what="ijd") if (verbose) cat("crosspairs done\n") Gtap <- (gfun(cp$d) - 1) if(vcov){ if(fast.vcov){ gminus1 <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=Gtap, dims=c(U$n, U$n)) } else{ if(fast) gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) } } if (verbose & type!="quasi") cat("..Done.\n") } if (type == "quasi" && fast){ mu0 <- exp(c(Z %*% beta0)) * wt mu0root <- sqrt(mu0) sparseG <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=mu0root[cp$i] * mu0root[cp$j] * Gtap, dims=c(U$n, U$n)) Rroot <- Matrix::Cholesky(sparseG, perm = TRUE, Imult = 1) ##Imult=1 means that we add 1*I if (verbose) cat("..Done.\n") } ## iterative weighted least squares/Fisher scoring bt <- beta0 noItr <- 1 repeat { mu <- exp(c(Z %*% bt)) * wt mu.root <- sqrt(mu) ## the core of estimating equation: ff=phi ## in case of ql, \phi=V^{-1}D=V_\mu^{-1/2}x where (G+I)x=V_\mu^{1/2} Z ff <- switch(type, clik1 = Z, wclik1= Z/(1 + Kmax), quasi = if(fast){ Matrix::solve(Rroot, mu.root * Z)/mu.root } else{ solve(diag(U$n) + t(gminus1 * mu), Z) } ) ##alternative ##R=chol(sparseG+sparseMatrix(i=c(1:U$n),j=c(1:U$n), ## x=rep(1,U$n),dims=c(U$n,U$n))) ##ff2 <- switch(type, ## clik1 = Z, ## wclik1= Z/(1 + Kmax), ## quasi = if (fast) ## solve(R,solve(t(R), mu.root * Z))/mu.root ## else solve(diag(U$n) + t(gminus1 * mu), Z)) ## print(summary(as.numeric(ff)-as.numeric(ff2))) ## the estimating equation: u_f(\beta) uf <- (Yu - mu) %*% ff ## inverse of minus expectation of Jacobian matrix: I_f Jinv <- solve(t(Z * mu) %*% ff) if(maxIter==0){ ## This is a built-in early exit for vcov internal calculations break } deltabt <- as.numeric(uf %*% Jinv) if (any(!is.finite(deltabt))) { warning(paste("Infinite value, NA or NaN appeared", "in the iterative weighted least squares algorithm.", "Returning the initial intensity estimate unchanged."), call.=FALSE) return(object) } ## updating the present estimate of \beta bt <- bt + deltabt if (verbose) splat(paste0("itr: ", noItr, ",\nu_f: ", as.numeric(uf), "\nbeta:", bt, "\ndeltabeta:", deltabt)) if (max(abs(deltabt/bt)) <= tolerance || max(abs(uf)) <= tolerance) break if (noItr > maxIter) stop("Maximum number of iterations reached without convergence.") noItr <- noItr + 1 } out <- object 
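## Assemble the result: copy the original model object, keep the initial
## composite-likelihood coefficients alongside, and install the improved
## ones. A minimal usage sketch (assuming a fitted cluster model 'fit',
## e.g. fit <- kppm(redwood ~ x, "Thomas")):
##   fitQ <- improve.kppm(fit, type = "quasi", vcov = TRUE)
##   coef(fitQ)          # improved trend coefficients
##   fitQ$improve$type   # "quasi"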
out$po$coef.orig <- beta0 out$po$coef <- bt loc <- if(is.sob(out$lambda)) as.mask(out$lambda) else mask out$lambda <- predict(out$po, locations = loc) out$improve <- list(type = type, rmax = rmax, dimyx = dimyx, fast = fast, fast.vcov = fast.vcov) if(save.internals){ out$improve <- append(out$improve, list(ff=ff, uf=uf, J.inv=Jinv)) } if(vcov){ if (verbose) cat("computing the asymptotic variance ...\n") ## variance of the estimation equation: Sigma_f = Var(u_f(bt)) trans <- if(fast) Matrix::t else t Sig <- trans(ff) %*% (ff * mu) + trans(ff * mu) %*% gminus1 %*% (ff * mu) ## note Abdollah's G does not have mu.root inside... ## the asymptotic variance of \beta: ## inverse of the Godambe information matrix out$vcov <- as.matrix(Jinv %*% Sig %*% Jinv) } return(out) } improve.kppm }) is.kppm <- function(x) { inherits(x, "kppm")} print.kppm <- print.dppm <- function(x, ...) { isPCP <- x$isPCP # detect DPP isDPP <- inherits(x, "dppm") # handle outdated objects - which were all cluster processes if(!isDPP && is.null(isPCP)) isPCP <- TRUE terselevel <- spatstat.options('terse') digits <- getOption('digits') splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isDPP) "determinantal" else if(isPCP) "cluster" else "Cox", "point process model") Xname <- x$Xname if(waxlyrical('extras', terselevel) && nchar(Xname) < 20) { has.subset <- ("subset" %in% names(x$call)) splat("Fitted to", if(has.subset) "(a subset of)" else NULL, "point pattern dataset", sQuote(Xname)) } if(waxlyrical('gory', terselevel)) { fittedby <- "Fitted by" #' detect whether fit used a penalty if(!is.null(x$Fit$pspace$penalty)) fittedby <- "Fitted by penalised" switch(x$Fit$method, mincon = { splat(fittedby, "minimum contrast") splat("\tSummary statistic:", x$Fit$StatName) }, clik =, clik2 = { splat(fittedby, "maximum second order composite likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat(fittedby, "maximum Palm likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, adapcl = { splat("Fitted by adaptive second order composite likelihood") splat("\tepsilon =", x$Fit$epsilon) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, warning(paste("Unrecognised fitting method", sQuote(x$Fit$method))) ) } #' optimization trace if(!is.null(attr(x, "h"))) splat("[Includes history of evaluations of objective function]") parbreak(terselevel) # ............... trend ......................... if(!(isDPP && is.null(x$fitted$intensity))) print(x$po, what="trend") # ..................... clusters ................ 
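# For Poisson cluster models the block below also reports two derived
# summaries: the cluster strength phi = g(0) - 1 and the sibling
# probability psib = phi/(1 + phi), the chance that two points lying
# arbitrarily close together are offspring of the same parent.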
# DPP case if(isDPP){ splat("Fitted DPP model:") print(x$fitted) return(invisible(NULL)) } tableentry <- spatstatClusterModelInfo(x$clusters) splat(if(isPCP) "Cluster" else "Cox", "model:", tableentry$printmodelname(x)) cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pc <- x$par.canon if(!is.null(pc)) { splat("Fitted canonical parameters:") print(pc, digits=digits) } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } if(isDPP) { rx <- repul(x) splat(if(is.im(rx)) "(Average) strength" else "Strength", "of repulsion:", signif(mean(rx), 4)) } if(isPCP) { parbreak(terselevel) g <- pcfmodel(x) phi <- g(0) - 1 splat("Cluster strength: phi = ", signif(phi, 4)) psib <- phi/(1+phi) splat("Sibling probability: psib = ", signif(psib, 4)) } invisible(NULL) } plot.kppm <- local({ plotem <- function(x, ..., main=dmain, dmain) { plot(x, ..., main=main) } plot.kppm <- function(x, ..., what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) { ## catch objectname from dots if present otherwise deparse x: if(missing(xname)) xname <- short.deparse(substitute(x)) nochoice <- missing(what) what <- pickoption("plot type", what, c(statistic="statistic", intensity="intensity", cluster="cluster"), multi=TRUE) ## handle older objects Fit <- x$Fit if(is.null(Fit)) { warning("kppm object is in outdated format") Fit <- x Fit$method <- "mincon" } ## Catch locations for clusters if given loc <- list(...)$locations inappropriate <- (nochoice & ((what == "intensity") & (x$stationary))) | ((what == "statistic") & (Fit$method != "mincon")) | ((what == "cluster") & (identical(x$isPCP, FALSE))) | ((what == "cluster") & (!x$stationary) & is.null(loc)) if(!nochoice && !x$stationary && "cluster" %in% what && is.null(loc)) stop("Please specify additional argument ", sQuote("locations"), " which will be passed to the function ", sQuote("clusterfield"), ".") if(any(inappropriate)) { what <- what[!inappropriate] if(length(what) == 0){ message("Nothing meaningful to plot. Exiting...") return(invisible(NULL)) } } pause <- pause && (length(what) > 1) if(pause) { opa <- par(ask=TRUE) on.exit(par(opa)) } for(style in what) switch(style, intensity={ plotem(x$po, ..., dmain=c(xname, "Intensity"), how="image", se=FALSE) }, statistic={ plotem(Fit$mcfit, ..., dmain=c(xname, Fit$StatName)) }, cluster={ plotem(clusterfield(x, locations = loc, verbose=FALSE), ..., dmain=c(xname, "Fitted cluster")) }) return(invisible(NULL)) } plot.kppm }) predict.kppm <- predict.dppm <- function(object, ...) { se <- resolve.1.default(list(se=FALSE), list(...)) interval <- resolve.1.default(list(interval="none"), list(...)) if(se) warning("Standard error calculation assumes a Poisson process") if(interval != "none") warning(paste(interval, "interval calculation assumes a Poisson process")) predict(as.ppm(object), ...) } fitted.kppm <- fitted.dppm <- function(object, ...) { fitted(as.ppm(object), ...) 
} residuals.kppm <- residuals.dppm <- function(object, ...) { type <- resolve.1.default(list(type="raw"), list(...)) if(type != "raw") warning(paste("calculation of", type, "residuals", "assumes a Poisson process")) residuals(as.ppm(object), ...) } formula.kppm <- formula.dppm <- function(x, ...) { formula(x$po, ...) } terms.kppm <- terms.dppm <- function(x, ...) { terms(x$po, ...) } labels.kppm <- labels.dppm <- function(object, ...) { labels(object$po, ...) } update.kppm <- update.dppm <- function(object, ..., evaluate=TRUE, envir=environment(terms(object))) { argh <- list(...) nama <- names(argh) callframe <- object$callframe #' look for a formula argument fmla <- formula(object) jf <- integer(0) if(!is.null(trend <- argh$trend)) { if(!can.be.formula(trend)) stop("Argument \"trend\" should be a formula") fmla <- newformula(formula(object), trend, callframe, envir) jf <- which(nama == "trend") } else if(any(isfo <- sapply(argh, can.be.formula))) { if(sum(isfo) > 1) { if(!is.null(nama)) isfo <- isfo & nzchar(nama) if(sum(isfo) > 1) stop(paste("Arguments not understood:", "there are two unnamed formula arguments")) } jf <- which(isfo) fmla <- argh[[jf]] fmla <- newformula(formula(object), fmla, callframe, envir) } #' look for a point pattern or quadscheme if(!is.null(X <- argh$X)) { if(!inherits(X, c("ppp", "quad"))) stop(paste("Argument X should be a formula,", "a point pattern or a quadrature scheme")) jX <- which(nama == "X") } else if(any(ispp <- sapply(argh, inherits, what=c("ppp", "quad")))) { if(sum(ispp) > 1) { if(!is.null(nama)) ispp <- ispp & nzchar(nama) if(sum(ispp) > 1) stop(paste("Arguments not understood:", "there are two unnamed point pattern/quadscheme arguments")) } jX <- which(ispp) X <- argh[[jX]] } else { X <- object$X jX <- integer(0) } Xexpr <- if(length(jX) > 0) sys.call()[[2L + jX]] else NULL #' remove arguments just recognised, if any jused <- c(jf, jX) if(length(jused) > 0) { argh <- argh[-jused] nama <- names(argh) } #' update the matched call thecall <- getCall(object) methodname <- as.character(thecall[[1L]]) switch(methodname, kppm.formula = { ## original call has X = [formula with lhs] if(!is.null(Xexpr)) { lhs.of.formula(fmla) <- Xexpr } else if(is.null(lhs.of.formula(fmla))) { lhs.of.formula(fmla) <- as.name('.') } oldformula <- getCall(object)$X oldformula <- eval(oldformula, callframe) thecall$X <- newformula(oldformula, fmla, callframe, envir) }, { ## original call has X = ppp and trend = [formula without lhs] oldformula <- getCall(object)$trend %orifnull% (~1) oldformula <- eval(oldformula, callframe) fom <- newformula(oldformula, fmla, callframe, envir) if(!is.null(Xexpr)) lhs.of.formula(fom) <- Xexpr if(is.null(lhs.of.formula(fom))) { # new call has same format thecall$trend <- fom if(length(jX) > 0) thecall$X <- X } else { # new call has formula with lhs thecall$trend <- NULL thecall$X <- fom } }) knownnames <- unique(c(names(formals(kppm.ppp)), names(formals(mincontrast)), names(formals(optim)))) knownnames <- setdiff(knownnames, c("X", "trend", "observed", "theoretical", "fn", "gr", "...")) ok <- nama %in% knownnames thecall <- replace(thecall, nama[ok], argh[ok]) thecall$formula <- NULL # artefact of 'step', etc thecall[[1L]] <- as.name("kppm") if(!evaluate) return(thecall) out <- eval(thecall, envir=parent.frame(), enclos=envir) #' update name of data if(length(jX) == 1) { mc <- match.call() Xlang <- mc[[2L+jX]] out$Xname <- short.deparse(Xlang) } #' return(out) } updateData.kppm <- function(model, X, ...) 
{ update(model, X=X) } unitname.kppm <- unitname.dppm <- function(x) { return(unitname(x$X)) } "unitname<-.kppm" <- "unitname<-.dppm" <- function(x, value) { unitname(x$X) <- value if(!is.null(x$Fit$mcfit)) { unitname(x$Fit$mcfit) <- value } else if(is.null(x$Fit)) { warning("kppm object in outdated format") if(!is.null(x$mcfit)) unitname(x$mcfit) <- value } return(x) } as.fv.kppm <- as.fv.dppm <- function(x) { if(x$Fit$method == "mincon") return(as.fv(x$Fit$mcfit)) gobs <- if(is.stationary(x)) { pcf(x$X, correction="good") } else { pcfinhom(x$X, lambda=x, correction="good", update=FALSE) } gname <- attr(gobs, "fname") gfit <- (pcfmodel(x))(gobs$r) g <- bind.fv(gobs, data.frame(fit=gfit), labl = makefvlabel(fname=gname, sub="fit"), desc = "predicted %s for fitted model") return(g) } coef.kppm <- coef.dppm <- function(object, ...) { return(coef(object$po)) } Kmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="K") } pcfmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="pcf") } Kpcf.kppm <- function(model, what=c("K", "pcf", "kernel")) { what <- match.arg(what) # Extract function definition from internal table clusters <- model$clusters tableentry <- spatstatClusterModelInfo(clusters) if(is.null(tableentry)) stop("No information available for", sQuote(clusters), "cluster model") fun <- tableentry[[what]] if(is.null(fun)) stop("No expression available for", what, "for", sQuote(clusters), "cluster model") # Extract model parameters par <- model$par # Extract covariance model (if applicable) cm <- model$covmodel model <- cm$model margs <- cm$margs # f <- function(r) as.numeric(fun(par=par, rvals=r, model=model, margs=margs)) return(f) } is.stationary.kppm <- is.stationary.dppm <- function(x) { return(x$stationary) } is.poisson.kppm <- function(x) { switch(x$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process mu <- x$mu return(!is.null(mu) && (max(mu) == 0)) }, LGCP = { # log-Gaussian Cox process sigma2 <- x$par[["sigma2"]] return(sigma2 == 0) }, return(FALSE)) } # extract ppm component as.ppm.kppm <- as.ppm.dppm <- function(object) { object$po } # other methods that pass through to 'ppm' as.owin.kppm <- as.owin.dppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { from <- match.arg(from) as.owin(as.ppm(W), ..., from=from, fatal=fatal) } domain.kppm <- Window.kppm <- domain.dppm <- Window.dppm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, from=from) } model.images.kppm <- model.images.dppm <- function(object, W=as.owin(object), ...) { model.images(as.ppm(object), W=W, ...) } model.matrix.kppm <- model.matrix.dppm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE) { if(missing(data)) data <- NULL model.matrix(as.ppm(object), data=data, ..., Q=Q, keepNA=keepNA) } model.frame.kppm <- model.frame.dppm <- function(formula, ...) { model.frame(as.ppm(formula), ...) } logLik.kppm <- logLik.dppm <- function(object, ...) { cl <- object$Fit$maxlogcl if(is.null(cl)) stop(paste("logLik is only available for kppm objects fitted with", "method='palm' or method='clik2'"), call.=FALSE) ll <- logLik(as.ppm(object)) # to inherit class and d.f. ll[] <- cl return(ll) } AIC.kppm <- AIC.dppm <- function(object, ..., k=2) { cl <- logLik(object) df <- attr(cl, "df") return(- 2 * as.numeric(cl) + k * df) } extractAIC.kppm <- extractAIC.dppm <- function (fit, scale = 0, k = 2, ...) 
{ cl <- logLik(fit)
  edf <- attr(cl, "df")
  aic <- - 2 * as.numeric(cl) + k * edf
  return(c(edf, aic))
}

nobs.kppm <- nobs.dppm <- function(object, ...) { nobs(as.ppm(object)) }

psib <- function(object) UseMethod("psib")

psib.kppm <- function(object) {
  clus <- object$clusters
  info <- spatstatClusterModelInfo(clus)
  if(!info$isPCP) {
    warning("The model is not a cluster process")
    return(NA)
  }
  g <- pcfmodel(object)
  p <- 1 - 1/g(0)
  return(p)
}

reach.kppm <- function(x, ..., epsilon) {
  thresh <- if(missing(epsilon)) NULL else epsilon
  if(x$isPCP)
    return(2 * clusterradius.kppm(x, ..., thresh=thresh))
  ## use pair correlation
  g <- pcfmodel(x)
  ## find upper bound
  if(is.null(thresh)) thresh <- 0.01
  f <- function(r) { g(r) - 1 - thresh }
  scal <- as.list(x$par)$scale %orifnull% 1
  for(a in scal * 2^(0:10)) { if(f(a) < 0) break; }
  if(f(a) > 0) return(Inf)
  ## solve g(r) = 1 + thresh
  b <- uniroot(f, c(0, a))$root
  return(b)
}

is.poissonclusterprocess <- function(model) { UseMethod("is.poissonclusterprocess") }

is.poissonclusterprocess.default <- function(model) { FALSE }

is.poissonclusterprocess.kppm <- function(model) { isTRUE(model$isPCP) }

spatstat.model/R/simulate.detPPF.R0000644000176200001440000003710714331173073016516 0ustar liggesusers## simulate.detPPF.R
## $Revision: 1.11 $ $Date: 2022/05/23 02:33:06 $
##
## This file contains functions to simulate DPP models.
## Two simulation functions are visible:
## - simulate.detpointprocfamily (most useful)
## - rdpp (more generic workhorse function -- actually the real
##   workhorse is the locally defined rdppp)
##
## Furthermore the auxiliary function dppeigen is defined here.

rdpp <- local({

  ## Generates an empty point pattern
  emptyppx <- function(W, simplify = TRUE){
    W <- as.boxx(W)
    r <- W$ranges
    d <- ncol(r)
    if(simplify){
      if(d==2) return(ppp(numeric(0), numeric(0), window=as.owin(W)))
      if(d==3) return(pp3(numeric(0), numeric(0), numeric(0), W))
    }
    rslt <- replicate(d, numeric(0), simplify=FALSE)
    names(rslt) <- paste("x",1:d,sep="")
    rslt <- as.data.frame(rslt)
    return(ppx(rslt, domain = W, coord.type= rep("spatial", d)))
  }

  rdppp <- function(index, basis = "fourierbasis",
                    window = boxx(rep(list(0:1), ncol(index))),
                    reject_max = 1e4, progress = 0, debug = FALSE,
                    given = NULL, given_max_volume = 0.5, ...){
    ## Check arguments:
    if (!(is.logical(debug)))
      stop(paste(sQuote("debug"), "must be TRUE or FALSE"))
    if (!is.numeric(reject_max)||reject_max<=1)
      stop(paste(sQuote("reject_max"), "must be a numeric greater than 1"))
    if (!is.numeric(progress)||progress<0)
      stop(paste(sQuote("progress"), "must be a non-negative numeric"))
    index <- as.matrix(index)
    d <- ncol(index)
    window <- as.boxx(window)
    ranges <- window$ranges
    boxlengths <- as.numeric(ranges[2L, ] - ranges[1L, ])
    if(ncol(ranges)!=d)
      stop("The dimension differs from the number of columns in index")
    if(basis != "fourierbasis"){
      warning(paste("Non Fourier basis probably doesn't work correctly!",
                    "Fourier is assumed for bounds in rejection sampling."))
Fourier is assumed for bounds in rejection sampling.") userbasis <- get(basis) if (!(is.function(userbasis))) stop(paste(sQuote("basis"), "must be a function")) tmp <- userbasis(ranges[1,,drop=FALSE], index, window) if (!(is.numeric(tmp) || is.complex(tmp))) stop(paste("Output of", sQuote("basis"), "must be numeric or complex")) basis <- function(x, k, boxlengths){ userbasis(x, k, boxx(lapply(boxlengths, function(x) list(c(0,x))))) } } else{ basis <- fourierbasisraw } ## Number of points to simulate: n <- nrow(index) ## Resolve `given` for pseudo conditional simulation if(!is.null(given)){ # Make sure `given` is a list of point patterns if(is.ppp(given) || is.pp3(given) || is.ppx(given)){ given <- list(given) } stopifnot(all(sapply(given, function(x){ is.ppp(x) || is.pp3(x) || is.ppx(x)}))) # Check that the window (or its boundingbox) is inside the simulation window Wgiven <- lapply(given, function(x) as.boxx(domain(x))) stopifnot(all(sapply(Wgiven, function(w){ all(w$ranges[1,] >= ranges[1,]) && all(w$ranges[2,] <= ranges[2,]) }))) stopifnot(sum(sapply(Wgiven, volume)) < given_max_volume) # Coordinates of the given points: coordsgiven <- Reduce(rbind, lapply(given, function(x) as.matrix(as.data.frame(coords(x))))) ngiven <- nrow(coordsgiven) stopifnot(ngiven <= n) } ## Return an empty point pattern if no points are requested: empty <- emptyppx(window) if(n==0) return(empty) ## Matrix of coordinates: x <- matrix(0,n,d) colnames(x) <- paste("x",1:d,sep="") ## First point (given, or uniform in the window): x[1,] <- if(!is.null(given)) coordsgiven[1,] else runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])) if(n==1) return(ppx(x, window, simplify = TRUE)) ## Initialize debug info: if(debug){ debugList <- vector("list", n) } ## Basis functions eval. at the first point: v <- as.vector(basis(x[1,,drop=FALSE], index, boxlengths)) ## Initialize the Gram-Schmidt matrices: e <- cbind(v/sqrt(sum(abs(v)^2))) estar <- Conj(e) if(progress>0) cat(paste("Simulating", n, "points:\n")) ## Main for loop over number of points: for(i in (n-1):1){ ## Print progress: if(progress>0) progressreport(n-i, n, every=progress) ## Aux. variable to count number of rejection steps: tries <- 0 # Debug info: if(debug){ rejected <- matrix(NA,reject_max,d) } repeat{ ## Proposed point: newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) if(!is.null(given)){ if(i>(n-ngiven)){ newx <- coordsgiven[n-i+1,,drop=FALSE] } else{ while(any(sapply(Wgiven, function(w) inside.boxx(as.hyperframe(newx), w = w)))) newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) } } ## Basis functions eval. at proposed point: v <- as.vector(basis(newx, index, boxlengths)) ## Vector of projection weights (has length n-i) wei <- t(v)%*%estar if(!is.null(given) && i>(n-ngiven)){ break } ## Accept probability: # tmp <- prod(ranges[2,]-ranges[1,])/n*(sum(abs(v)^2)-sum(abs(wei)^2)) tmp <- 1-prod(ranges[2,]-ranges[1,])/n*(sum(abs(wei)^2)) ## If proposal is accepted the loop is broken: if(runif(1) < tmp){ break } ## If too many proposals have been rejected, give up: if(tries>reject_max){ stop(paste("Rejection sampling failed reject_max =",reject_max,"times in a row")) } ## Increase the count of rejection steps: tries <- tries+1 # Debug info: if(debug){ rejected[tries,] <- newx } } ## END OF REJECTION LOOP # Record the accepted point: x[n-i+1,] <- newx # Debug info: if(debug){ if(tries==0){ rej <- empty } else{ rej <- ppx(rejected[1:tries,,drop=FALSE],window, simplify=TRUE) } debugList[[n-i+1]] = list( old=ppx(x[1:(n-i),,drop=FALSE],window, simplify=TRUE), accepted=ppx(newx,window,simplify=TRUE), rejected=rej, index=index, estar = estar) } ## If it is the last point exit the main loop: if(i==1){break} ## Calculate orthogonal vector for Gram-Schmidt procedure: # w <- v - rowSums(matrix(wei,n,n-i,byrow=TRUE)*e[,1:(n-i)]) w <- v - colSums(t(e)*as.vector(wei)) ## Record normalized version in the Gram-Schmidt matrices: enew <- w/sqrt(sum(abs(w)^2)) e <- cbind(e, enew) estar <- cbind(estar,Conj(enew)) } ## END OF MAIN FOR LOOP # Save points as point pattern: X <- ppx(x, window, simplify = TRUE) # Debug info: if(debug){ attr(X, "dpp") <- list(debug=debugList) } if(progress>0) cat(" Done!\n") return(X) } rdpp <- function(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, ...){ window2d <- NULL if (is.owin(window)) window2d <- window sampleindex <-
as.matrix(index[rbinom(nrow(index), 1, eig)==1, , drop=FALSE]) X <- rdppp(sampleindex, basis=basis, window=window, reject_max=reject_max, progress=progress, debug=debug, ...) if(!is.null(window2d)) X <- X[window2d] return(X) } rdpp } ) simulate.dppm <- simulate.detpointprocfamily <- function(object, nsim = 1, seed = NULL, ..., W = NULL, trunc = .99, correction = "periodic", rbord = reach(object) # parallel = FALSE ){ # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. if(inherits(object, "dppm")){ if(is.null(W)) W <- Window(object$X) object <- object$fitted } if(!inherits(object, "detpointprocfamily")) stop("The model to simulate must be of class detpointprocfamily") if(length(tmp <- object$freepar)>0) stop(paste("The model to simulate must be completely specified. The following parameters are unspecified:", tmp)) if(!valid(object)) stop("The model is invalid. Please change parameter values to get a valid model") check.1.real(nsim) nsim <- floor(nsim) stopifnot(nsim >= 0) if(nsim == 0) return(simulationresult(list())) dim <- dim(object) basis <- object$basis ####### BACKDOOR TO SPHERICAL CASE ######## if(!is.null(spherefun <- object$sim_engine)){ sphereSimEngine <- get(spherefun) rslt <- sphereSimEngine(object, trunc, nsim, ...) attr(rslt, "seed") <- RNGstate return(rslt) } ########################################### # Check stationarity and window compatibility (if 'W' and 'thin' both are present) statmodel <- is.null(thin <- object$thin) if(is.null(W)){ if(!statmodel) W <- domain(thin) } Wowin <- if(is.owin(W)) W else NULL if(is.null(W)){ W <- boxx(rep(list(0:1), dim)) } else{ W <- as.boxx(W, warn.owin = FALSE) } if(!statmodel){ if(!is.subset.owin(Wowin,thin)) stop("The window of simulation is not contained in the window of the inhomogeneous intensity.") } r <- W$ranges if(dim!=ncol(r)) stop(paste("The dimension of the window:", ncol(r), "is inconsistent with the dimension of the model:", dim)) Wscale <- as.numeric(r[2,]-r[1,]) Wcenter <- as.numeric(colMeans(r)) if(correction=="border"){ if(!is.numeric(rbord)||any(rbord<0)) stop(paste(sQuote("rbord"), "must be a non-negative numeric")) borderscale <- pmin((Wscale+2*rbord)/Wscale, 2) Wscale <- borderscale*Wscale } ## lambda <- intensity(object) tmp <- dppeigen(object, trunc, Wscale) trunc <- tmp$trunc prec <- tmp$prec n <- length(tmp$eig) indexlist <- replicate(nsim, { x <- as.matrix(tmp$index[rbinom(n, 1, tmp$eig)==1, , drop=FALSE]); gc(); x }, simplify = FALSE) rm(tmp) gc() onesim <- function(i, win=NULL){ X <- rdpp(1, indexlist[[i]], basis = basis, window = boxx(rep(list(c(-.5,.5)), dim)), ...) 
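## rdpp() generates the pattern in the unit box centred at the origin;
## the steps below collect the diagnostic attributes, undo the
## border-correction expansion if requested, and rescale and translate
## the result to the target window.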
a <- attr(X, "dpp") a <- c(a, list(prec = prec, trunc = trunc)) if(correction=="border"){ if(dim!=2) stop("Border correction only implemented for dimension 2 at the moment.") X <- X[affine.owin(as.owin(X), mat = diag(1/borderscale))] } if(is.ppp(X)){ X <- affine(X, matrix(c(Wscale[1],0,0,Wscale[2]), 2, 2), Wcenter) if(!is.null(win)) X <- X[win] } else{ X <- ppx(X$data, domain = as.boxx(X$domain), coord.type = rep("spatial", dim)) X$data <- as.hyperframe(as.data.frame(X$data)*matrix(Wscale, nrow(X$data), ncol(X$data), byrow = TRUE)) X$domain$ranges <- X$domain$ranges*matrix(Wscale, 2, dim, byrow = TRUE) + matrix(Wcenter, 2, dim, byrow = TRUE) X <- ppx(X$data, X$domain, simplify = TRUE) } attr(X, "dpp") <- a return(X) } if(nsim==1){ rslt <- onesim(1,win=Wowin) if(!statmodel) rslt <- rthin(rslt, P=thin) } else{ ######## Old code for parallel simulation ######### # if(is.logical(parallel)){ # cl.cores <- if(parallel) NULL else 1 # } else{ # cl.cores <- parallel # } # rslt <- detlapply(1:nsim, onesim, cl.cores=cl.cores, win=Wowin) ################################################### rslt <- lapply(1:nsim, onesim, win=Wowin) if(!statmodel) rslt <- lapply(rslt, rthin, P=thin) names(rslt) <- paste("Simulation", 1:nsim) rslt <- if(dim == 2) as.solist(rslt) else as.anylist(rslt) } attr(rslt, "seed") <- RNGstate return(rslt) } dppeigen <- function(model, trunc, Wscale, stationary = FALSE){ dim <- dim(model) if(stationary && dim!=2) stop("Stationarity can only be exploited in dimension 2 at the moment.") Wscale <- as.numeric(Wscale) check.nvector(Wscale, dim, things="dimensions", vname="Wscale") ## Calculate expected number of points if the intensity is a parameter expnum <- NULL lambdaname <- model$intensity if(!is.null(lambdaname)) expnum <- getElement(model$fixedpar, lambdaname)*prod(Wscale) ## Get the maximal truncation in each dimension maxtrunc <- spatstat.options("dpp.maxmatrix")^(1/dim) ## Extract spectral density specden <- dppspecden(model) truncrange <- dppspecdenrange(model)*max(Wscale) if(trunc>=1){ ## Integer truncation fixed by user. if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ trunc <- floor(trunc) index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } else{ ## Integer truncation calculated from user-specified precision. if(is.null(expnum)) stop("Cannot calculate truncation adaptively in a model without intensity parameter. Please specify trunc directly as a positive integer.") prec0 <- trunc trunc <- 1 prec <- 0 ## cat("truncation is being calculated adaptively. 
Current truncation:\n") while(prec<=prec0 && (2*trunc)<=maxtrunc && trunc<=truncrange){ trunc <- 2*trunc if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } ## cat("\n") if(prec tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) ## result$method <- c("Monte Carlo test of fitted Gibbs model", paste("based on", nsim, "repetitions of"), sub("Spatial", "spatial", result$method)) return(result) } PoissonTest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE, gibbsok=FALSE, modelname, covname) { if(missing(modelname)) modelname <- short.deparse(substitute(model)) if(missing(covname)) covname <- short.deparse(substitute(covariate)) test <- match.arg(test) stopifnot(is.mppm(model)) if(!gibbsok && !is.poisson.mppm(model)) stop("Only implemented for Poisson models") ## extract things from model data <- model$data npat <- model$npat Y <- data.mppm(model) if(fast) { ## extract original quadrature schemes and convert to point patterns QQ <- quad.mppm(model) PP <- lapply(QQ, union.quad) Zweights <- lapply(QQ, w.quad) } else Zweights <- list() ## `evaluate' covariate if(verbose) cat("Extracting covariate...") if(identical(covariate, "x")) covariate <- xcoord if(identical(covariate, "y")) covariate <- ycoord if(is.character(covariate)) { ## extract covariate with this name from data used to fit model if(!(covariate %in% names(data))) stop(paste("Model does not contain a covariate called", dQuote(covariate))) covname <- covariate covariate <- data[, covname, drop=TRUE] } else if(inherits(covariate, c("listof", "anylist"))) { if(length(covariate) != npat) stop(paste("Length of list of covariate values does not match", "number of point patterns in data of original model")) } else if(is.hyperframe(covariate)) { ## extract first column covariate <- covariate[,1L, drop=TRUE] if(length(covariate) != npat) stop(paste("Number of rows of covariate hyperframe does not match", "number of point patterns in data of original model")) } else if(is.function(covariate) || is.im(covariate)) { ## replicate to make a list covariate <- as.anylist(rep(list(covariate), npat)) } else stop(paste("Format of argument", sQuote("covariates"), "not understood")) if(verbose) { cat("done.\nComputing statistics for each pattern...") pstate <- list() } ## compile information for test from each row Zvalues <- ZX <- Win <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) XI <- Y[[i]] if(fast) PI <- PP[[i]] else WI <- XI$window covariateI <- covariate[[i]] if(is.im(covariateI)) { type <- "im" ## evaluate at data points ZXI <- if(interpolate) interp.im(covariateI, XI$x, XI$y) else covariateI[XI] if(fast) { ## covariate values for quadrature points ZI <- covariateI[PI] } else { ## covariate image inside window 
ZI <- covariateI[WI, drop=FALSE] ## corresponding mask WI <- as.owin(ZI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else if(is.function(covariateI)) { type <- "function" ## evaluate exactly at data points ZXI <- covariateI(XI$x, XI$y) if(fast) { ## covariate values for quadrature points ZI <- covariateI(PI$x, PI$y) } else { ## window WI <- as.mask(WI) ## covariate image inside window ZI <- as.im(covariateI, W=WI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else stop("covariate should be an image or a function(x,y)") ZX[[i]] <- ZXI if(fast) Zvalues[[i]] <- ZI else { Win[[i]] <- WI ## values of covariate in window Zvalues[[i]] <- allpixelvalues(ZI) } } if(verbose) cat("done.\nComputing predicted intensity...") ## compute predicted intensities trend <- if(fast) fitted(model, type="trend") else predict(model, type="trend", locations=Win, verbose=verbose)$trend if(verbose) cat("done.\nExtracting...") ## extract relevant values lambda <- if(fast) trend else lapply(trend, allpixelvalues) if(verbose) cat("done.\nPerforming test...") ## flatten to vectors lambda <- unlist(lambda) Zweights <- unlist(Zweights) Zvalues <- unlist(Zvalues) ZX <- unlist(ZX) if(length(lambda) != length(Zvalues)) stop("Internal error: mismatch between predicted values and Z values") if(length(Zvalues) != length(Zweights)) stop("Internal error: mismatch between Z values and Z weights") lambda <- lambda * Zweights ## form weighted cdf of Z values in window FZ <- ewcdf(Zvalues, lambda/sum(lambda)) ## Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } ## make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) ## evaluate at data points if(!jitter) U <- FZ(ZX) else { ## jitter observed values to avoid ties grain <- min(diff(sortunique(ZX)))/8 jit <- runif(length(ZX), min=0, max=grain) sgn <- sample(c(-1L,1L), length(ZX), replace=TRUE) sgn[ZX==min(xxx)] <- 1L sgn[ZX==max(xxx)] <- -1L U <- FZ(ZX + sgn*jit) } ## Test uniformity result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test") result$data.name <- paste("predicted cdf of covariate", sQuote(paste(covname, collapse="")), "evaluated at data points of", sQuote(modelname)) if(verbose) cat("done.\n") class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues = Zvalues, lambda = lambda, ZX = ZX, FZ = FZ, U = U, type = type) attr(result, "info") <- list(modelname = modelname, covname = covname) return(result) } cdf.test.mppm }) spatstat.model/R/multistrauss.R0000644000176200001440000002043714243551505016331 0ustar liggesusers# # # multistrauss.S # # $Revision: 2.23 $ $Date: 2015/03/31 03:57:11 $ # # The multitype Strauss process # # MultiStrauss() create an instance of the multitype Strauss process # [an object of class 'interact'] # # ------------------------------------------------------------------- # MultiStrauss <- local({ # ......... 
define interaction potential MSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { # assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] # apply relevant threshold to each pair of points str <- (d <= rxu) # assign str[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
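## Usage sketch (illustrative only; 'amacrine' is a marked dataset from
## spatstat.data and the radii below are arbitrary):
##   rr <- matrix(c(0.03, 0.05, 0.05, 0.04), 2, 2)
##   fit <- ppm(amacrine ~ marks, MultiStrauss(radii=rr))
## delMS supports 'project' below: it deletes the offending interaction
## distance(s), falling back to Poisson() when no finite radii remain.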
delMS <- function(which, types, radii) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(MultiStrauss(types, radii)) } # Set up basic object except for family and parameters BlankMSobject <- list( name = "Multitype Strauss process", creator = "MultiStrauss", family = "pairwise.family", # evaluated later pot = MSpotential, par = list(types=NULL, radii = NULL), # to be filled in later parnames = c("possible types", "interaction distances"), pardesc = c("vector of possible types", "matrix of interaction distances"), hasInf = FALSE, selfstart = function(X, self) { if(!is.null(self$par$types)) return(self) types <- levels(marks(X)) MultiStrauss(types=types,radii=self$par$radii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types if(waxlyrical('gory')) { splat(nrow(radii), "types of points") if(!is.null(types)) { splat("Possible types: ") print(noquote(types)) } else splat("Possible types:\t not yet determined") } cat("Interaction radii:\n") print(signif(radii, getOption("digits"))) invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- (row(r) <= col(r)) & (!is.na(r)) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) gr <- gamma[required] return(all(is.finite(gr) & gr <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types # problems? required <- !is.na(radii) okgamma <- is.finite(gamma) & (gamma <= 1) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMS(naughty, types, radii)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMS, types=types, radii=radii) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...)
{ r <- self$par$radii active <- !is.na(r) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankMSobject) <- "interact" # finally create main function MultiStrauss <- function(radii, types=NULL) { if((missing(radii) || !is.matrix(radii)) && is.matrix(types)) { ## old syntax: (types=NULL, radii) radii <- types types <- NULL } radii[radii == 0] <- NA out <- instantiate.interact(BlankMSobject, list(types=types, radii = radii)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } MultiStrauss <- intermaker(MultiStrauss, BlankMSobject) MultiStrauss }) spatstat.model/R/ppmcovariates.R0000644000176200001440000000170714331173073016424 0ustar liggesusers#' #' ppmcovariates.R #' #' Utilities for wrangling covariates in ppm #' #' $Revision: 1.1 $ $Date: 2022/05/20 03:59:14 $ ppmCovariates <- function(model) { # generate list of all covariates in ppm (excluding marks) stopifnot(is.ppm(model)) co <- as.list(model$covariates) xy <- list(x=function(x,y){x}, y=function(x,y){y}) coplus <- append(co, xy) return(as.anylist(coplus)) } findCovariate <- function(covname, scope, scopename=NULL) { # find the named covariate in the given ppm object or list if(is.ppm(scope)) { covlist <- ppmCovariates(scope) if(missing(scopename)) scopename <- "covariates in model" } else if(is.list(scope)) { covlist <- scope } else stop("scope should be a named list of covariates, or a ppm object") if(!(covname %in% names(covlist))) stop(paste("covariate", dQuote(covname), "not found", if(!is.null(scopename)) paste("amongst", scopename) else NULL)) covlist[[covname]] } spatstat.model/R/lurking.R0000644000176200001440000005547314331173073015233 0ustar liggesusers# Lurking variable plot for arbitrary covariate. # # # $Revision: 1.75 $ $Date: 2022/02/12 09:11:35 $ # lurking <- function(object, ...) 
{ UseMethod("lurking") } lurking.ppp <- lurking.ppm <- function(object, covariate, type="eem", cumulative=TRUE, ..., plot.it=TRUE, plot.sd=is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) { cl <- match.call() clenv <- parent.frame() ## validate object if(is.ppp(object)) { X <- object object <- ppm(X ~1, forcefit=TRUE) dont.complain.about(X) } else verifyclass(object, "ppm") ## default name for covariate if(missing(covname) || is.null(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else NULL } Xsim <- NULL if(!identical(envelope, FALSE)) { ## compute simulation envelope if(!identical(envelope, TRUE)) { ## some kind of object Y <- envelope if(is.list(Y) && all(sapply(Y, is.ppp))) { Xsim <- Y envelope <- TRUE } else if(inherits(Y, "envelope")) { Xsim <- attr(Y, "simpatterns") if(is.null(Xsim)) stop("envelope does not contain simulated point patterns") envelope <- TRUE } else stop("Unrecognised format of argument: envelope") nXsim <- length(Xsim) if(missing(nsim) && (nXsim < nsim)) { warning(paste("Only", nXsim, "simulated patterns available")) nsim <- nXsim } } } ## may need to refit the model if(plot.sd && is.null(getglmfit(object))) object <- update(object, forcefit=TRUE, use.internal=TRUE) ## match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") ## extract spatial locations Q <- quad.ppm(object) ## datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) ## subset of quadrature points used to fit model subQset <- getglmsubset(object) if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q)) ################################################################# ## compute the covariate if(is.im(covariate)) { covvalues <- covariate[quadpoints, drop=FALSE] covrange <- internal$covrange %orifnull% range(covariate, finite=TRUE) } else if(is.vector(covariate) && is.numeric(covariate)) { covvalues <- covariate covrange <- internal$covrange %orifnull% range(covariate, finite=TRUE) if(length(covvalues) != quadpoints$n) stop("Length of covariate vector,", length(covvalues), "!=", quadpoints$n, ", number of quadrature points") } else if(is.expression(covariate)) { ## Expression involving covariates in the model glmdata <- getglmdata(object) if(is.null(glmdata)) { ## default glmdata <- data.frame(x=quadpoints$x, y=quadpoints$y) if(is.marked(quadpoints)) glmdata$marks <- marks(quadpoints) } else if(is.data.frame(glmdata)) { ## validate if(nrow(glmdata) != npoints(quadpoints)) stop("Internal error: nrow(glmdata) =", nrow(glmdata), "!=", npoints(quadpoints), "= npoints(quadpoints)") } else stop("Internal error: format of glmdata is not understood") ## ensure x and y are in data frame if(!all(c("x","y") %in% names(glmdata))) { glmdata$x <- quadpoints$x glmdata$y <- quadpoints$y } if(!is.null(object$covariates)) { ## Expression may involve an external covariate that's not used in model neednames <- all.vars(covariate) if(!all(neednames %in% colnames(glmdata))) { moredata <- mpl.get.covariates(object$covariates, quadpoints, covfunargs=object$covfunargs) use <- !(names(moredata) %in% colnames(glmdata)) glmdata <- 
cbind(glmdata, moredata[,use,drop=FALSE]) } } ## Evaluate expression sp <- parent.frame() covvalues <- eval(covariate, envir= glmdata, enclos=sp) covrange <- internal$covrange %orifnull% range(covvalues, finite=TRUE) if(!is.numeric(covvalues)) stop("The evaluated covariate is not numeric") } else stop(paste("The", sQuote("covariate"), "should be either", "a pixel image, an expression or a numeric vector")) ################################################################# ## Secret exit if(identical(internal$getrange, TRUE)) return(covrange) ################################################################ ## Residuals/marks attached to appropriate locations. ## Stoyan-Grabarnik weights are attached to the data points only. ## Others (residuals) are attached to all quadrature points. resvalues <- if(!is.null(rv)) rv else if(type=="eem") eem(object, check=check) else residuals(object, type=type, check=check) if(inherits(resvalues, "msr")) { ## signed or vector-valued measure; extract increment masses resvalues <- resvalues$val if(ncol(as.matrix(resvalues)) > 1) stop("Not implemented for vector measures; use [.msr to split into separate components") } ## NAMES OF THINGS ## name of the covariate if(is.null(covname)) covname <- if(is.expression(covariate)) covariate else "covariate" ## type of residual/mark if(missing(typename)) typename <- if(!is.null(rv)) "rv" else "" clip <- (!is.poisson.ppm(object) || !missing(clipwindow)) && !is.null(clipwindow) ## CALCULATE stuff <- LurkEngine(object=object, type=type, cumulative=cumulative, plot.sd=plot.sd, quadpoints=quadpoints, wts=wts, Z=Z, subQset=subQset, covvalues=covvalues, resvalues=resvalues, clip=clip, clipwindow=clipwindow, cov.is.im=is.im(covariate), covrange=covrange, typename=typename, covname=covname, cl=cl, clenv=clenv, oldstyle=oldstyle, check=check, verbose=verbose, nx=nx, splineargs=splineargs, envelope=envelope, nsim=nsim, nrank=nrank, Xsim=Xsim, internal=internal) ## --------------- PLOT ---------------------------------- if(plot.it && inherits(stuff, "lurk")) { plot(stuff, ...) return(invisible(stuff)) } else { return(stuff) } } # ........... calculations common to all methods ......................... 
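## LurkEngine accumulates the chosen residuals over points ordered by
## covariate value, computes the theoretical mean under the fitted model,
## and (for Poisson models, in the cumulative case) pointwise variance
## bands. Illustrative call via the user-level wrapper:
##   fit <- ppm(cells ~ x)
##   lurking(fit, expression(y), type="raw")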
LurkEngine <- function(object, type, cumulative=TRUE, plot.sd=TRUE, quadpoints, wts, Z, subQset, covvalues, resvalues, clip, clipwindow, cov.is.im=FALSE, covrange, typename, covname, cl, clenv, oldstyle=FALSE, check=TRUE, verbose=FALSE, nx, splineargs, envelope=FALSE, nsim=39, nrank=1, Xsim=list(), internal=list(), checklength=TRUE) { stopifnot(is.ppm(object) || is.slrm(object)) ## validate covariate values covvalues <- as.numeric(covvalues) resvalues <- as.numeric(resvalues) if(checklength) { nqu <- npoints(quadpoints) nco <- length(covvalues) nre <- length(resvalues) nwt <- length(wts) nZ <- length(Z) should <- if(type == "eem") c(nco, nwt, nZ) else c(nco, nwt, nZ, nre) if(!all(should == nqu)) { typeblurb <- paste("type =", sQuote(type)) typeblurb <- paren(typeblurb, "[") gripe1 <- paste("Failed initial data check", paste0(typeblurb, ":")) gripe2 <- paste("!=", nqu, "= npoints(quadpoints)") if(nco != nqu) stop(paste(gripe1, "length(covvalues) =", nco, gripe2)) if(nwt != nqu) stop(paste(gripe1, "length(wts) =", nwt, gripe2)) if(nZ != nqu) stop(paste(gripe1, "length(Z) =", nZ, gripe2)) } if(type == "eem" && nre != sum(Z)) stop(paste("Failed initial data check [type='eem']: ", "length(resvalues) =", nre, "!=", sum(Z), "= sum(Z)")) } ## nbg <- is.na(covvalues) if(any(offending <- nbg & subQset)) { if(cov.is.im) { warning(paste(sum(offending), "out of", length(offending), "quadrature points discarded because", ngettext(sum(offending), "it lies", "they lie"), "outside the domain of the covariate image")) } else { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is NA", "they are NA"))) } } ## remove data with invalid covariate values ok <- !nbg & subQset if(!(allok <- all(ok))) { quadpoints <- quadpoints[ok] covvalues <- covvalues[ok] okdata <- ok[Z] # which original data points are retained ## Zok <- Z & ok # which original quadrature pts are retained as data pts Z <- Z[ok] # which of the retained quad pts are data pts wts <- wts[ok] resvalues <- resvalues[if(type == "eem") okdata else ok] } if(any(is.infinite(covvalues) | is.nan(covvalues))) stop("covariate contains Inf or NaN values") ## now determine the data points datapoints <- quadpoints[Z] ## Quadrature points marked by covariate value covq <- quadpoints %mark% as.numeric(covvalues) if(type == "eem") { ## data points marked by residuals and covariate res <- datapoints %mark% as.numeric(resvalues) covres <- datapoints %mark% (as.numeric(covvalues)[Z]) } else { ## quadrature points marked by residuals and covariate res <- quadpoints %mark% as.numeric(resvalues) covres <- quadpoints %mark% as.numeric(covvalues) } ## Clip to subwindow if needed if(clip) { covq <- covq[clipwindow] res <- res[clipwindow] covres <- covres[clipwindow] clipquad <- inside.owin(quadpoints, w=clipwindow) wts <- wts[ clipquad ] Z <- Z[ clipquad ] } ## handle internal data saveworking <- isTRUE(internal$saveworking) Fisher <- internal$Fisher # possibly from a larger model covrange <- internal$covrange ## >>>>>>>>>>>> START ANALYSIS <<<<<<<<<<<<<<<<<<<<<<<< ## ----------------------------------------------------------------------- ## (A) EMPIRICAL CUMULATIVE FUNCTION ## based on data points if type="eem", otherwise on quadrature points ## Reorder the data/quad points in order of increasing covariate value ## and then compute the cumulative sum of their residuals/marks markscovres <- marks(covres) o <- fave.order(markscovres) covsort <- markscovres[o] marksort <- marks(res)[o] cummark <- 
cumsum(ifelse(is.na(marksort), 0, marksort)) if(anyDuplicated(covsort)) { right <- !duplicated(covsort, fromLast=TRUE) covsort <- covsort[right] cummark <- cummark[right] } ## we'll plot(covsort, cummark) in the cumulative case ## (B) THEORETICAL MEAN CUMULATIVE FUNCTION ## based on all quadrature points ## Range of covariate values covqmarks <- marks(covq) covrange <- covrange %orifnull% range(covqmarks, na.rm=TRUE) if(diff(covrange) > 0) { ## Suitable breakpoints cvalues <- seq(from=covrange[1L], to=covrange[2L], length.out=nx) csmall <- cvalues[1L] - diff(cvalues[1:2]) cbreaks <- c(csmall, cvalues) ## cumulative area as function of covariate values covclass <- cut(covqmarks, breaks=cbreaks) increm <- tapply(wts, covclass, sum) cumarea <- cumsum(ifelse(is.na(increm), 0, increm)) } else { ## Covariate is constant cvalues <- covrange[1L] covclass <- factor(rep(1, length(wts))) cumarea <- increm <- sum(wts) } ## compute theoretical mean (when model is true) mean0 <- if(type == "eem") cumarea else numeric(length(cumarea)) ## we'll plot(cvalues, mean0) in the cumulative case ## (A'),(B') DERIVATIVES OF (A) AND (B) ## Required if cumulative=FALSE ## Estimated by spline smoothing (with x values jittered) if(!cumulative) { ## fit smoothing spline to (A) ss <- do.call(smooth.spline, append(list(covsort, cummark), splineargs) ) ## estimate derivative of (A) derivmark <- predict(ss, covsort, deriv=1)$y ## similarly for (B) ss <- do.call(smooth.spline, append(list(cvalues, mean0), splineargs) ) derivmean <- predict(ss, cvalues, deriv=1)$y } ## ----------------------------------------------------------------------- ## Store what will be plotted if(cumulative) { empirical <- data.frame(covariate=covsort, value=cummark) theoretical <- data.frame(covariate=cvalues, mean=mean0) } else { empirical <- data.frame(covariate=covsort, value=derivmark) theoretical <- data.frame(covariate=cvalues, mean=derivmean) } ## ------------------------------------------------------------------------ ## (C) STANDARD DEVIATION if desired ## (currently implemented only for Poisson) ## (currently implemented only for cumulative case) if(plot.sd && !is.poisson(object)) warning(paste("standard deviation is calculated for Poisson model;", "not valid for this model")) if(plot.sd && cumulative) { if(is.ppm(object)) { ## Fitted intensity at quadrature points lambda <- fitted(object, type="trend", check=check) if(!allok) lambda <- lambda[ok] ## Fisher information for coefficients asymp <- vcov(object,what="internals") Fisher <- Fisher %orifnull% asymp$fisher ## Local sufficient statistic at quadrature points suff <- asymp$suff if(!allok && !is.null(suff)) suff <- suff[ok, , drop=FALSE] } else if(is.slrm(object)) { ## Fitted intensity at quadrature points lambda <- predict(object, type="intensity")[quadpoints, drop=FALSE] ## Fisher information for coefficients Fisher <- Fisher %orifnull% vcov(object, what="Fisher") ## Sufficient statistic at quadrature points suff <- model.matrix(object) if(!allok && !is.null(suff)) suff <- suff[ok, , drop=FALSE] } else stop("object should be a ppm or slrm") ## Clip if required if(clip) { lambda <- lambda[clipquad] if(!is.null(suff)) suff <- suff[clipquad, , drop=FALSE] ## suff is a matrix } ## First term: integral of lambda^(2p+1) switch(type, pearson={ varI <- cumarea }, raw={ ## Compute sum of w*lambda for quadrature points in each interval dvar <- tapply(wts * lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }, inverse=, 
## same as eem eem={ ## Compute sum of w/lambda for quadrature points in each interval dvar <- tapply(wts / lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }) if(!oldstyle) { ## check feasibility of variance calculations if(length(Fisher) == 0 || length(suff) == 0) { warning("Model has no fitted coefficients; using oldstyle=TRUE") oldstyle <- TRUE } else { ## variance-covariance matrix of coefficients V <- try(solve(Fisher), silent=TRUE) if(inherits(V, "try-error")) { warning("Fisher information is singular; reverting to oldstyle=TRUE") oldstyle <- TRUE } } } if(!oldstyle && any(dim(V) != ncol(suff))) { #' drop rows and columns nama <- colnames(suff) V <- V[nama, nama, drop=FALSE] } working <- NULL ## Second term: B' V B if(oldstyle) { varII <- 0 if(saveworking) working <- data.frame(varI=varI) } else { ## lamp = lambda^(p + 1) lamp <- switch(type, raw = lambda, pearson = sqrt(lambda), inverse =, eem = as.integer(lambda > 0)) ## Compute sum of w * lamp * suff for quad points in intervals Bcontrib <- as.vector(wts * lamp) * suff dB <- matrix(, nrow=length(cumarea), ncol=ncol(Bcontrib), dimnames=list(NULL, colnames(suff))) for(j in seq_len(ncol(dB))) dB[,j] <- tapply(Bcontrib[,j], covclass, sum, na.rm=TRUE) ## tapply() returns NA when the table is empty dB[is.na(dB)] <- 0 ## Cumulate columns B <- apply(dB, 2, cumsum) if(!is.matrix(B)) B <- matrix(B, nrow=1) ## compute B' V B for each i varII <- quadform(B, V) ## was: varII <- diag(B %*% V %*% t(B)) if(saveworking) working <- cbind(data.frame(varI=varI, varII=varII), as.data.frame(B)) } ## ## variance of residuals varR <- varI - varII ## trap numerical errors nbg <- (varR < 0) if(any(nbg)) { ran <- range(varR) varR[nbg] <- 0 relerr <- abs(ran[1L]/ran[2L]) nerr <- sum(nbg) if(relerr > 1e-6) { warning(paste(nerr, "negative", ngettext(nerr, "value (", "values (min="), signif(ran[1L], 4), ")", "of residual variance reset to zero", "(out of", length(varR), "values)")) } } theoretical$sd <- sqrt(varR) } ## if(envelope) { ## compute envelopes by simulation cl$plot.it <- FALSE cl$envelope <- FALSE cl$rv <- NULL if(is.null(Xsim)) Xsim <- simulate(object, nsim=nsim, progress=verbose) values <- NULL if(verbose) { cat("Processing.. 
") state <- list() } for(i in seq_len(nsim)) { ## evaluate lurking variable plot for simulated pattern cl$object <- update(object, Xsim[[i]]) result.i <- eval(cl, clenv) ## interpolate empirical values onto common sequence f.i <- with(result.i$empirical, approxfun(covariate, value, rule=2)) val.i <- f.i(theoretical$covariate) values <- cbind(values, val.i) if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") hilo <- if(nrank == 1) apply(values, 1, range) else apply(values, 1, orderstats, k=c(nrank, nsim-nrank+1)) theoretical$upper <- hilo[1L,] theoretical$lower <- hilo[2L,] } ## ---------------- RETURN COORDINATES ---------------------------- stuff <- list(empirical=empirical, theoretical=theoretical) attr(stuff, "info") <- list(typename=typename, cumulative=cumulative, covrange=covrange, covname=covname, oldstyle=oldstyle) if(saveworking) attr(stuff, "working") <- working class(stuff) <- "lurk" return(stuff) } # plot a lurk object plot.lurk <- function(x, ..., shade="grey") { xplus <- append(x, attr(x, "info")) with(xplus, { ## work out plot range mr <- range(0, empirical$value, theoretical$mean, na.rm=TRUE) if(!is.null(theoretical$sd)) mr <- range(mr, theoretical$mean + 2 * theoretical$sd, theoretical$mean - 2 * theoretical$sd, na.rm=TRUE) if(!is.null(theoretical$upper)) mr <- range(mr, theoretical$upper, theoretical$lower, na.rm=TRUE) ## start plot vname <- paste(if(cumulative)"cumulative" else "marginal", typename) do.call(plot, resolve.defaults( list(covrange, mr), list(type="n"), list(...), list(xlab=covname, ylab=vname))) ## Envelopes if(!is.null(theoretical$upper)) { Upper <- theoretical$upper Lower <- theoretical$lower } else if(!is.null(theoretical$sd)) { Upper <- with(theoretical, mean+2*sd) Lower <- with(theoretical, mean-2*sd) } else Upper <- Lower <- NULL if(!is.null(Upper) && !is.null(Lower)) { xx <- theoretical$covariate if(!is.null(shade)) { ## shaded envelope region shadecol <- if(is.colour(shade)) shade else "grey" xx <- c(xx, rev(xx)) yy <- c(Upper, rev(Lower)) dont.complain.about(yy) do.call.matched(polygon, resolve.defaults(list(x=quote(xx), y=quote(yy)), list(...), list(border=shadecol, col=shadecol))) } else { do.call(lines, resolve.defaults( list(x = quote(xx), y=quote(Upper)), list(...), list(lty=3))) do.call(lines, resolve.defaults( list(x = quote(xx), y = quote(Lower)), list(...), list(lty=3))) } } ## Empirical lines(value ~ covariate, empirical, ...) ## Theoretical mean do.call(lines, resolve.defaults( list(mean ~ covariate, quote(theoretical)), list(...), list(lty=2))) }) return(invisible(NULL)) } #' print a lurk object print.lurk <- function(x, ...) 
{ splat("Lurking variable plot (object of class 'lurk')") info <- attr(x, "info") with(info, { splat("Residual type: ", typename) splat("Covariate on horizontal axis: ", covname) splat("Range of covariate values: ", prange(covrange)) splat(if(cumulative) "Cumulative" else "Non-cumulative", "plot") }) has.bands <- !is.null(x$theoretical$upper) has.sd <- !is.null(x$theoretical$sd) if(!has.bands && !has.sd) { splat("No confidence bands computed") } else { splat("Includes", if(has.sd) "standard deviation for" else NULL, "confidence bands") if(!is.null(info$oldstyle)) splat("Variance calculation:", if(info$oldstyle) "old" else "new", "style") } return(invisible(NULL)) } spatstat.model/R/mppm.R0000644000176200001440000006340314331173074014522 0ustar liggesusers# # mppm.R # # $Revision: 1.105 $ $Date: 2022/07/08 01:09:38 $ # mppm <- local({ mppm <- function(formula, data, interaction=Poisson(), ..., iformula=NULL, random=NULL, weights=NULL, use.gam=FALSE, reltol.pql=1e-3, gcontrol=list() ) { ## remember call cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") ## Validate arguments if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) stopifnot(is.hyperframe(data)) data.sumry <- summary(data, brief=TRUE) npat <- data.sumry$ncases if(npat == 0) stop(paste("Hyperframe", sQuote("data"), "has zero rows")) if(!is.null(iformula) && !inherits(iformula, "formula")) stop(paste("Argument", sQuote("iformula"), "should be a formula or NULL")) if(has.random <- !is.null(random)) { if(!inherits(random, "formula")) stop(paste(sQuote("random"), "should be a formula or NULL")) if(use.gam) stop("Sorry, random effects are not available in GAMs") } if(! (is.interact(interaction) || is.hyperframe(interaction))) stop(paste("The argument", sQuote("interaction"), "should be a point process interaction object (class", dQuote("interact"), "), or a hyperframe containing such objects", sep="")) if(is.null(weights)) { weights <- rep(1, npat) } else { check.nvector(weights, npat, things="rows of data", oneok=TRUE, vname="weights") if(length(weights) == 1L) weights <- rep(weights, npat) } backdoor <- list(...)$backdoor if(is.null(backdoor) || !is.logical(backdoor)) backdoor <- FALSE ############## HANDLE FORMULAS ############################ ##------ Trend Formula ------------------ ## check all variables in trend formula are recognised checkvars(formula, data.sumry$col.names, extra=c("x","y","id","marks"), bname="data") ## check formula has LHS and RHS. 
Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") allvars <- variablesinformula(trend) ## --- Interaction formula ----- ## names of interactions as they may appear in formulae itags <- if(is.hyperframe(interaction)) names(interaction) else "Interaction" ninteract <- length(itags) ## ensure `iformula' is a formula without a LHS ## and determine which columns of `interaction' are actually used if(is.null(iformula)) { if(ninteract > 1) stop(paste("interaction hyperframe has more than 1 column;", "you must specify the choice of interaction", "using argument", sQuote("iformula"))) iused <- TRUE iformula <- as.formula(paste("~", itags)) } else { if(length(iformula) > 2) stop(paste("The interaction formula", sQuote("iformula"), "should not have a left hand side")) ## valid variables in `iformula' are interactions and data frame columns permitted <- paste(sQuote("interaction"), "or permitted name in", sQuote("data")) checkvars(iformula, itags, extra=c(data.sumry$dfnames, "id"), bname=permitted) ivars <- variablesinformula(iformula) ## check which columns of `interaction' are actually used iused <- itags %in% ivars if(sum(iused) == 0) stop("No interaction specified in iformula") ## OK allvars <- c(allvars, ivars) } ## --- Random effects formula ---- if(!is.null(random)) { if(length(random) > 2) stop(paste("The random effects formula", sQuote("random"), "should not have a left hand side")) checkvars(random, itags, extra=c(data.sumry$col.names, "x", "y", "id"), bname="either data or interaction") allvars <- c(allvars, variablesinformula(random)) } ## ---- variables required (on RHS of one of the above formulae) ----- allvars <- unique(allvars) ######## EXTRACT DATA ##################################### ## Insert extra variable 'id' data <- cbind.hyperframe(data, id=factor(1:npat)) data.sumry <- summary(data, brief=TRUE) allvars <- unique(c(allvars, "id")) ## Extract the list of responses (point pattern/quadscheme) Y <- data[, Yname, drop=TRUE] if(npat == 1) Y <- solist(Y) Yclass <- data.sumry$classes[Yname] if(Yclass == "ppp") { ## convert to quadrature schemes, for efficiency's sake Y <- solapply(Y, quadscheme, ...) 
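## (the quadrature schemes feed the Berman-Turner device used below;
## extra arguments in '...' are passed to quadscheme() to control the
## dummy points)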
## Ydescrip <- "point patterns" ## not used } else if(Yclass == "quad") { Y <- as.solist(Y) ## Ydescrip <- "quadrature schemes" ## not used } else { stop(paste("Column", dQuote(Yname), "of data", "does not consist of point patterns (class ppp)", "nor of quadrature schemes (class quad)"), call.=FALSE) } ## Extract sub-hyperframe of data named in formulae datanames <- names(data) used.cov.names <- allvars[allvars %in% datanames] has.covar <- (length(used.cov.names) > 0) if(has.covar) { dfvar <- used.cov.names %in% data.sumry$dfnames imvar <- data.sumry$types[used.cov.names] == "im" if(any(nbg <- !(dfvar | imvar))) stop(paste("Inappropriate format for", ngettext(sum(nbg), "covariate", "covariates"), paste(sQuote(used.cov.names[nbg]), collapse=", "), ": should contain image objects or vector/factor")) covariates.hf <- data[, used.cov.names, drop=FALSE] has.design <- any(dfvar) dfvarnames <- used.cov.names[dfvar] datadf <- if(has.design) as.data.frame(covariates.hf, discard=TRUE, warn=FALSE) else NULL if(has.design) { ## check for NA's in design covariates # if(any(nbg <- apply(is.na(datadf), 2, any))) if(any(nbg <- matcolany(is.na(datadf)))) stop(paste("There are NA's in the", ngettext(sum(nbg), "covariate", "covariates"), commasep(dQuote(names(datadf)[nbg])))) } } else { has.design <- FALSE datadf <- NULL } ############### INTERACTION ################################### ## ensure `interaction' is a hyperframe of `interact' objects ## with the right number of rows. ## All entries in a column must represent the same process ## (possibly with different values of the irregular parameters). ## Extract the names of the point processes. if(is.interact(interaction)) { ninteract <- 1 processes <- list(Interaction=interaction$name) interaction <- hyperframe(Interaction=interaction, id=1:npat)[,1] constant <- c(Interaction=TRUE) } else if(is.hyperframe(interaction)) { inter.sumry <- summary(interaction) ninteract <- inter.sumry$nvars ## ensure it has the same number of rows as 'data' nr <- inter.sumry$ncases if(nr == 1 && npat > 1) { interaction <- cbind.hyperframe(id=1:npat, interaction)[,-1] inter.sumry <- summary(interaction) } else if(nr != npat) stop(paste("Number of rows in", sQuote("interaction"), "=", nr, "!=", npat, "=", "number of rows in", sQuote("data"))) ## check all columns contain interaction objects ok <- (inter.sumry$classes == "interact") if(!all(ok)) { nbg <- names(interaction)[!ok] nn <- sum(!ok) stop(paste(ngettext(nn, "Column", "Columns"), paste(sQuote(nbg), collapse=", "), ngettext(nn, "does", "do"), "not consist of interaction objects")) } ## all entries in a column must represent the same process type ## (with possibly different values of the irregular parameters) ok <- unlist(lapply(as.list(interaction), consistentname)) if(!all(ok)) { nbg <- names(interaction)[!ok] stop(paste("Different interactions may not appear in a single column.", "Violated by", paste(sQuote(nbg), collapse=", "))) } processes <- lapply(as.list(interaction), firstname) ## determine whether all entries in a column are EXACTLY the same ## (=> have the same parameters) constant <- (inter.sumry$storage == "hyperatom") if(any(!constant)) { others <- interaction[,!constant] constant[!constant] <- sapply(lapply(as.list(others), unique), length) == 1 } } ## check for trivial (Poisson) interactions trivial <- unlist(lapply(as.list(interaction), allpoisson)) ## check that iformula does not combine two interactions on one row nondfnames <- datanames[!(datanames %in% data.sumry$dfnames)] ip <- impliedpresence(itags, 
iformula, datadf, nondfnames) if(any(rowSums(ip) > 1)) stop("iformula invokes more than one interaction on a single row") ## #################### BERMAN-TURNER DEVICE ######################### ## ## set up list to contain the glm variable names for each interaction. Vnamelist <- rep(list(character(0)), ninteract) names(Vnamelist) <- itags ## set up list to contain 'IsOffset' Isoffsetlist <- rep(list(logical(0)), ninteract) names(Isoffsetlist) <- itags #### ## ---------------- L O O P --------------------------------- for(i in 1:npat) { ## extract responses and covariates for presentation to ppm() Yi <- Y[[i]] covariates <- if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.design) { ## convert each data frame value to an image covariates[dfvarnames] <- lapply(as.list(as.data.frame(covariates[dfvarnames])), as.im, W=Yi$data$window) } ## Generate data frame and glm info for this point pattern ## First the trend covariates prep0 <- bt.frame(Yi, trend, Poisson(), ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam) glmdat <- prep0$glmdata ## now the nontrivial interaction terms for(j in (1:ninteract)[iused & !trivial]) { inter <- interaction[i,j,drop=TRUE] if(!is.null(ss <- inter$selfstart)) interaction[i,j] <- inter <- ss(Yi$data, inter) prepj <- bt.frame(Yi, ~1, inter, ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam, vnamebase=itags[j], vnameprefix=itags[j]) ## store GLM variable names & check consistency vnameij <- prepj$Vnames if(i == 1) Vnamelist[[j]] <- vnameij else if(!identical(vnameij, Vnamelist[[j]])) stop("Internal error: Unexpected conflict in glm variable names") ## store offset indicator vectors isoffset.ij <- prepj$IsOffset if(i == 1) Isoffsetlist[[j]] <- isoffset.ij else if(!identical(isoffset.ij, Isoffsetlist[[j]])) stop("Internal error: Unexpected conflict in offset indicators") ## GLM data frame for this interaction glmdatj <- prepj$glmdata if(nrow(glmdatj) != nrow(glmdat)) stop("Internal error: differing numbers of rows in glm data frame") iterms.ij <- glmdatj[vnameij] subset.ij <- glmdatj$.mpl.SUBSET ## tack on columns of interaction terms glmdat <- cbind(glmdat, iterms.ij) ## update subset (quadrature points where cif is positive) glmdat$.mpl.SUBSET <- glmdat$.mpl.SUBSET & subset.ij } ## assemble the Mother Of All Data Frames if(i == 1) { moadf <- glmdat } else { ## There may be new or missing columns recognised <- names(glmdat) %in% names(moadf) if(any(!recognised)) { newnames <- names(glmdat)[!recognised] zeroes <- as.data.frame(matrix(0, nrow(moadf), length(newnames))) names(zeroes) <- newnames moadf <- cbind(moadf, zeroes) } provided <- names(moadf) %in% names(glmdat) if(any(!provided)) { absentnames <- names(moadf)[!provided] zeroes <- as.data.frame(matrix(0, nrow(glmdat), length(absentnames))) names(zeroes) <- absentnames glmdat <- cbind(glmdat, zeroes) } ## Ensure factor columns are consistent m.isfac <- sapply(as.list(glmdat), is.factor) g.isfac <- sapply(as.list(glmdat), is.factor) if(any(uhoh <- (m.isfac != g.isfac))) errorInconsistentRows("values (factor and non-factor)", colnames(moadf)[uhoh]) if(any(m.isfac)) { m.levels <- lapply(as.list(moadf)[m.isfac], levels) g.levels <- lapply(as.list(glmdat)[g.isfac], levels) clash <- !mapply(identical, x=m.levels, y=g.levels) if(any(clash)) errorInconsistentRows("factor levels", (colnames(moadf)[m.isfac])[clash]) } ## Finally they are compatible moadf <- rbind(moadf, glmdat) } } ## ---------------- E N D o f L O O P -------------------------- ## ## add case weights 
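## (each quadrature row inherits the weight of its source point pattern
## through the pattern index 'id')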
moadf$caseweight <- weights[moadf$id] ## ## backdoor exit - Berman-Turner frame only - used by predict.mppm if(backdoor) return(moadf) ## ## ## -------------------------------------------------------------------- ## ## Construct the glm formula for the Berman-Turner device ## ## Get trend part from the last-computed prep0 fmla <- prep0$trendfmla ## Tack on the RHS of the interaction formula if(!all(trivial)) fmla <- paste(fmla, "+", as.character(iformula)[[2]]) ## Make it a formula fmla <- as.formula(fmla) ## Ensure that each interaction name is recognised. ## ## To the user, an interaction is identified by its `tag' name ## (default tag: "Interaction") ## ## Internally, an interaction is fitted using its sufficient statistic ## which may be 0, 1 or k-dimensional. ## The column names of the sufficient statistic are the Vnames ## returned from ppm. ## The Poisson process is a special case: it is 0-dimensional (no Vnames). ## ## For k-dimensional sufficient statistics, we modify the formulae, ## replacing the interaction name by (vname1 + vname2 + .... + vnamek) ## for(j in (1:ninteract)[iused]) { vnames <- Vnamelist[[j]] tag <- itags[j] isoffset <- Isoffsetlist[[j]] if(any(isoffset)) { ## enclose names of offset variables in 'offset()' vnames[isoffset] <- paste("offset(", vnames[isoffset], ")", sep="") } if(trivial[j]) ## Poisson case: add a column of zeroes moadf[[tag]] <- 0 else if(!identical(vnames, tag)) { if(length(vnames) == 1) ## tag to be replaced by vname vn <- paste("~", vnames[1]) else ## tag to be replaced by (vname1 + vname2 + .... + vnamek) vn <- paste("~(", paste(vnames, collapse=" + "), ")") ## pull out formula representation of RHS vnr <- as.formula(vn)[[2]] ## make substitution rule: list(=) vnsub <- list(vnr) names(vnsub) <- tag ## perform substitution in trend formula fmla <- eval(substitute(substitute(fom, vnsub), list(fom=fmla))) ## perform substitution in random effects formula if(has.random && tag %in% variablesinformula(random)) random <- eval(substitute(substitute(fom, vnsub), list(fom=random))) } } fmla <- as.formula(fmla) ## Fix scoping problem assign("glmmsubset", moadf$.mpl.SUBSET, envir=environment(fmla)) for(nama in colnames(moadf)) assign(nama, moadf[[nama]], envir=environment(fmla)) ## Satisfy package checker glmmsubset <- .mpl.SUBSET <- moadf$.mpl.SUBSET .mpl.W <- moadf$.mpl.W caseweight <- moadf$caseweight ## ---------------- FIT THE ------------------------------------ want.trend <- prep0$info$want.trend if(want.trend && use.gam) { fitter <- "gam" ctrl <- do.call(gam.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W * caseweight, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) } else if(!is.null(random)) { fitter <- "glmmPQL" ctrl <- do.call(lmeControl, resolve.defaults(gcontrol, list(maxIter=50))) attr(fmla, "ctrl") <- ctrl # very strange way to pass argument fixed <- 42 # to satisfy package checker FIT <- hackglmmPQL(fmla, random=random, family=quasi(link=log, variance=mu), weights=.mpl.W * caseweight, data=moadf, subset=glmmsubset, control=attr(fixed, "ctrl"), reltol=reltol.pql) deviants <- -2 * logLik(FIT) } else { fitter <- "glm" ctrl <- do.call(glm.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W * caseweight, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) } env <- list2env(moadf, parent=sys.frame(sys.nframe())) environment(FIT$terms) 
  ## maximised log-pseudolikelihood
  W <- with(moadf, .mpl.W * caseweight)
  SUBSET <- moadf$.mpl.SUBSET
  Z <- (moadf$.mpl.Y != 0)
  maxlogpl <- -(deviants/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET))
  ##
  ## ---------------- PACK UP THE RESULT --------------------------------
  ##
  result <- list(Call = list(callstring=callstring, cl=cl),
                 Info = list(has.random=has.random,
                             has.covar=has.covar,
                             has.design=has.design,
                             Yname=Yname,
                             used.cov.names=used.cov.names,
                             allvars=allvars,
                             names.data=names(data),
                             is.df.column=(data.sumry$storage == "dfcolumn"),
                             rownames=row.names(data),
                             correction=prep0$info$correction,
                             rbord=prep0$info$rbord),
                 Fit = list(fitter=fitter,
                            use.gam=use.gam,
                            fmla=fmla,
                            FIT=FIT,
                            moadf=moadf,
                            Vnamelist=Vnamelist,
                            Isoffsetlist=Isoffsetlist),
                 Inter = list(ninteract=ninteract,
                              interaction=interaction,
                              iformula=iformula,
                              iused=iused,
                              itags=itags,
                              processes=processes,
                              trivial=trivial,
                              constant=constant),
                 formula=formula,
                 trend=trend,
                 iformula=iformula,
                 random=random,
                 npat=npat,
                 data=data,
                 Y=Y,
                 maxlogpl=maxlogpl,
                 datadf=datadf)
  class(result) <- c("mppm", class(result))
  return(result)
}

# helper functions
checkvars <- function(f, b, extra=NULL, bname=short.deparse(substitute(b))) {
  fname <- short.deparse(substitute(f))
  fvars <- variablesinformula(f)
  bvars <- if(is.character(b)) b else names(b)
  bvars <- c(bvars, extra)
  nbg <- !(fvars %in% bvars)
  if(any(nbg)) {
    nn <- sum(nbg)
    stop(paste(ngettext(nn, "Variable", "Variables"),
               commasep(dQuote(fvars[nbg])),
               "in", fname,
               ngettext(nn, "is not one of the", "are not"),
               "names in", bname))
  }
  return(NULL)
}

consistentname <- function(x) {
  xnames <- unlist(lapply(x, getElement, name="name"))
  return(length(unique(xnames)) == 1)
}

firstname <- function(z) { z[[1]]$name }

allpoisson <- function(x) all(sapply(x, is.poisson.interact))

marklevels <- function(x) { levels(marks(x)) }

errorInconsistentRows <- function(what, offending) {
  stop(paste("There are inconsistent", what,
             "for the",
             ngettext(length(offending), "variable", "variables"),
             commasep(sQuote(offending)),
             "between different rows of the hyperframe 'data'"),
       call.=FALSE)
}

mppm
})

is.mppm <- function(x) { inherits(x, "mppm") }

coef.mppm <- function(object, ...) {
  coef(object$Fit$FIT)
}

fixef.mppm <- function(object, ...) {
  if(object$Fit$fitter == "glmmPQL") fixef(object$Fit$FIT) else coef(object$Fit$FIT)
}

ranef.mppm <- function(object, ...) {
  if(object$Fit$fitter == "glmmPQL") ranef(object$Fit$FIT) else
    as.data.frame(matrix(, nrow=object$npat, ncol=0))
}

print.mppm <- function(x, ...) {
  print(summary(x, ..., brief=TRUE))
}

is.poisson.mppm <- function(x) {
  trivial <- x$Inter$trivial
  iused <- x$Inter$iused
  all(trivial[iused])
}

quad.mppm <- function(x) { as.solist(x$Y) }

data.mppm <- function(x) { solapply(x$Y, getElement, name="data") }

is.marked.mppm <- function(X, ...) {
  any(sapply(data.mppm(X), is.marked))
}
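## Illustrative usage (added sketch, not part of the original source):
## fit a Poisson 'mppm' to the 'waterstriders' data from spatstat.data,
## then use the methods defined above. Kept as comments so that the
## source file itself does not execute anything.
##
##   H <- hyperframe(Y = waterstriders)
##   fit <- mppm(Y ~ 1, data = H)
##   coef(fit)               # coefficients of the underlying glm/gam fit
##   fixef(fit); ranef(fit)  # fixed/random effects (trivial without 'random')
##   logLik(fit)             # maxlogpl, with a 'df' attribute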
is.multitype.mppm <- function(X, ...) {
  any(sapply(data.mppm(X), is.multitype))
}

windows.mppm <- function(x) { solapply(data.mppm(x), Window) }

logLik.mppm <- function(object, ..., warn=TRUE) {
  if(warn && !is.poisson.mppm(object))
    warning(paste("log likelihood is not available for non-Poisson model;",
                  "log-pseudolikelihood returned"))
  ll <- object$maxlogpl
  attr(ll, "df") <- length(fixef(object))
  class(ll) <- "logLik"
  return(ll)
}

AIC.mppm <- function(object, ..., k=2, takeuchi=TRUE) {
  ll <- logLik(object, warn=FALSE)
  pen <- attr(ll, "df")
  if(takeuchi && !is.poisson(object)) {
    vv <- vcov(object, what="all")
    J <- vv$fisher
    H <- vv$internals$A1
    ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J
    JiH <- try(solve(H, J), silent=TRUE)
    if(!inherits(JiH, "try-error"))
      pen <- sum(diag(JiH))
  }
  return(- 2 * as.numeric(ll) + k * pen)
}

extractAIC.mppm <- function(fit, scale = 0, k = 2, ..., takeuchi = TRUE) {
  edf <- length(coef(fit))
  aic <- AIC(fit, k = k, takeuchi = takeuchi)
  c(edf, aic)
}

getCall.mppm <- function(x, ...) { x$Call$cl }

terms.mppm <- function(x, ...) { terms(formula(x)) }

nobs.mppm <- function(object, ...) { sum(sapply(data.mppm(object), npoints)) }

simulate.mppm <- function(object, nsim=1, ..., verbose=TRUE) {
  subs <- subfits(object)
  nr <- length(subs)
  sims <- list()
  if(verbose) {
    splat("Generating simulated realisations of", nr, "models..")
    state <- list()
  }
  for(irow in seq_len(nr)) {
    model.i <- subs[[irow]]
    dont.complain.about(model.i)
    sims[[irow]] <- do.call(simulate,
                            resolve.defaults(list(object=quote(model.i),
                                                  nsim=nsim, drop=FALSE),
                                             list(...),
                                             list(progress=FALSE)))
    if(verbose) state <- progressreport(irow, nr, state=state)
  }
  sim1list <- lapply(sims, "[[", i=1)
  h <- hyperframe("Sim1"=sim1list)
  if(nsim > 1) {
    for(j in 2:nsim) {
      simjlist <- lapply(sims, "[[", i=j)
      hj <- hyperframe(Sim=simjlist)
      names(hj) <- paste0("Sim", j)
      h <- cbind(h, hj)
    }
  }
  return(h)
}
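## Illustrative usage (added sketch): simulate() refits each row of the
## hyperframe separately via subfits() and returns a hyperframe with one
## column per realisation (Sim1, Sim2, ...), one point pattern per row.
##
##   fit <- mppm(Y ~ 1, data = hyperframe(Y = waterstriders))
##   sims <- simulate(fit, nsim = 2, verbose = FALSE)
##   sims$Sim1[[1]]   # first realisation for the first row of the data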
model.matrix.mppm <- function(object, ..., keepNA=TRUE, separate=FALSE) {
  df <- object$Fit$moadf
  FIT <- object$Fit$FIT
  environment(FIT) <- list2env(df)
  #' track subset
  ok <- complete.cases(df) & df$.mpl.SUBSET
  nok <- sum(ok)
  nfull <- nrow(df)
  #'
  if(keepNA) {
    mm <- model.matrix(FIT, ..., subset=NULL, na.action=NULL)
    nr <- nrow(mm)
    if(nr != nfull) {
      if(nr == nok) {
        ## some methods for model.matrix ignore 'subset=NULL'
        mmfull <- matrix(NA, nfull, ncol(mm),
                         dimnames=list(NULL, colnames(mm)))
        mmfull[ok,] <- mm
        mm <- mmfull
      } else {
        stop(paste("Internal error: model matrix has wrong number of rows:",
                   nr, "should be", nfull, "or", nok),
             call.=FALSE)
      }
    }
  } else {
    mm <- model.matrix(FIT, ...)
    if(nrow(mm) != nok)
      stop("Internal error: model matrix has wrong number of rows",
           call.=FALSE)
    df <- df[ok, , drop=FALSE]
  }
  ## remove NaN
  if(any(isnan <- is.nan(mm)))
    mm[isnan] <- 0
  ## get standard attributes of model matrix
  ctr <- attr(mm, "contrasts")
  if(is.null(ctr) && !is.null(ctr <- FIT[["contrasts"]]))
    attr(mm, "contrasts") <- ctr
  ass <- attr(mm, "assign")
  if(is.null(ass)) {
    ass <- FIT[["assign"]]
    if(length(ass) == ncol(mm)) {
      attr(mm, "assign") <- ass
    } else {
      ass <- NULL
      warning("Unable to determine 'assign' in model.matrix.mppm",
              call.=FALSE)
    }
  }
  ## split if required
  if(separate) {
    id <- df$id
    mm <- split.data.frame(mm, id)  # see help(split)
    if(!is.null(ass))
      mm <- lapply(mm, "attr<-", which="assign", value=ass)
    if(!is.null(ctr))
      mm <- lapply(mm, "attr<-", which="contrasts", value=ctr)
  }
  return(mm)
}
spatstat.model/R/ord.R0000644000176200001440000000231014331173073014322 0ustar liggesusers#
#
#    ord.S
#
#    $Revision: 1.9 $  $Date: 2020/01/26 04:32:04 $
#
#    Ord process with user-supplied potential
#
#    Ord()   create an instance of the Ord process
#            [an object of class 'interact']
#            with user-supplied potential
#
#
# -------------------------------------------------------------------
#

Ord <- local({

  BlankOrd <- list(
    name     = "Ord process with user-defined potential",
    creator  = "Ord",
    family   = "ord.family",
    pot      = NULL,
    par      = NULL,
    parnames = NULL,
    hasInf   = NA,
    init     = NULL,
    update   = function(self, ...){
      do.call(Ord,
              resolve.defaults(list(...),
                               list(pot=self$pot, name=self$name)))
    },
    print = function(self) {
      cat("Potential function:\n")
      print(self$pot)
      invisible()
    },
    version = NULL
  )
  class(BlankOrd) <- "interact"

  Ord <- function(pot, name) {
    out <- instantiate.interact(BlankOrd)
    out$pot <- pot
    if(!missing(name)) out$name <- name
    return(out)
  }

  Ord <- intermaker(Ord, BlankOrd)
})
spatstat.model/R/poissonfitsbetter.R0000644000176200001440000000332714334115572017340 0ustar liggesusers#' poissonfitsbetter.R
#'
#'   poisson.fits.better()
#'   and underlying calculations
#'
#'   $Revision: 1.1 $ $Date: 2022/11/10 06:51:16 $
#'
#'   Copyright (c) Adrian Baddeley 2022
#'   GNU Public Licence >= 2.0

poisson.fits.better <- function(object) {
  if(!is.null(ispo <- object$ispo))
    return(isTRUE(ispo))
  y <- PoissonCompareCalc(object)
  if(is.null(y))
    return(FALSE)
  answer <- with(y,
                 if(maximising) (poisval >= optval) else (poisval <= optval))
  return(answer)
}

PoissonCompareCalc <- function(object) {
  stopifnot(is.kppm(object))
  if(!isTRUE(object$isPCP))
    return(NULL)
  Fit <- object$Fit
  switch(Fit$method,
         mincon = {
           m <- Fit$mcfit
           canonical <- !is.null(m$par.canon)
           optpar <- if(canonical) m$par.canon else m$par
           objfun <- m$objfun
           objargs <- m$objargs
           maximising <- FALSE
         },
         palm = ,
         clik2 = {
           canonical <- !is.null(object$par.canon)
           optpar <- if(canonical) object$par.canon else object$par
           objfun <- Fit$objfun
           objargs <- Fit$objargs
           maximising <- TRUE
         },
         return(NULL)
  )
  ## optimised value
  optval <- objfun(optpar, objargs=objargs)
  ## value for Poisson
  poispar <- optpar
  if(canonical) {
    if(is.na(match("strength", names(optpar))))
      stop("Internal error: the canonical parameters do not include 'strength'")
    poispar[["strength"]] <- 0
  } else {
    if(is.na(match("kappa", names(optpar))))
      stop("Internal error: the parameters do not include 'kappa'")
    poispar[["kappa"]] <- Inf
  }
  poisval <- objfun(poispar, objargs=objargs)
  return(list(optval=optval, poisval=poisval, maximising=maximising))
}
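#' Illustrative usage (added sketch, not part of the original source):
#' for a clustered pattern such as 'redwood', the optimised Thomas model
#' should beat the Poisson limit (kappa = Inf, or canonical strength = 0),
#' so the result is typically FALSE. If poisson.fits.better() is not
#' exported, call it as spatstat.model:::poisson.fits.better.
#'
#'   fit <- kppm(redwood ~ 1, "Thomas")
#'   poisson.fits.better(fit)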
spatstat.model/R/sysdata.rda0000644000176200001440000030064414515336522015572 0ustar liggesusers[binary xz-compressed R data file; contents omitted]
QÒ·$ëÚíÛ,ùE]7-¹ ¤ Ó¿› (_ûÉfK€ËBΫâ.ãuú›2;Ï*^î'¢æXpwÕhØ*¢så)º+ˆZ¶Å|‘qe1L°Y¾èqeÛ>{쨛K7Êt>¹Í>¥Û).Ç3ƒ0M Ã|pìÇX}ª¡ynö“™À>õ'Kh·lw±¿SÝÙ8¥ß5`#:F˜¿ÁâokW0ÖãšVöôU¸IžWo9KÏ„cy,ô_ SxÊȰ+6àFª¦È(Ñ#WGŸxŒ轌‰S³]$CÃVüwcî21¢~·aõ|ù¢áÕY‰J´Q®Ä{®—(”ò †KÊ©>úÏÂ,£D¿Rí;Ø0£¡¤Ü¨Û+qNeÔÍ’äḆz­¬ Ó®5¤ÞW—tçBë`{M˜»k¹|×áh•™FÛór€÷è/’ %ö @â~ØÓaF â–Œ‰¶?s)㦣G§â‡Hôw—`éÍ^h?a+|aôz }AQÂYBƒ¨‚´€)UŽ{¦{48åã 2Y„f-ÑÁi}‹Z<6ÓÃM£m̤œù}jê7`mFêõDjã3H Àx&¨‰Úb¼á„KÇ¿m™QòqŽ BAhÅÆé~>ÄÀ9È¿öÂ~™Và )åIßÍuîêHì,˜ÅH^×ÅhHè ›mÚù 8ùÙê§^SÖ…ÿ8Ímì™ édÖ{²©v8ƒæ™ðp5þfͰ ³¹ÓM›³ØUãvá?uäK9ªÇ“HèzãÔ¯ô⯟ví™ ÿÛ'òo.hP.}ùµÏÌ¡ƒbº8ýýí«bÚ¸L%ïøÛã×ДYë{sl?äÖEÿúâ÷} æ˜màC9(Ö`æê} 0¶¥…jò1Hf')ýÞ ‰ÀuÕ FV—>ø.?ÎáÒ†¯¬å÷N¶ßww ¨ ÈÏÈ;Ô»èÔrA™97ÍÃ]^ÝÅð:“õdâð^ £ž!V€¹rçÊ>’¦*_Q%9)îº*têe*}Vó‹¼ [ ‘~µHeÕê ÜÉ‹¤H3:æ¾£€”ÀKŒ×ì/a©r”&8áC@\H'È-Æ<æÇzÛ®JéŽØÃØÎ’ÒÂ<úknÂh?>BÇ<^–îU 6÷ÿý '×XsëëÔl¡W¡c²Žps`­A4IÆwØBÆ*$ }?è4ý  Ÿoš/×íÅ.³þd½ ÷ÞÄ ¬*æßãƒRNн¦­_¤LE¨mÿp~Â[9E@ýXªÂv.¨ò©[_ßSi^e¼Yʺó€l Xôõò(̤Ú+êÛ$БŒ[7ˆ±Ëœk ©GéUÇ«™{E/¯„cãPœcç\¹ê¹l’rØ>óq&¶;ô좕¬ Ÿ áúæÓüúµ¥ÜO¿ßj¶5Èy]>D@ïÞ.x ¸/ŸÔ1 V¶éÿÁ4°ßÔD D­Á÷¶Nj$òÖÕJ±ø°™ì[È–à®á½/þì Þñ8"¤´z1ïD¢&N=÷è Õö_­Ô_ÑOŸ18uZO®]$dGŠÊë¢¥áŒØ]Ô/ŠTõ´]ïjÐó=ZI’šG0?ÚYWŸ¤ëÕÍâBÇJžMMB°]œ1§iuIþd®”Àìè´¥å~Õoè“Ø [R¡€nû³u€e¿ê 7ã‰XêzÀ÷W›ó ?òê î.˜lK?L Ÿ ÊÖž*8×Sž¯Ùû¬)‰î,ÇŒmW¦ þÅ!R_‹άw¤^tn,˜“¨ÃRŸâ¸}î^]®!JzÇ…F{«2‘;ʶÎ*¬íŽ}-&=…uýÞ£ŒõêZE¥UȨ]»exÓhuÀ<΀Qºò '­ØßW×oÄ(vÊ„Q)ýý ì·ŒÎY>MIÝG‡êŽñ’ø‰h*0>=´ô}Õk¶@!z F¯õH’—ößzàŸ{œ€tÓbTg™ˆBÙÄ„(p67ŒrqÁ°a®(½“N~–ðíPþöÅ<…Hõ¿DÅ*Ò­ UÎ{^Ü-^ר¥òÍ.¡,"YÀºÀ“— Xlˆâ,M³œ¹Y@ÏeÜrsŸžW¹0à/¥ÆÔ>,Î=㦒òò|òcÁJGUÛš ¯° *ëxÅÔёȤ† e·tÃFˆŒ»û',‹™žadAæþâ ä̱©EÍ4X…'³ÿ¡Ë_\Ó‡ÑõÏ ô ;°Ñé!œü?‡8(‹Š1¨Ôô¾ *i\Æ&°«Z¹³DÈ…3Tûv9Æ $‚åÄ5.Íóó³ˆNëÛ¢–ÅÞÚãûîf®Ûž ss/µžL±œîhx8bÓ«¼3€å&ýâ·Ry ‘Ä}Å5hĬsqoˆïÀ*ÇÍï¯HuVŒ$ZÛOUƒnÙ^Î_N’„¨2TwTòþ÷E¿í¾ VN̉dv¾yÆQt‹®}ûw“¯a™™²r¾¥¶m4«³DÖ„#¤6zƒ9áreŠû£W¸àå`«/W…xá©Å¨âs,S;Y´i…ÿ½šàHãkãÍ `!³ùd¼‘gÎÖö¿c©Ì.ÑÙ1éߎuÙ¯þ„Ó;C ¯I$¡©ƒO²«þ,A‚!ï<ᨭèo…d!y¸5¤žÄ¦òE´ñ3[ßÝ~A™àgµ*›CÙa4ƆX³l¢öu‚B~ëãÆ™Ç¹Uж2ËÚ¸kÜ11 ¯Ó+üÕv±‘õe$õ6ô¤cÜÒá¥KX"?û–%¶Éÿ7;éž*Õ¾2 …ô¢Ú^ÁäyªgM€ù! i€ÊÞ&VßJ…ížJHÞšŒÌãØC“!Љ Vm¬ ´žŒúrúÜì¶¼6ñ0L7xC|´ÁˆXlÍ“ì¦>êOäx‡†ÙË8†ŸÒjÚîI9ýé@ÕúeÖv¯ÆÖ‡cJØ «‡IÊm*nA.Ì0«m·°·»('¥_#7Z³DéôZÖ!ƒ£NççšmI§ŸÜ_̲˜Úâ5„].úÑ<lÇ_qrÀŸ˜¤´ÁV Ñ÷äɎɉÒCØEÔ\®ß- 4p§ H qgì|; M˜.Q`ÉÃíì¥ZxÆt1qŽa¦…ZdMTª%¬•ó,åzìÇßK_ä#x×´½ž†aOíF¼´IDè Bîs»·â–‚­¢pÖºe˜]V(Tÿ8\Á,ˆª¤;ȵðJ]%Þ< ðoNåAthŒç3¨FqHã•ÁF;Ësk¨æFo~û^Ëà´düíû㨵„0|„Ó}a^eå@* FÓnÒëˆð8FnÚ ð*耾èÌRú²ó„vÁÉßòã[KöÎj‹W’iõÕ.86N”A«#äžß/‡nÍYÿÚõx¤û!¦uI4hæF zˆèoã‚·„Í\áÀýBŽFã j•HA¦ û¦QE‘õ -U†–#¹2×[¿~$vc›ëÛóùÏo¸¢˜|kÄ8vx &}»u·ºÁ5)˜ï$!¦(ŽäÍ,‚s¥Å™5ÔkÑÑ´¹’Äô ÕX¥Õ@p1w5D7¿×(€Ä>hëµ4…nƒôÿW`€_OÜ«¬ñó/àUlu6?ýèú¯Ÿ¬©ÕܾJf@ÓÕ  D|‚S²@ܳr9ÑØ¥»ö¨þ§ Œ¾CtÙn±—…+wl}Uˆžrà ³Ù^!5V>2;UœWúÍ[`.Y6›;Ã[¦MWUþ²n“¬y5©ªoaa F øKØ¿Újœ%ôlµ”ÈÌ+>­¹ßm‰E™T^+îu³x#—ƶÐëa–ÿÞ_©Õ #±ú* Ÿ ë{ _ÃÙ¿BHïC)×pR M¾`¥ôø4ruéU`KàL¡O…B9óÃ+­@Î' N¡>Ûƒðå˜EüUr÷¾ªf–“ã{/âçÿ]Øî,Ù¶ qá°oLdÔ¤;Å@sJÈ%G<ÙªÛ/Ë;ùkzÃÕ€É{}¸`”Æêr¸Ó~®lB©:Ü#.DÑÝèÂ’$A9WÑ¾Íø‰4û¸IÕòШºõ¯Þð×2 b5È®9ÐùqyvÀ¨¬Ê©=’ êíÖ]&¬1ÖZÕ¢ôfcëHy“&³½3$ˆ0Çå)æ†Ñ}Ä&ØsjðA!|èÿ8ò/X¾ìá'íU=ýËÎn&¦þ•lÄ)Ö ¬›?rWtC9™oÌVn(#æ;¹NÛâLJ{ÒRÐÖS”6-¸nå¶ gWÛóŸÿšìG\”fð*ô[ÿkrÇtRò‡n¹À™Ÿ$’²>¾‚^º0º©äó£€wâÿ•°ØAV÷}ç[í-wùnÃ<Æ…F¡>†C¾…~:nD†Ì[@’½óØlÿ´ÿîÿkn?¶þoÿKTü¬ò~:_ÏÕµ ¶ðº„E¥#G‰–Ö?¿@×½¤X$´ü“DƦŠSqèÌÔ½î.3ñŸýH&yd(ÿ­çê8<:JB” õ‡ É¥?+‰•zY ‘J‰¼ÓÜñS  ëHqW¦êøÑXI,8\¥FÉœ¼Z2Dæ‹L§Zª)ÊñØ#s ¹5 ¬1lj±Îa x¡´æ?`1Ê…L«§žJˆPY’ÓšrW`ÎÀnÜ WH²@ŠLÌ™¢üˆ=8Y¥çrƒD/†­C–— -Ï.´,RÔYè†Fñ¬ö4‚#a[Hl,Þ³än¦Îeß$`I•²’Ñ;ðÇ a.âS{Ø|àÇD´_Ò^•-øú :àvÚhf˜ð×yÇÑ™gÑoE³áGEFܸòÃó× þb¨yèº8ÿ|@‰BŒ=ÿN¹¬˜­RaFcž”‘dNmw\Âô-Êöг!N]FÎÓ¬ø»e0Š tF—l]O;³óá뛂ÇLs_mZtx› ©éhgÖÔéÅ`ny´ïÖÉHÀoÛKÔÞ9Ì·ž½ù&ž{. 
Wcª,¥Þ>›.ÚÍW£UÀ—š‘ÙMyi[[|ÜËŸDiÝNÆÙÑ‚Œžt°Vj ^¥ÉÁjcKÐju^È“¯@̹³Pz–˜Óp’v ÍÛðôL0¯ÕßÔÃ÷>v‰_Ú†k±”£ÄOð$ùž2%röhÓ]ßI´‰RŠ)Eq*|‚ÜPù“æne[V²Xåâ U'¸ð3m× ,ýu†G¬Þµ@…  ú'è"ua÷?5æ?Y›â3Š/¾•0!õuðñqÿÃ?Yš‡0än;ÎñìN=–˜|_‹ô-œ¤þ6 ¨çsnº€hH›¨ßÏ‘´ê~=cqS:­ç©tX_qðêrœ,›^Ç£nî/VZÄ"@r«¯0‹õY%_›Z4ýL¡Ö ×ÔyDz.O7¼Ï¥žå„¾ÝlbÃ: ÀB_póƒY·g1Ø)ÈD¡#¥]£îd‰ýãËÙ$:ü 6¿5®i $f þ•ÐŽº¤núx­ÌºmÚY¹õ:FW5b4K#ú@S•þð¨Ô_OÅŽ¡«}R…'öF’=8ë£.“‰Š7NñË8_|QPh*¤BȨA»1E±êM¡îüÏ´„²º±.Jqx‹[&„çÚ²–\¡ê š²£axÑâê!Áɵ¤y¯Ï¥UU·f‹wg À££Ï4Èù—¾$ëf#Á ž¾¼€ÆNß]7Ía¹Ï^ÓÏ#‘UmâåCçqu*Âz½Ô_;çµ3=ˆÖó²4;éRYñ6 ®ÇOF ?›…;wúí—«ÆØÿæ¥/q,‘fâɨ.»Þ·´ÿx,à§V. ´TLú`åcÁÊê³ó OkËVªXDʪÖÚî¸âøÙÑÞ)x£ço„ýæl£Ê²ÀÔÿÙ’Úp²Ü89x`qÆš Ž]]bGÝé ÷~ ~d‚épÀ…½ «¹q(³út7å¨5úuE\pE}ϛ㺹òÚpîvçºJä‘TÈ&*í^§°„>J$¡é 'ØÓŒ©s2·gyåÙÌtšžµ¢U9aNÌ<”;H—îâõÀL’FfÏÂn‘ûŒ”_Eá\¨öz(—ãíÝêÈf $%ÍþQaíÍvqˆ¼šÖñÛÏ×e–|/¤€HÄsWb°Þ‘ ×€hºË¡b(JŽË‘÷'™Ó«Í4/×3£’–o x¾îEÝ‘V2b°t@ 1ºófH^Y_¨ß½ ¡‹š~5¢l¸«@‡ìì•ZeØüaÐڿ͆©@TºH Ófà­à‚òAß.tž;7éÅÒ¹î*6R47Û¹ëÙîÛ¸RD£Û,?‚ƒršJx€bÌFŸüCŒ<”"ýÅã>”¹Ž“Ü}óAË~0ûþMjLÇìIê ïÕËRÆÆ|׉]ï°,aòÞªj„Äm2“ÀÕ—#tJ¥ïr]°d =±rtº€e†´ºØÑ„F°b ¦Ó éAyØnO0|žžXF`)î¯É†NQtd+ hÞ罜+¨"y v¾Ä‹hÄëîð)&Üg€‚tí\}©Â¹T·ÛÛ¬qÕÖ®`©¯ÀGœ¬Ï;›ž» Ýáå'í/m%çÌÖ_ƒ³¤Ã¶R…®d23c‹œÿw ˜ö 9]ÿs\_ÅV`‘j, ‚…Ú1¤ ›È]"È"yËz(ljúwOÔÔØµíà \¯~ù.>=†2»œüntÂAù#ÕY ?ãu‘ʘ¡_x–xÖÍg õͧÓó».—hph@úx†l²°¨åH©^l] ˜”$²Ï>{‰†æ¾`.è±òýP†¶Sz'hm9ᔼNáþsÕ3A3s¹Éâ7f×hÕ[8ì%ß ‰]74Ó ü•ÄÜ BÔHA™dP“ߢ錅ªÑ!ô@!«Ï QSV”D‘BYY79Ôˆ&S‡9£>àä zôÎX]–v9MP¸ûœb Ê®›ß2åŠB/e(ÔóaæáÑ‹9æûr¾¸éuLÿz5 ±›w¤zWf¼`ÀΠ™M>—9é1–ÑGú ”°E ×6å'q`®eCÛjü³«ÀÙ!}³3½ÙÊôXØß¿]i^[B«’öÑãAVަ"ãøì^5êeÀƒ¼¶ÙL'än0Ëåg±S ‡bx€Y³Wöù>n¸È™@/Ôq­é“âOµ|·y¡Œ&LåùeJL0|…^•þI½Zî{öcÙsN>“'p"Þ*VTÄ"8ºQCL?Ñxx_Pb–û¾ö-cÆ™èׇ‘…c v©KœÖ h†ùáÃ5ZxDÍ–šøß¹ix¬[3Üè˜rW FrmíðKŠ$áÑSà’e9½ò‡zç‘j¢4] ª]â?w™™U”cÇIWy‚³¥X«r7iêÈdiSÿp¾;o£ 3rÁ÷`{ñïö”l‘ض—K¤'Y„çePm5_ØŠ/_@€ü½ÎPÓƒ-¥ò{#Ö~,e·à0‘Yº[ÑfÞ¶G­bÇó&Û"»¬Ž8šñl°Úï6ªµu“X/ê¡O$ï/m”Æ‘ÙTïï«”îùЕÀcÕÆÀ pX M¦#%î@ (7/½­¥S"Üš9X¦üwiè:¬coç0+\©nЭÍâ›Ç˜Û­@yX+PXyìÅhîŒ9ðGB÷4ä’p?<½¹8`:(ú ÞGG¸£Ò=@<‘_[̆uKv²+Mì]A[ÏNzÜI(Ãv?zB?Œ@+ëÐ#†f> Y«§‚:ðù#U{úÈüB8•©ÁÒgSBX¬Qu÷ɳҹßoÑP5ÛÛ_ âëŽ)“«%Æi²ÆJÖ­é - Ÿb›ÊFó± º+ŽÖ‰˜œCɨ”ybìÊÀ’J§÷äÚçí™Ö麱ÑÎÑ•ÄåÐ@t¤4ËYrNj{6—}ø÷¡Œ§3¨¶þ L@dHÖ±àÿ;UÓþ,·ê†"À~=`û„ü£>j¨ û}‰þΩˆ0D>wx²NlŠ0v—}gßý*]õ¬ë"ê*“ ýOÄ@±íšCm äýÊ?–Ùx ܶxšVä¹!r@—É…' ßLçºHàe$—i»¹åºüSª¤ÜdT¸í³²º{¿á¥·°àTɇ®O8ÓJ¬q'ƒÁŠ/Èw²%ݰÇÞCåßçåщ^ÅL¦àÍ mÊ7tœZçJ•Ø‹/›/R\ˆY€{JЂ+(sDWñ^– 6^£Í× »‘x“íw’åS}iø³hno|@È «‡Ýnšì_„^Ia ç%h>¤)ÄXÇM™5¯ƒ‡úåX@‹Îœd‚T\¾¡~×—@ï¼O0Û<%kvB öC€ë*9¼½UÅVÀIãÇÉk'ºv T_&#vò¥{” Æ?üÏá¶ XzšŠŽšìœÉÑ `ÍdlýKìN¦uD70¥“S'£ŸeÚÚg§-±~½K{ÆFí™ï>ïÅŸ&¸­ËÜ‘òõ|ÐCM> ~~íSD˜7ȵ]t_>—?mñ$‚¶Ýÿ¬õu€þE iœ#cA,zV$õ5¾üJóìh‰mÎä@yŒëaò7„í­?Ú^0+š‘­ÊGfŸ=ò²VLŠ/çè ce&s‘ €çÅ{Ñ2_n¢´û†j˜r4§SSîÑÈU;YÊÿ¨z¯@ªüåÕ‘-Ë˯J¿lŠ7‚ì¾æØ”KÉß“ô~ß亷re ûÅxb~Ñ“ä£Þ6Jö "üÇ»é>ö.޲_Xè£&¾'â ElçœãbþÒ×AFG>[k¢/µÞê5+øé ÓèkïG_"^>´‘e=ŽýØ8ûQnˆîÞ2î-P>ç‹evÛ0iʼùÐc‡BP“Œ>ôê¾iÿíoá>oò£Ñ*œs':EFóµVOñ€žÃOÅü’“<`1‰¨øKe~ð%d`€e‡ÎÛ3–s^ÒìIü‘¶§Ì„íJE$CìÒèŽ~h8æäq‡ÖôÒîîxÃ-F¾Õ©l#áMïÒ\êé>Ø•H}Qnœ›Ô&ö~ñýe}©Ú¨{ Ÿv§¹A ýlûâ„QNyÃ-eHGŠ–¹m‘f‹R*åÌÌ‹ZUÍh\™Ì‰DÆŸpìÔïç®ÇB[غ^þQŽ N!QY£=¤Ô">þߌ+)˜»Ûîk×ÔÓñû„8›=96rÏV-HïñJ¤,t *†^Ö;Ƹ®7ÜgÏ•†Ö+ºÄJø‹àÆ›ì¦xaXÂëS`}Z”è¤Æý ÆE"VÇ„mKkÁÐÌüÞé?ZûÿôוrE‰ø“¦±ÃÿÐOUV „ò¡$ÜKK”…S$ÏÁé8O°Ê|NErÂŽŠm)¦«ub¶7ÇIoÀÕÂ-vúGBQ2ÔÕô&x ?,Öm•LmâÙL~s©Hâ»Oó.û—3>ð>tiƒßRà.ǤœpkaLµ¯7}´¸r+`äå%cHÕ?óƒz~ߊ‰HZÒÑx‹® 3yŤu’wK®§ ü•f©¤žšë÷uüíÍÀ®ìú4J£ç‹FÖã†Å½È¢’ñbgÒÆÑy$n Œ{§p®I#×0xXß.ДÓDíY„ªsNmšac Éýò7ìî _ «Ùsš¹[Ä;®oQ¢â‹Ì'U‘ç;“¶{&BÀçzýŒH¿ãl"~Hxå9®[[_²LioM°×8ãêYo›’I0î¨àû (e?=x®+³ËÄúÈ™ãQ¤¦T­âÄ"ðyÀ.×!Ƚ$²4ç­R ëêd$rW\¥‰X!„íKôédše¼Ø÷²Ì¾{ПØèð@‚ŽÒJg„)#R»ëþÁ)Ê™÷Ø U3iå/efæ0ž€OATJ@”záP¼ Ãùh)À>ë(U˜º!*¥yØÞM‘Ùp\Ó*pûk®Ù]Ú©ŠóWn§Fw ß¡˜(†3=ÔØÐY 2ZY·QàlÞYøìœ\ÚâÄ/¸á— êñ&h„´E¢[ãrdjfÁ½¨½a'f,Ö ˜Ìÿ#t²²ÇRº6k”$Ç+ñÔnP4Ö_Nc1°‹ŸÐÒëxÀlêtS;1PÞL=;èɘ¼Ö}tx‹F ƒ¬Ä ¡¥u%Ìd ûÚÔõ ÆK2®ot: ¯®ñ›7/ìQÂïÈ„¹^2Ï ø:ê[1áN&š Ýë·Ü4K§ ªg. 
æÖ¥×„l§9lP¡­=¤ÀXRŠ;‚½ƒwŽºØ-C·þVÿUŘ–ÖnÒ°ÜÎfdvÏÂDg‹  Ùï·ÙE¼ËY¾=¯¼×ƹ¦¡¿*îy«ÌœÁQà;ô‡¸*ª]yxÕɪ•Úì°ÙëÌ냹äÍ\,÷~$+ÌTÃ…ÎF¯D¥ì}±¨Òç;¯ìó>GÛÇúELëØ.AVöË¡ˆ`Ñyä€ÁO'ºf °–äZŽG¤P×ì׫ý¸+ çÒMYH* QfpÀ¹í¦î涉¹ÎšÊƒ_«Hi#n¼” æwмOjvþ÷´-»É&¼f†÷º¼@˜»_âc“ªzÏ"”‡þ­ E­%#:VÕ˽·ôð CM³Ú\ÉSjo†=Ìi¶úNb>ÂCþ]ûtdñ±÷á\/#Œ¹àWUêΉé]LÆY6Œ*V¾Šj!ÏÆ)Q´pçU¦DD”“lïòо\¨w<~ÊXXª`‚qe û%Hg§:ŒãçÞ‰=KL* Úîóè7ëN’¯Ij§Çó9œÞ–GÄìpŸIþéêm!äŽá‘ D—ÌÂú•}S1“„øýƒq£û7ñ|A ¤W´ßÔv £Î© ½3ì ¨ Ú,•’ @("ÂÚ´pO 5²3Æ”¡óC0`'Ÿ¸Æé$˜ùøtò,NHäØú~@ÛÓh9äÒ™(åžM\ Ò7WÔͲ" Ó‚(´”¶(/ÊTCf”' *ÄdÏaÑõ}Ÿhî¥E4Ý ¿ÕUŠzÄJÒŠ[ÛðÝbõ\4"<0>EÏH`M§zI[¦4‰C¡]U@5¹rF§”49eŠ2såîì21Œ|w=á;6ë¢àÛ™À"SG)Wœ×”œîõæGX9$Ÿ¿ }ššò­9¦tBdqƒ¹àçZ>É]D!=1`ògì¿g¬*<~Ùß“Œ¸¡^jcZêJWb+›¥^›w¿R¶¼ëᦙkSõ™(ˉ‘àcÆoeëT;èäÛçRö¹¡PÆa\¶^ZfkømÛº½« C™ææÉ{GûÓ¡¦Þt·=¿aˆ' Wô,3C}3ÑÚâ0qï]ižiã‚r§Ýý Î?|Ø…v8}C7kúY\×*$沩„Év$ʃB"ƒVÉRó™BÍ‘Îrž“_lm –¢Ï΄§6¢<TcÚÝÎ¥ t<`¤N?©Â&—„cóÉIÓË{ùiÙ¬Õ,ÛÿŒËi›.€?DEÉ‘žS†ÀÕx:Ä¥”IN¡ãÁ€r#¿ä¬`‘ù{{ÆI°´áAŠ$,¡0Æñ(åâx‰¯”,¥µ´¤ÊyɦÆÕ˜¥QFæ\ÓfsÒ^"2)YkCøÑñ`ÎR˜çáœï'!µ}ãÆiÞCïÿ+-?9¶îí‡ì‚/xÞµ:7£³Ú…lÈÓ@™xäfµz‡k“£µ#×sm.Ù1ºäÃ;Hn:Ò@Ò‡Àµÿbò±£â­Í7vÝ»!Ñd__&"Ð ;›‚€£ƒEþèræéXWˇ¤3Ð(L/cEcî(s"Ÿxß™Ï;ˆ%²¡Á^Ö) QËYëÌŒ„–“}&ã‰þT‹*` À±_¥“û)Ú=‘MwÑå¾§СZR5,™_» †j†Oå 4¯J0@€^!$؃Ö_žs:ÊœNú~•¦Š„Ÿ†C| úíÚjÐ}o±TAO-O¢áꀞ;œ’°X„‰„–~q¨zÖ‡Ý,’9D l·ÕÙÏ’Ó&¹Ñ¢\dðÃЩÖåh>Bä”*PR;¬Pø?ÕâîTé¾[›ï—: ²;>nt’_˜Qš`¶Û²]úA× •ÁÔqPk‡ÆèœÈ^g–&'•{ûîo‰V²àµãõ(CJɹè|¥#}BÇÆ—{ð!=ñ÷/%³üË1ÛËRù¬8Y1Ø4ÂÀ¾¡ÿN¥G'ÉA\’¦¯üa4Ëð)®Ñ@  ”Yã}Ô‘îh)nëTUÛ1NHX•¿É>í]òˆ®ÍôtÇQó+>¾žÁ{:ïp‘6‡³$Ѱ„D8ÿ+7&7L‹$B½yÙvM‡4ÿ\FððÅüŸ¬ºÞ»OQÝÞ,|Êmª!œðÎŒë Ÿðe/ Ø_Äxr2Õ„¢PEKƒ‚æ¨[I `«¬k.Ó¡NÞò3DðeÇ‚°iL²MYqÛyMêÜL$´(°ÝÊŸçCNÄ,A"že¯¦Åa²F´'Ó çå^þ¿?-›>®M„±‘s¿B:ĘQîzpëŒh– wÑì¼hÑ ´4uNR”6'ôTÂÚ ¹,zƒ)L¶% ó þ8Þܸ¦zv 7–Þ¡£¼DõÞ‰yO'ÍsïÒ«¸®5lÆ^®/Wè˜è•H^®SbÒ´W¤Dð¦GÃãêTР:à-ƒGs3å—0Z¯ '`ØYcpÀ’jñö«#Z~ãêkDcøÄD u›%íÐZ‚üAüžÚš£æþ0ŠHT\Í¢'Êßë7Ië…ßùÄg2ƒýÉRÃLmd@$âß¹j}ãð\q _÷¼äRe©Ö‘„&bKÞ­{2*ë"OY+{EF(®’áÇÇä¾”+H+Úz]Qrjì­p wvªdU‚¥„Ü5ÊpœÁH`P-°6È“ýßß•|¦J¹Ãõü 78¸<_nÚÅk_«}£Iy¡Þó5~±sòH@±3ÞC¢ÊRä;³Œ~”Ú”¨}cú~Õ«ÂÛBxE{Lƒª9òé7ûUšÞjÎ}'NóÔ!,ø»J¶F„+êµå‘£wÒÅ»„ ™‚³ä¨G3yœ‰Å-lÀ’ƒ¯âŸyÃ&î`În}A)bhgj˜ôs'K©ºµxñ×(€ÄÇU…¯,µ£SÙvHÊùc=.<»¥Æ2ìi‰Ìr·È“,nzŸlÉQ7Z—L•¦¨Z…ì0z-½ à¬ïŒ~TßÁ N;åœjÊmºxGÑÐÖM~`Ó¨w§à|áÖœI®®ïa'¢XÖ‰‰#Äï@¨q $` |iRÃ\„´7õmÀ;ÕÁ7nðMœ„·ßL!Œ‚òÔÍŠ'¥cT¶ŒŒtÀ¿ß2ÿÖÍÃÑ4¸©À˜+(b‹¦(×~5x¢å–à%•ôé¢n<7"Žz ÐÌö4îôªÌ‡á>ÏÚªõЇÈíÿ⃨d”2ȼ/Ø%d—ðPæ¹kù©&Zç»Ðj} ¦Tà2üýù+Zx9ªÑçÏõÚ©Ïœ•E·iÔæÀÓÚÂs^¤7ë;åZ:½É)îõ†kq]Ðæ…Ö Ð" bÀUÙ¡gŸqG8¶kõ:¤’›©Êb¤ŒÊ^¤wªñ›3Žÿá‘*Áǧڈ˜;v]3a—BúÁuæÙ½r¿7:^,¬ fš©ùí™<×È.›Ÿ¹ÓjÖ½ÜäaWÂzë`OÃëcJÈ® 9¤uw=&èä;ûزޭቜˆþýøVÿ“»©AˆªyðªeI߆*>Ò¶3Sƒç,°‰ÏH¶é6ïS ˆ¨ ”wz€ÉÆå”ü£J‰¦AÕà[-EãzÊVTÅMpü[LŽÛhç„S{ЉÝ4`M¾sÁÁ$xQE½ÚMDuÌ’ÕæUâáŒÓR+lÝß p PÕØ{0¬ø;kK|!¤Í톻ql²¼ #UÐjx~Ýp´ö'y4¥]p€2œáYEä)¡Ù{ãVN¹¯Ï¸òrgTHZƒ;‰(÷óÏŒWœ»ÏÙX"ÃáQÆ’š/±bÖžBݰÌúâ£*¾>äKÖ¶ ÂîKñ0 6ƒð„BÄãUNG§·ØCGœno­žbs¶|pB)¼„þ„Ѐ,{©¦!…ˆ*”åÀÚ°qÚ¨ºJ¼o}¡é“ñ™bÌÓõEEìœVà«ÍŽ”%I^±¨ƒ¥Ÿ§JÃzŒÊÐ<è!|î6Kà&oÍõeù~ø r1p“x÷ •—ò°«ÀÙ õû™4+6Y—5zÏ¥f–o”~?UpŸobܵÈ „Ðìnd¾³_”Î3ã·– S.YͶ‹ݱÜ`=ËÊ„|Ð*á»v*˜Wú-Fó=Zë^Y`jp7Ã] €õs¢{ÚWþ쑱m)¨ÒJê9´>¼Èˆ¦3"ãºk£&^¸$ÈuŸdS’6f¾­ílõòù'Êu“2$šß,ÉÛ¨¯È* ›êO,S³*óÝœ»4‡ß©;º_Ý·s’óH1ƒ|¨ 8Äö¿æ\¿FÌͽ37öµs;€Çý)g3›|9и½¤ëwãÍúcM‰˜ØxÒtéL?q1sÈô (¤þ÷ÕJ÷Š©)©ŽTý­MèK &iD”ÙZ'œQóß O´m{á6=ãÜѸGp£¾bû"Ë}‘ì£sVŒí·#©ZçNY8ÖÝ0e(R­®“9z4ýÌeÞr‘{RØ¡0«úÒÚ”¥ûU‘;¡A„:SL1ÌõíS B:ú4Ïɶåy_õu¯Ï(dWb/w£Hñ^ÍPm“NПëƒÙ:ª_'®ìA¥gÎ|½“ÃÔ,½^÷üN¿]/÷^þÀ´U˜ÆþðàåVÔÒšZ±ìé°sþhŸ%äóÜ™÷ΨxêŠè NóÈ›„Ó £œ8éÃå½®Sã¢1uÏÌð–Xªh8Í÷Zå9rs/¥”¢:N ã¶inñöΦʢ± ó 6!ºÖ»Øi®a¤”3Bf?¡=+¾·¦j$¸ k#ZSûÿ Úò€Ó>žKh7CŠMŽ©eÍ—%¶Ýí-lç%ÇĸE¨u#:¶|/ö 5úoù/õ˜7  çË‚¨Í- wT^‘~ýã¼(&?2|­8xžüP«Ç-#—ŒÉ$i»„Rp!t1À58XT äœnˆÈÿ?š«0 ·•D*iº•k¬ÖœUiZÖ›-:³Ž?±· ×‚Z¯À·AâÕ¼8óžgÈ9€B«zŠ›|qê2CSe?qS°ž<ñ _~'ûR%ˆÆo3ý~WÕGú+$,7 èàÜõ_Aû æj¨ÃzýßûÓ’( Èa=0Ö#1k}-JèõH›c%=Ò'ÿ쿺Ãâ:5b›ƒŠ¢ßü{ÇS-õ®í??\³…òã n/¬øÂÊfUPŠÙ¹I°`rïb Ž«ãlFÊQ?ï¸PÙ–ùu.lj¡•ŽeáV\–æî SSïE£`éSWˆ÷X)ïm‰$Ì\܉ Ÿ¹eFþ"5/*&‹6HçVãÁ;‘ög[ý·FÁ ê nFÂ9tÛB‘º`e‹º0ˆ‘%ÚöõjTFÚþ5JL„ˆ¸ëgÚl0ê¾Kºk…ñÁû 
Ò¡jZ_T~œŸ¢ZSaž¼6ÏXDÔ…èæSE¥˜Ñ%¿‡Ôîk˜ëõxeºSüTG5ç±RLmËUz¿Ÿ4Ñ’>«‹»k—ì'¯G¶–èJl/;Na:F³mkm8DL¯]t í%„B<þÔÉb®1¸º£†¼„ǶK-„Íy;Ðé,n¸ŽK±œZ ý…Eš`½×ZòÎv¼@ë¬õÌVÀa2[«Ì§à†ÔB-;¡‚LJÚX…;(ðj\¯½}0HðZÌ–ÌØ×.j ÑÊÎV6‡¼¶|GM(6¨JŽ¡Â ÒƒÔ¼F‡³Õ1‚´oìHàÌÉ$6¿Öb'@²-êÕý¸/Î(§xJÙè¬Û¬y4¾*ër\×S³BÍi&[ã9ßì*ÿ_z¯b›Ÿ[xE$uý ]°b1ÓxëŒýºzôvÆu;A«Ö‰pÓ¿ˆ©ï;´A¬¥vÃ&(Š®UXLߌñ‘Þv-<}ÕΡ™wàœ²:{.°øô5à‰io[? 5 x`«±r=·{Á4‘ |*@l~·ÛÀƒ(˃¨Uèàïk:ô„ƒžZއJ®‡Éå°j/ô@t9ßV&|›(›Óæm-Q¼ o]úmpdkaÅuî›Fô 8–Ù9Þ¦ÊÜ ÔöÇZ>¡|hä´aSg7ÿ9“¿N½–0 C¹bª.#C|5¨UQ6|p0@9D ÷]?"èŽN€ba" ¹•G‘<·‹íf‹oRHÖ`(JxëŽp‹á/;$tu’Ðã(ÖÎx%˜ÿlÈçØà9SávXç […A«iwý½Ôt9›·@Šöê"¥£3±Í–GØZ÷‰§ø?œ?`ím4y ¿.!M]([SÀtŒP½„„54£“Bh$-P`RÇ1Ù2‹ÊVtBÆ_ýÔXxd)nP<­ !UÚÿ¸2QÉc\¥eýåËhí¹êpÕäÒž|b‡6»eÊàzuÕ Òbh+ò`N*ñ9³ bŽÒ׳+¦.áUC» ²=†ÄkÙ%n.ÄÊAfK ’„'þ@¤iÁãŒa"`Cˆž §¡ÐÞ8ú«m a0P!ñ›Ñ?!‡Td‰Ü¡(Vî?ß'óýòîøç­3"¿^Ö¼·Ê6³üíXìÕkWmHÄá¶ÖË–µFJžE@·ý™ùã×gZÉÞm•ž ÉÊÑh €¶wŒpšòÞô¶ð-…¬‘²·(œ4âæ\ûƒíŠD”Ǫ[ÆäŠ„CA„HœÍ º¸úžÁ–˜su‹Ãùc¿â‰~XUM<¬mÄd«Ç]ˆ­“Z´:¥÷6åkÁ£”bÖ¼6Çrø|׀Љ!|Î3ÛFˆê¥í"[£ :,hýâõJfú¶èÄõO"´±Ãt¼5.Ïm>s@Áƒ+ø¶TsS†êTò-}Æ ù»îK‘;F=¾ÙÆHœŽ¹KñOSe—ç ÞG”•#N•óûÜ3i6%]M8\km„¬Î;‡øÜªqŒºÿ÷î1„|»#Wc|è‚ú3…âåB”ÆJGGà%– õ…ý1 y¡|éR9uv˜¦¢Ä|Ä„^fá Ε4¼ò%ÿß™wžq¹Œÿg_@áEÚK>¯ÁžÃ²ÚbM¾iÖÈ y•w²LÔ\¤„jÌÏÙ42CÙÏ¥r¶þ±³ÀÃÌ *$ <…]·Ùj)a€¤°n¹åÃoô!ÞÜc†K-$‘ ×LÝd`ö”ûØÊíç.áÿ†‰íÚ ÑüZ){òh³0o¨D@ûl uˆ)¼²¹ˆË/@áíð°ðß4°òèÖÖ ‡ÆT«´¿¯NI=W*óïq5&Ïô`Zõ<;±Ä‚ˆ ’­pŸ®ˆöúð´¨OA®=}ÞSc¶I¾r’Ô±èÛ?…ŒÍWÅ”5b Hc.R}X2æ=4À•Ðz$¹KëQ7d­ä„ÐßôF6á2?£:» ´´›åbk¹}»ÆãT‰ë8’©PàkŒ7ZK 0Ê`_Œsr¾4Xqsõàø8Ij‘ÊôÑú,&:Õk"ùV(àÆH¹M¯Ï×êú–n¿!oÉtèY2ã誈Âc…Ÿ Sù)á‚×3¯C›€žáêç…“`=Ú/EgBØ jˆ~KEÌçëÚ<¯CVÑá߈k<@ÞDçqÝSœ×ñ¶Õ:À¯¦ýº»Ýo $Ëk_žR‘}Àx×ôêEƲuq;ƒ‡Ã=D h_"骧Vt9ù6‰WµÃ«æ†fÏ{`þ'‹LV´(ÙDiM¬Eú„X+ù45 W±‰xLÕ uÞÝòÓ#¸n}5\0b²qü°ñ‡é¿éXñÞüfcðo Â^ m8˜IC`h™éýÖjOض`Úºˆi>Žfmúc⊶ÛBö.c’žpF(cÇO-¬PæÓ»§–ò›´ŠšÌÂhtœsÙS÷_ËR{o7®eæqÛQ!´‚ý˜ŽS¤d·ªEDÇ<ßès;Õd Ýê ¹ö©P¬ñ7Ñ‹“xØàSž×ÿ߀Eƒ¤ “l :+PPM‹Âp¬”¼ÑWɘp†õRt粘¤ÿè»ÜC©ÃPÎ÷{P¦í÷-1 ƒ]Û¯âÍeÃÝM”™ 6Åôj°f×[]®ŠT€ÂY‰Å¤Ö9|ÚÉPÚ÷ÞBÉÁóõÖx´,KÚOx$äIXà™%f)Øåh#9sĬ0樑† ‚éÊÝK}‰ö>ïb²Ï™ÇÜØß >¹)®áüý'‘× ›´N_í—ËPEq UöÂ'¥’»1A-ªØ¦ùYÉê-É®Œî8lb6]ÄÒ uMx OîèpŠáaDXþîA•–þÔ:'ÕÙ/ÊxäºÒ/#^íØ¾úo'ß0™¤}–TÊi’ñ€’½Ž3o$‚Çz(åØ~ VÎòH‹R^qÙý©' ®8Ìì0(°5-÷nÔÜŽ_CÅÐ6ZG›?N!½¦…ôùh”™'R“œœ ¸û¥öŒÔG W‰A§\—;P程ÒC©®b\>;W†4ÏËŽ *—À_$€`®$´×ó0…’5õ)WCjKÿÍ»Cm\>£×ž—Ä,¤ÇP³[R©„5®Âtš¢&S ‘•ðì1Ÿ»TÄäQ¯h:äOõï7Ef2R~ûÁÃÁa|[³•Q7ìð5§H {#ÚÚ<ÆÄt4›¯m÷Ù"‚“•«yEùÌæö¯‡Ë9)³KŠÂïºìx]¿&ð°qH{Žýî vQt拽¹‰H›8Òš©>½3|P´ÎôËßw¬ç=ô ï+×§:ï­È*F ôt¦~yG²ÛÜTÇÄ~VÜ?Ãy¹Zœ¡†Neü)”òS` àË'âU9¦Kk òˆñ`WúãUb‹O´ ³8Î2?<ÙqäÏíìÉܔ䬯RIn£ØÁÛ|Ÿj½ ©¸‡ýÚüÅì9v_pw¹•`ì±éÂVäÒø€ÍÿWjí’±ér@Êâ‚TÖÇ>q൑nj_EÒ5…ðøŽÞ‘º²pr?•âìÍ å‡ö€w[ã°SV1‘êjÜâ’¡TzËâ°Ù7<$QÏtƒ€•ñ‚x€Ù­à2ÉÒ}U!×yZàñí<-lIµ¾¯ç;æKæ€qˆÆÏ{i8±œj›P/h—Ö͹õÿÆÀ)ýa¨È|ß-‘DÒ°Èàà0(,sMÇ ’ðq+m:°1ZPK6%Œ¤mj\;‹¢!¹úØÍ¬"•8œtHÇW?ÄÍ•ÅàÙôóü6Œ°±áFŠºth ¹5õ›º+ô0IšU¾LnÃlè±·×|ôzªÇucÇŠkÀ†»8F¦o`B̽Z˜í)ÂIè4HÒI]úÌfÈSmÔ:ÿ&NÃõõ@n‹ò¤ô£{wÑ…x¤ 1fY#cðÀÍÓˆZÔs¹ä[àØ.i·ó“p%è½ÞFjåãg9£È™ji©GÁCÇÓˆ¦ôÊΙvLxKFS¢»@P0d–Ä7ç1ñtLâ/G°¢8« ˜AÕnþ–ê’§‰ÞQQ“ýÎj6L·vz]¢‰'±[¬ÔÝ%¼­hE”ŽÜw¿;x~Ž.ÊFkТceæº4já¥d÷\Á¼³›òu ì좒9{ ŠÔµÄÂ‹rCÉP|XàáN {¸G_7Jm¦€æÎâ†Ìh“tiÕiØ©«S0¢_†žÕ 0RrgGËÚó¾ÛœÌ‰FÅÇ®_ƒ£©ØIª¢KhØÆô%=·í·Œƒ" m -šÙ ª\¿RÄó+!Ň‚ -A@ iñWè&(³Æ ª<] *܆|Á?a(©‚Í6Ü-CH1c@Á‰©x¹;kËkÝ8b´¥þ•skxí«i5ê¤=ÊöLþ wQØÒI˜ÚlPšždqôR6¿ibó³!l¢×¾pÉ]ÙHú>ÅLFå#åàêr,ãæ[ØcŸ«4ç[Kï)Ïî<ˆ|à° ‰nÀ 95aÚ¿]|ö[å}}o¸¿ÛÈ­”À„•²ÉŽó6VÀf8ε4ùeA$`Êôu·êê¦ÎÓ¯þSml©Ê=ÐQÀxC`Ð"Â×ä†[ˆ¹G°"ðD.Òß-b@CÚ«"<™ˆ1Ø4K é€ùCà4’TVil½vµÆbêÜËîÛ ~DEþ·Dü×÷×þ@KIð¾®²|`‘Ûn]JI=h?VߪŒðVŠ6ÙŠlÓ-’»Hâ>kf‰"÷ü”䟤+hž 5?xf ÂN.2,•u]µmÙ »%dÌTRàÇ#¼¦Ýëm QbcHG­%Ðý¡rBýˆ@ ºG¬3['´8QÖ× ÐEîx$ŸôŠÆò޶غ²#ŠS%&¤Œ³‚ݨ/Ç^Ðsñ&ˆ¨E¿ó³Wá"öqw.ÊÊQ«<æ€"Àsà¡c°E‡å,Hgð­¿Ð”Nî-±ÈаÇ:£ÄÊû’Ìå8g©µ“HöºgÊ¿'?ÅÊ4[‹ þ¬ëâr] CÞb|*€î ㅽׇBóö¿‡Éê2ïàçúfQÌëªVvý¿3¦~³ w:Ñ—÷èÆûûIˆYÚ91þWoç<ÎZ Ÿ§FŠ07âà+¹C5Ê•5ã·éT^…¹qxš…yÙ[â"óž[¦Á ¿b¸Œo’ò(¿¬Ëÿ‘@¤½¦¢æ|.ùh:hôTN-T¨|& ㋘A†•ÿì¾jË›mQ·K8;éàMO–î¡Ð„£þ?“•JqÒÅÈæÞ3øk©”“âk-‘aUíàéÛ^¦îzÔ¨Yѩך–PWýþ‘B”Uî h$-&]á}‚ˆA~à [ÐE 
spatstat.model/R/vblogistic.R0000644000176200001440000002135714331173074015720 0ustar liggesusers#' Variational Bayesian Logistic regression #' #' author: Tuomas Rajala < tuomas.rajala a iki.fi > #' #' Copyright (C) Tuomas Rajala 2014 #' GNU Public License GPL 2.0 | 3.0 #' #' Special version for 'spatstat' #' #' $Revision: 1.7 $ $Date: 2022/05/23 02:33:06 $ #' #################################################### #' Used inside ppm vblogit.fmla <- function(formula, offset, data, subset, weights, verbose=FALSE, epsilon=0.01, ...) { mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "offset"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) mt <- attr(mf, "terms") offset <- model.offset(mf) y <- model.response(mf, "any") X <- model.matrix(mt, mf) colnames(X)[1] <- "(Intercept)" Vnames <- colnames(X) #' then we fit: fit <- vblogit(y=y, X=X, offset=offset, verb=verbose, eps=epsilon, ...) #' names(fit$coefficients) <- names(fit$coef) <- Vnames #' add some variables to conform to summary.ppm fit$se <- sqrt(diag(as.matrix(fit$S))) fit$call <- match.call(expand.dots=FALSE) fit$formula <- formula fit$method <- "vblogit" fit$model <- mf fit$terms <- mt fit$offset <- offset fit$data <- data fit$xlevels <- .getXlevels(mt, mf) fit } ################################################### # the fitting function: vblogit <- local({ ## helper functions needed: lambda <- function(x) { -tanh(x/2)/(4*x) } mygamma <- function(x) { x/2 - log(1+exp(x)) + x*tanh(x/2)/4 } vblogit <- function(y, X, offset, eps=1e-2, m0, S0, S0i, xi0, verb=FALSE, maxiter=1000, ...) { ## Logistic regression using JJ96 idea. Ormeron00 notation. ## p(y, w, t) = p(y | w) p(w | t) p(t) ## ## Y ~ Bern(logit(Xw + offset)) ## w ~ N(m0, S0) iid ## ## "*0" are fixed priors. ## cat2 <- if(verb) cat else function(...) NULL varnames <- colnames(data.frame(as.matrix(X[1:2,]))) ## Write N <- length(y) K <- ncol(X) #' #' #' offset if(missing('offset')) offset <- 0 if(length(offset) eps & (iter<-iter+1) <= maxiter le_hist <- c(le_hist, le) cat2("diff:", devi, " \r") } if(iter == maxiter) warning("Maximum iteration limit reached.") cat2("\n") ## done.
Compile: est <- list(m=m, S=S, Si=Si, xi=xi, lambda_xi=la) #' Marginal evidence est$logLik <- le #' Compute max logLik with the Bernoulli model; #' this should be what glm gives: est$logLik_ML <- as.numeric( t(y)%*%(X%*%m+offset) - sum( log( 1 + exp(X%*%m+offset)) ) ) #' Max loglik with the approximation est$logLik_ML2 <- as.numeric( t(y)%*%(X%*%m + offset) + t(m)%*%t(X*la)%*%X%*%m - 0.5*sum(X%*%m) + sum(mygamma(xi)) + 2*t(offset*la)%*%X%*%m + t(offset*la)%*%offset - 0.5 * sum(offset) ) #' some additional parts, like in glm output est$coefficients <- est$m[,1] names(est$coefficients) <- varnames est$call <- sys.call() est$converged <- !(maxiter==iter) #' more additional stuff est$logp_hist <- le_hist est$parameters <- list(eps=eps, maxiter=maxiter) est$priors <- list(m=m0, S=S0) est$iterations <- iter class(est) <- "vblogit" ## return est } vblogit }) ################################################### #' Predict method predict.vblogit <- local({ sigmoid <- function(e) 1/(1+exp(-e)) predict.vblogit <- function(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, ...) { type <- match.arg(type) if(type != "response") stop("type not supported.") if(missing(newdata)) { stop("not implemented.") } else{ # newdata #' build the new covariate matrix, inspired by predict.lm tt <- terms(object) Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) offset <- rep(0, nrow(X)) if (!is.null(off.num <- attr(tt, "offset"))) for (i in off.num) offset <- offset + eval(attr(tt, "variables")[[i + 1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) #' predict using probit approximation to logit-function mu <- object$m S <- object$S mua <- as.numeric(X%*%mu)+offset #' was: s2a <- diag(X%*%S%*%t(X) ) s2a <- quadform(X, S) predictor <- sigmoid( as.numeric( mua/sqrt(1+pi*s2a/8) ) ) names(predictor) <- rownames(X) } predictor } predict.vblogit }) # ################################################### # print method print.vblogit <- function(x, ...) { splat("Variational Bayes logistic regression fit") cat("\nCall: ") print(x$call) cat("\nCoefficients:\n") print(x$coefficients) cat("\n") splat("Log-likelihood:", x$logLik) splat("Converged:", x$converged) splat("Convergence threshold:", x$parameters$eps) splat("Iterations / max:", x$iterations, "/", x$parameters$maxiter) splat("* Caution: the estimates are conditional on convergence.") invisible(NULL) } #################################################### # vblogit family method family.vblogit <- function(object, ...) binomial() #################################################### #' vblogit fit summary method summary.vblogit <- function(object, ...) { splat("Variational Bayes logistic regression fit") cat("\nCall: ") print(object$call) splat("\nCoefficients and posterior 95% central regions:") vna <- names(object$coefficients) s <- sqrt(diag(object$S)) q0 <- qnorm(c(0.025, 0.975)) m <- as.numeric(object$m) df <- data.frame(estimate=m, "low 0.05"=m+s*q0[1], "high 97.5"=m+s*q0[2], "prior mean"=object$priors$m, "prior var"=diag(object$priors$S)) rownames(df) <- vna print(df) cat("\n") splat("Lower bound for log-likelihood:", object$logLik) invisible(NULL) } #################################################### # Coef coef.vblogit <- function(object, ...) 
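  # return the variational posterior mean of the coefficient vector,
  # stored as 'coefficients' by vblogit() (est$m[,1]), mirroring coef.glm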
object$coefficients #################################################### # Log-evidence logLik.vblogit <- function(object, ...) { object$logLik } spatstat.model/R/summary.kppm.R0000644000176200001440000002361714336564321016223 0ustar liggesusers#' #' summary.kppm.R #' #' $Revision: 1.41 $ $Date: 2022/11/21 02:52:46 $ #' summary.kppm <- function(object, ..., quick=FALSE) { nama <- names(object) result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))] ## handle old format if(is.null(result$isPCP)) result$isPCP <- TRUE ## extract 'optim' object Fit <- object$Fit opt <- switch(Fit$method, mincon = Fit$mcfit$opt, clik =, clik2 = Fit$clfit, palm = Fit$clfit, adapcl = Fit$cladapfit, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) if(Fit$method != "adapcl") { result$optim.converged <- optimConverged(opt) result$optim.status <- optimStatus(opt) result$optim.nsteps <- optimNsteps(opt) } ## summarise trend component result$trend <- summary(as.ppm(object), ..., quick=quick) if(isFALSE(quick)) { theta <- coef(object) if(length(theta) > 0) { vc <- vcov(object, matrix.action="warn") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } if(!is.null(se)) { two <- qnorm(0.975) lo <- theta - two * se hi <- theta + two * se zval <- theta/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) ## table of coefficient estimates with SE and 95% CI result$coefs.SE.CI <- data.frame(Estimate=theta, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } } if(object$isPCP) { #' ---------- information from cluster parameters paper ------------- #' sibling probability result$psib <- mean(psib(object)) #' overdispersion index win <- as.owin(object, from="points") vac <- varcount(object, B=win) Lam <- integral(predict(object, window=win)) result$odi <- vac/Lam #' detect penalised fit result$penalised <- !is.null(Fit$pspace.used$penalty) #' optimization trace result$trace <- attr(object, "h") #' spatial persistence (over window) g <- pcfmodel(object) d <- diameter(win) result$persist <- (g(d)-1)/(g(0)-1) result$ispo <- poisson.fits.better(object) #' bounds on distance from Poisson and mixed Poisson aW <- area(win) if(is.stationary(object)) { lambda <- object$lambda mu <- object$mu EN <- lambda * aW } else { EN <- Lam # integral of intensity mu <- max(object$mu) } #' first bound tvbound1 <- 2 * EN * (1-exp(-mu)) rules <- spatstatClusterModelInfo(object$clusters) newpar <- object$clustpar oldpar <- rules$checkpar(newpar, native=TRUE, strict=FALSE) if(all(oldpar > 0)) { scal <- newpar[["scale"]] kappa <- newpar[["kappa"]] result$phi <- phinew <- g(0) - 1 A10 <- phinew * kappa * scal^2 h10 <- rules$kernel(oldpar, 0) #' second and third bounds tvbound2 <- phinew * EN^2 * (1 + scal^2/(aW * A10)) tvbound3 <- phinew * EN^2 * (1 + h10/A10) #' fourth (new) bound tvbound4 <- EN * sqrt(phinew) #' save bounds result$tvbound1 <- tvbound1 result$tvbound2 <- tvbound2 result$tvbound3 <- tvbound3 result$tvbound4 <- tvbound4 result$tvbound <- min(1, tvbound1, tvbound2, tvbound3, tvbound4) if(is.stationary(object)) { #' characteristics of nonempty clusters em <- exp(-mu) result$kappa1 <- kappa * (1-em) result$mu1 <- mu/(1-em) result$kappa2 <- kappa * (1 - em - mu * em) result$eta <- kappa/(lambda + kappa) result$panysib <- 1-em ## distance to mixed Poisson A1d <- (g(d)-1) * kappa * scal^2 tvbmix <- EN * (phinew/A10) * sqrt(2 * (A10 - A1d)) result$tvbmix <- 
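          # a total variation distance never exceeds 1, so cap the bound at 1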
min(1, tvbmix) } } ## ----------------------------------------------------- } class(result) <- "summary.kppm" return(result) } coef.summary.kppm <- function(object, ...) { return(object$coefs.SE.CI) } print.summary.kppm <- function(x, ...) { terselevel <- spatstat.options('terse') digits <- getOption('digits') isPCP <- x$isPCP splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isPCP) "cluster" else "Cox", "point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) Fit <- x$Fit if(waxlyrical('gory', terselevel)) { fittedby <- "Fitted by" #' detect whether fit used a penalty if(isTRUE(x$penalised)) fittedby <- "Fitted by penalised" switch(Fit$method, mincon = { splat(fittedby, "minimum contrast") splat("\tSummary statistic:", Fit$StatName) print(Fit$mcfit) }, clik =, clik2 = { splat(fittedby, "maximum second order composite likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(x$optim.status) }, palm = { splat(fittedby, "maximum Palm likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(x$optim.status) }, adapcl = { splat("Fitted by adaptive second order composite likelihood") splat("\tepsilon =", x$Fit$epsilon) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) #' optimization trace if(!is.null(x$trace)) { parbreak() splat("[Includes history of evaluations of objective function]") } } # ............... trend ......................... parbreak() splat("----------- TREND -----") print(x$trend, ...) # ..................... clusters ................ 
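  # Report the fitted cluster/Cox component: model name, covariance model
  # (Cox case), fitted cluster parameters, mean cluster size, and the
  # coefficient table with standard errors and 95% CI.
  # Hypothetical usage sketch (assumes spatstat is loaded and the
  # 'redwood' data from spatstat.data is available):
  #   fit <- kppm(redwood ~ 1, "Thomas")
  #   summary(fit)   # printed by this method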
tableentry <- spatstatClusterModelInfo(x$clusters) parbreak() splat("-----------", if(isPCP) "CLUSTER" else "COX", "", "-----------") splat("Model:", tableentry$printmodelname(x)) parbreak() cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } #' table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { parbreak() splat("Final standard error and CI") splat("(allowing for correlation of", if(isPCP) "cluster" else "Cox", "process):") print(cose) } #' Cluster strength indices psi <- x$psib odi <- x$odi if(!is.null(psi) || !is.null(odi)) { parbreak() splat("----------- cluster strength indices ---------- ") if(!is.null(psi)) { psi <- signif(psi, digits) if(isTRUE(x$stationary)) { splat("Sibling probability", psi) } else splat("Mean sibling probability", psi) } if(!is.null(odi)) splat("Count overdispersion index (on original window):", signif(odi, digits)) } #' cluster strength if(!is.null(phi <- x$phi)) splat("Cluster strength:", signif(phi, digits)) #' spatial persistence (over window) if(!is.null(x$persist)) { parbreak() splat("Spatial persistence index (over window):", signif(x$persist, digits)) } #' bound on total-variation distance from Poisson if(!is.null(x$tvbound)) { parbreak() splat("Bound on distance from Poisson process (over window):", signif(x$tvbound, digits)) if(!is.null(x$tvbound1) && !is.null(x$tvbound2) && !is.null(x$tvbound3) && !is.null(x$tvbound4)) { tvb <- as.numeric(x[c("tvbound1", "tvbound2", "tvbound3", "tvbound4")]) splat("\t = min", paren(paste(c(1, signif(tvb, digits)), collapse=", "))) } } if(!is.null(x$tvbmix)) { parbreak() splat("Bound on distance from MIXED Poisson process (over window):", signif(x$tvbmix, digits)) } #' if(!is.null(x$kappa1)) { parbreak() splat("Intensity of parents of nonempty clusters:", signif(x$kappa1, digits)) splat("Mean number of offspring in a nonempty cluster:", signif(x$mu1, digits)) splat("Intensity of parents of clusters of more than one offspring point:", signif(x$kappa2, digits)) splat("Ratio of parents to parents-plus-offspring:", signif(x$eta, digits), "(where 1 = Poisson process)") splat("Probability that a typical point belongs to a nontrivial cluster:", signif(x$panysib, digits)) } if(isTRUE(x$ispo)) { parbreak() splat(">>> The Poisson process is a better fit <<< ") } #' invisible(NULL) } spatstat.model/R/pairsat.family.R0000644000176200001440000002062514331173073016472 0ustar liggesusers# # # pairsat.family.S # # $Revision: 1.46 $ $Date: 2022/11/03 11:08:33 $ # # The saturated pairwise interaction family of point process models # # (an extension of Geyer's saturation process to all pairwise interactions) # # pairsat.family: object of class 'isf' # defining saturated pairwise interaction # # # ------------------------------------------------------------------- # pairsat.family <- list( name = "saturated pairwise", order = Inf, print = function(self) { 
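      # minimal print method: just announce the interaction family;
      # details of a specific interaction are printed by its own methods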
cat("Saturated pairwise interaction family\n") }, eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, halfway=FALSE) { # # This is the eval function for the `pairsat' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairsat' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms # applying the saturation threshold. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ######################################################################## # # POTENTIAL: # The pair potential function 'pairpot' will be called as # pairpot(M, potpars) where M is a matrix of interpoint distances. # It must return a matrix with the same dimensions as M # or an array with its first two dimensions the same as the dimensions of M. # # NOTE: # Note the Geyer saturation threshold must be given in 'potpars$sat' ########################################################################## # coercion should be unnecessary, but this is useful for debugging X <- as.ppp(X) U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window # saturation parameter(s) saturate <- potpars$sat # interaction distance of corresponding pairwise interaction PairReach <- if(!is.null(Reach) && is.finite(Reach)) Reach/2 else NULL if(is.null(saturate)) { # pairwise interaction V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach, precomputed=precomputed, savecomputed=savecomputed) return(V) } # first ensure all data points are included in the quadrature points nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points originalrows <- seq_len(nU) nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=FALSE) # correspondingly augment the list of equal pairs newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # compute the pair potentials POT and the unsaturated potential sums V V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach) POT <- attr(V, "POT") computed <- attr(V, "computed") # could be NULL # # V is a matrix with rows = quadrature points, # columns = coordinates of potential # POT is an array with rows = data points # columns = quadrature points # planes = coordinates of potential ################################################################# ################## saturation part ############################## ################################################################# # check dimensions and ensure 'saturate' is a vector ns <- length(saturate) np <- ncol(V) if(ns == 1 && np > 1) saturate <- rep.int(saturate, np) else if(ns != np) stop("Length of vector of saturation parameters is incompatible with the pair potential", call.=FALSE) # replicate as a matrix and as an array saturate2 <- array(saturate[slice.index(V, 2)], dim=dim(V)) saturate3 <- array(saturate[slice.index(POT, 3)], dim=dim(POT)) # # (a) compute SATURATED potential sums V.sat <- pmin(V, saturate2) if(halfway) return(V.sat) # # (b) compute effect of addition/deletion of dummy/data point j # on the UNSATURATED potential sum of each data point i # # Identify data points is.data <- seq_len(npoints(U)) %in% EqualPairs[,2] # logical vector corresp. 
to rows of V # Extract potential sums for data points only V.data <- V[is.data, , drop=FALSE] # replicate them so that V.dat.rep[i,j,k] = V.data[i, k] V.dat.rep <- aperm(array(V.data, dim=c(dim(V.data), U$n)), c(1,3,2)) # make a logical array col.is.data[i,j,k] = is.data[j] col.is.data <- array(is.data[slice.index(POT, 2)], dim=dim(POT)) # compute value of unsaturated potential sum for each data point i # obtained after addition/deletion of each dummy/data point j if(!(correction %in% c("isotropic", "Ripley"))) { dV <- ifelseNegPos(col.is.data, POT) ## equivalent to ifelse(col.is.data, -POT, POT) } else { ## Weighted potential is not exactly symmetric dV <- POT dV[col.is.data] <- - aperm(POT[ , is.data, , drop=FALSE], c(2,1,3)) } V.after <- V.dat.rep + dV # # # (c) difference of SATURATED potential sums for each data point i # before & after increment/decrement of each dummy/data point j # # saturated values after increment/decrement V.after.sat <- array(pmin.int(saturate3, V.after), dim=dim(V.after)) # saturated values before V.dat.rep.sat <- array(pmin.int(saturate3, V.dat.rep), dim=dim(V.dat.rep)) # difference V.delta <- V.after.sat - V.dat.rep.sat V.delta <- ifelseNegPos(col.is.data, V.delta) # # (d) Sum (c) over all data points i V.delta.sum <- apply(V.delta, c(2,3), sum) # # (e) Result V <- V.sat + V.delta.sum ########################################## # remove rows corresponding to supplementary points if(somemissing) V <- V[originalrows, , drop=FALSE] ### tack on the saved computations from pairwise.family$eval if(savecomputed) attr(V, "computed") <- computed return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairsat.family$suffstat") { # for saturated pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"saturated pairwise")) stop("Model is not a saturated pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, improve.type="none") } # determine which data points contribute to pseudolikelihood contribute <- getppmdatasubset(modelX) Empty <- X[integer(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat", halfway=TRUE) # halfway=TRUE is passed to pairsat.family$eval # and yields matrix of saturated potential sums # take only those terms that contribute to the pseudolikelihood mom <- mom[contribute, , drop=FALSE] result <- apply(mom, 2, sum) return(result) } ######### end of function $suffstat ) ######### end of list class(pairsat.family) <- "isf" spatstat.model/R/anova.ppm.R0000644000176200001440000003111114331173072015435 0ustar liggesusers# # anova.ppm.R # # $Revision: 1.30 $ $Date: 2022/06/21 02:42:45 $ # anova.ppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL nquad <- function(x) { if(is.quad(x)) n.quad(x) else 0 } fmlaString <- function(z) { paste(as.expression(formula(z))) } interString <- function(z) { as.interact(z)$creator } anova.ppm <- function(object, ..., test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) { gripe <- if(warn) do.gripe else dont.gripe if(!is.null(test)) { test <- match.arg(test, c("Chisq", "LRT", "Rao", "score", "F", "Cp")) if(test == "score") test <- "Rao" if(!(test %in% c("Chisq", "LRT", "Rao"))) stop("test=", dQuote(test), "is not yet implemented") } ## trap outmoded usage argh <- list(...) 
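    ## the superseded argument 'override' is no longer supported:
    ## warn if it was supplied, and drop it from the argument list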
if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) if(!all(sapply(objex, is.ppm))) stop(paste("Arguments must all be", sQuote("ppm"), "objects")) ## all models Poisson? pois <- all(sapply(objex, is.poisson.ppm)) gibbs <- !pois ## any models fitted by ippm? newton <- any(sapply(objex, inherits, what="ippm")) ## models fitted using 'ho' or 'enet' ? improv <- lapply(objex, getElement, name="improve.type") improv[sapply(improv, is.null)] <- "none" improv <- unlist(improv) ## enet ignored if(warn && any(e <- (improv == "enet"))) { ne <- sum(e) warning(paste("Analysis of deviance ignores the fact that", if(length(e) == 1L) "the" else ne, ngettext(ne, "model was", "models were"), "fitted using improve.type='enet'")) } if(gibbs && !is.null(test) && test == "Rao") stop("Score test is only implemented for Poisson models", call.=FALSE) ## handle anova for a single object expandedfrom1 <- FALSE if(length(objex) == 1 && (gibbs || newton)) { ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) objex[[n]] <- update(object, fmla) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## all models fitted by same method? fitmethod <- unique(sapply(objex, getElement, name="method")) if(length(fitmethod) > 1) stop(paste("Models were fitted by different methods", commasep(sQuote(fitmethod)), "- comparison is not possible")) ## fitted by MPL or logistic? if(!(fitmethod %in% c("mpl", "logi"))) stop(paste("Not implemented for models fitted by method=", sQuote(fitmethod))) logi <- (fitmethod == "logi") ## improvements 'ho', 'enet' fitimprov <- unique(improv) if(warn && length(fitimprov) > 1) warning(paste("Models were fitted by different 'improve.type' settings", commasep(sQuote(fitimprov)), " - calculation ignores this")) refitargs <- list() fitz <- NULL ## fitted to same quadscheme using same edge correction? if(length(objex) > 1) { ## same data? datas <- lapply(objex, data.ppm) samedata <- all(sapply(datas[-1L], identical, y=datas[[1L]])) if(!samedata) stop("Models were fitted to different datasets") ## same dummy points? quads <- lapply(objex, quad.ppm) samequad <- all(sapply(quads[-1L], identical, y=quads[[1L]])) if(!samequad) { gripe("Models were re-fitted using a common quadrature scheme") sizes <- sapply(quads, nquad) imax <- which.max(sizes) bigQ <- quads[[imax]] refitargs$Q <- bigQ } ## same edge correction? corrxn <- unique(sapply(objex, getElement, name="correction")) if(length(corrxn) > 1) stop(paste("Models were fitting using different edge corrections", commasep(sQuote(corrxn)))) if(corrxn == "border") { rbord <- unique(sapply(objex, getElement, name="rbord")) if(length(rbord) > 1) { gripe("Models were re-fitted using a common value of 'rbord'") refitargs$rbord <- max(rbord) } } ## Extract glmfit objects fitz <- lapply(objex, getglmfit) ## Any trivial models? 
(uniform Poisson) trivial <- sapply(fitz, is.null) if(any(trivial)) refitargs$forcefit <- TRUE ## force all non-trivial models to be fitted using same method ## (all using GLM or all using GAM) isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") refitargs$use.gam <- TRUE refitargs$forcefit <- TRUE } ## finally refit models if(length(refitargs) > 0) { objex <- do.call(lapply, append(list(X=objex, FUN=update), refitargs)) fitz <- lapply(objex, getglmfit) } } ## Ensure GLM/GAM objects all use the same 'subset' subz <- lapply(objex, getglmsubset) if(length(unique(subz)) > 1) { subsub <- Reduce("&", subz) fitz <- lapply(fitz, refittosubset, sub=subsub) gripe("Models were re-fitted after discarding quadrature points", "that were illegal under some of the models") } ## If any models were fitted by ippm we need to correct the df if(newton) { nfree <- sapply(lapply(objex, logLik), attr, which="df") ncanonical <- lengths(lapply(objex, coef)) nextra <- nfree - ncanonical if(is.null(fitz)) fitz <- lapply(objex, getglmfit) for(i in seq_along(fitz)) if(nextra[i] != 0) fitz[[i]]$df.residual <- fitz[[i]]$df.residual - nextra[i] } ## Finally do the appropriate ANOVA if(is.null(fitz)) fitz <- lapply(objex, getglmfit) result <- do.call(anova, append(fitz, list(test=test, dispersion=1))) ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model obj1 <- objex[[1L]] ss <- getglmsubset(obj1) nq <- if(!is.null(ss)) sum(ss) else n.quad(quad.ppm(obj1)) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) 
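        ## relabel the models using the original model formulae and
        ## interaction names, instead of the internal glm response names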
## change names of models fmlae <- sapply(objex, fmlaString) intrx <- sapply(objex, interString) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, "\t", intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1L]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && gibbs) { fitz <- lapply(objex, getglmfit) usegam <- any(sapply(fitz, inherits, what="gam")) if(usegam) { gripe("Deviance adjustment is not available for gam fits;", "unadjusted composite deviance calculated.") } else { ## issue warning, if not already given if(warn) warn.once("anovaAdjust", "anova.ppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="internals", new.coef=thetaDot, fine=fine) J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) H <- if(!logi) JH$A1 else JH$Slog G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- residuals(bigger, type="score", new.coef=thetaDot, drop=TRUE) U <- integral.msr(Res) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ## ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo HooUo <- Hoo %*% Uo ScoreStat <- t(HooUo) %*% solve(Goo) %*% HooUo ## cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) cfac[i] <- ScoreStat/(t(HooUo) %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } if(newton) { ## calculation does not include 'covfunargs' cfa <- lapply(lapply(objex, getElement, name="covfunargs"), names) cfa <- unique(unlist(cfa)) action <- if(adjust && gibbs) "Adjustment to composite likelihood" else if(test == "Rao") "Score test calculation" else NULL if(!is.null(action)) gripe(action, "does not account for", "irregular trend parameters (covfunargs)", commasep(sQuote(cfa))) } } return(result) } refittosubset <- function(fut, sub) { etf <- environment(terms(fut)) gd <- get("glmdata", envir=etf) gd$.mpl.SUBSET <- sub assign("glmdata", gd, envir=etf) up <- update(fut, evaluate=FALSE) eval(up, 
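    # evaluate the update() call back in the environment of the glm fit's
    # terms, where the modified 'glmdata' (with the common subset) now lives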
envir=etf) } anova.ppm }) spatstat.model/R/softcore.R0000644000176200001440000001020214331173073015361 0ustar liggesusers# # # softcore.S # # $Revision: 2.17 $ $Date: 2022/03/07 02:31:44 $ # # Soft core processes. # # Softcore() create an instance of a soft core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Softcore <- local({ BlankSoftcore <- list( name = "Soft core process", creator = "Softcore", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { p <- -d^(-2/par$kappa) } else { # expand around sigma0 and set large negative numbers to -Inf drat <- d/sig0 p <- -drat^(-2/par$kappa) p[p < -25] <- -Inf } return(p) }, par = list(kappa = NULL, sigma0=NA), # filled in later parnames = c("Exponent kappa", "Initial approximation to sigma"), hasInf = TRUE, selfstart = function(X, self) { # self starter for Softcore if(npoints(X) < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Softcore model")) return(self) } kappa <- self$par$kappa if(!is.na(sigma0 <- self$par$sigma0)) { # value fixed by user or previous invocation # check it if((md/sigma0)^(-2/kappa) > 25) warning(paste("Initial approximation sigma0 is too large;", "some data points will have zero probability")) return(self) } # take sigma0 = minimum interpoint distance Softcore(kappa=kappa, sigma0=md) }, init = function(self) { kappa <- self$par$kappa if(!is.numeric(kappa) || length(kappa) != 1 || kappa <= 0 || kappa >= 1) stop(paste("Exponent kappa must be a", "positive number less than 1")) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) sigma <- theta^(self$par$kappa/2) if(!is.na(sig0 <- self$par$sigma0)) sigma <- sigma * sig0 return(list(param=list(sigma=sigma), inames="interaction parameter sigma", printable=signif(sigma))) }, valid = function(coeffs, self) { theta <- coeffs[1] return(is.finite(theta) && (theta >= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { # distance d beyond which log(interaction factor) <= epsilon if(anyNA(coeffs) || epsilon == 0) return(Inf) theta <- as.numeric(coeffs[1]) kappa <- self$par$kappa sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 return(sig0 * (theta/epsilon)^(kappa/2)) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) 
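    # For the soft core, the pair interaction factor at distance d is
    #    exp(-theta * (d/sigma0)^(-2/kappa)),
    # which falls below epsilon exactly when
    #    d < sigma0 * (-log(epsilon)/theta)^(-kappa/2),
    # the effective hard core distance returned below.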
{ # distance d below which interaction factor <= epsilon if(anyNA(coeffs) || epsilon == 0) return(0) theta <- abs(as.numeric(coeffs[1])) if(theta == 0) return(0) kappa <- self$par$kappa sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 h <- sig0 * (-log(epsilon)/theta)^(-kappa/2) return(h) }, Mayer=function(coeffs, self) { # second Mayer cluster integral kappa <- self$par$kappa sigma <- (self$interpret)(coeffs, self)$param$sigma return(pi * (sigma^2) * gamma(1 - kappa)) }, version=NULL # filled in later ) class(BlankSoftcore) <- "interact" Softcore <- function(kappa, sigma0=NA) { instantiate.interact(BlankSoftcore, list(kappa=kappa, sigma0=sigma0)) } Softcore <- intermaker(Softcore, BlankSoftcore) Softcore }) spatstat.model/R/Gcom.R0000644000176200001440000001560414366621174014445 0ustar liggesusers# # Gcom.R # # Model compensator of G # # $Revision: 1.12 $ $Date: 2023/02/02 02:39:48 $ # ################################################################################ # Gcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "Hanisch"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) if(!is.null(model)) { stopifnot(is.ppm(model)) ## update 'model' using new point pattern 'object' e <- list2env(list(object=object), parent=model$callframe) fit <- update(model, Q=object, forcefit=TRUE, envir=e) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) restrict <- isTRUE(restrict) if(restrict && !conditional) { warning("restrict=TRUE ignored because conditional=FALSE", call.=FALSE) restrict <- FALSE } # rfixed <- !is.null(r) || !is.null(breaks) # selection of edge corrections # correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", Hanisch="Hanisch", hanisch="Hanisch", best="Hanisch"), multi=TRUE) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? 
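  # If conditional=TRUE, inference is conditional on the configuration in
  # the border region of width rbord: either by reweighting (the default)
  # or, when restrict=TRUE, by restricting all computations to the eroded
  # window Wfree = erosion(Win, rbord).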
if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # adjustments to account for restricted domain if(conditional && spatstat.options("eroded.intensity")) { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npts.used/area.used } else { npts.used <- npts area.used <- areaW lambda.used <- lambda } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", if(restrict) Wfree else Win, lambda) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree rescts <- rescts[retain] } # absolute weight for continuous integrals # wc <- -rescts # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) dIJ <- nn$dist I <- seq(U$n) # J <- nn$which DD <- Z <- (I <= X$n) # TRUE for data points wcIJ <- -rescts # determine whether a quadrature point will be used in integral okI <- USED[I] # initialise fv object r <- breaks$r df <- data.frame(r=r, pois=1 - exp(-pi * lambda.used * r^2)) G <- fv(df, "r", substitute(G(r), NULL), "pois", . ~ r, alim=c(0, rmax), labl=c("r","%s[pois](r)"), desc=c("distance argument r", "theoretical Poisson %s"), fname="G") # distance to boundary b <- bI <- bdist.points(U) dotnames <- character(0) # Border method if("border" %in% correction) { # reduced sample for G(r) of data only ZUSED <- Z & USED RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[ZUSED], breaks) Gb <- RSX$numerator/RSX$denom.count G <- bind.fv(G, data.frame(border=Gb), "hat(%s)[bord](r)", "border-corrected nonparametric estimate of %s", "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[ZUSED], rep.int(1, sum(ZUSED)), breaks, fatal=FALSE) Gbcom <- RSD$numerator/(1 + RSD$denominator) G <- bind.fv(G, data.frame(bcom=Gbcom), "bold(C)~hat(%s)[bord](r)", "model compensator of border-corrected %s", "bcom") dotnames <- c("border", "bcom", "pois") } # Hanisch correction for data if("Hanisch" %in% correction) { nnd <- dIJ[DD & okI] bdry <- bI[DD & okI] # weights ea <- eroded.areas(Win, rvals) if(algo == "reweighted") { # replace weight(r) by weight(max(rbord,r)) ea[rvals < rbord] <- eroded.areas(Win, rbord) } # compute x <- nnd[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val) H <- (1/lambda.used) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(han=H), "hat(%s)[han](r)", "Hanisch correction estimate of %s", "han") # Hanisch correction for adjustment integral nnd <- dIJ[okI] bdry <- bI[okI] wt <- wcIJ[okI] x <- nnd[nnd <= bdry] wt <- wt[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val, weights=wt[x <= rmax]) lambdaplus <- (npts.used + 1)/area.used Hint <- (1/lambdaplus) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(hcom=Hint), "bold(C)~hat(%s)[han](r)", "model compensator of Hanisch-corrected %s", "hcom") # pseudovariance for Hanisch residual Hvar <- (1/lambdaplus^2) * cumsum(h/ea^2) G <- bind.fv(G, 
data.frame(hvar=Hvar), "bold(C)^2~hat(%s)[han](r)", "Poincare variance for Hanisch corrected %s", "hcom") # default plot does not show all components dotnames <- c("han", "hcom", dotnames) } # compute sensible 'alim' endpoint <- function(y, r, f) { min(r[y >= f * max(y)]) } amax <- endpoint(G$pois, G$r, 0.99) if(length(dotnames) > 0) amax <- max(amax, unlist(lapply(as.data.frame(G)[,dotnames,drop=FALSE], endpoint, r=r, f=0.9))) attr(G, "alim") <- c(0, amax) # fvnames(G, ".") <- dotnames unitname(G) <- unitname(X) # secret tag used by 'Gres' attr(G, "maker") <- "Gcom" return(G) } spatstat.model/R/slrm.R0000644000176200001440000007330314374302014014521 0ustar liggesusers# # slrm.R # # Spatial Logistic Regression # # $Revision: 1.66 $ $Date: 2023/02/16 02:33:58 $ # slrm <- function(formula, ..., data=NULL, offset=TRUE, link="logit", dataAtPoints=NULL, splitby=NULL) { # remember call CallInfo <- list(callstring = short.deparse(sys.call()), cl = match.call(), formula = formula, offset=offset, link=link, splitby=splitby, dotargs=list(...)) if(!(link %in% c("logit", "cloglog"))) stop(paste("Unrecognised link", dQuote(link))) ########### INTERPRET FORMULA ############################## if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) # check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- rhs <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") varnames <- unique(variablesinformula(trend)) if(isTRUE(CallInfo$dotargs$save.all.vars)) varnames <- union(varnames, names(data)) specials <- c("x", "y", "logpixelarea") covnames <- varnames[!(varnames %in% specials)] # add 'splitby' to covariate names if(!is.null(splitby)) { if(!is.character(splitby) || length(splitby) != 1) stop("splitby should be a single character string") covnames <- unique(c(covnames, splitby)) } CallInfo$responsename <- Yname CallInfo$varnames <- varnames CallInfo$covnames <- covnames # Parent environment parenv <- environment(formula) ######## FIND DATA AND RESHAPE ####################### Data <- slr.prepare(CallInfo, parenv, data, dataAtPoints, splitby) # W <- Data$W df <- Data$df nY <- npoints(Data$response) ######## FIT ############################### dformula <- formula if(offset) { # insert offset term in formula rhs <- paste(as.character(rhs), collapse=" ") rhs <- paste(c(rhs, "offset(logpixelarea)"), collapse="+") dformula <- as.formula(paste(Yname, rhs)) } linkname <- link FIT <- glm(dformula, family=binomial(link=linkname), data=df, na.action=na.exclude) result <- list(call = CallInfo$cl, CallInfo = CallInfo, Data = Data, Fit = list(FIT=FIT, dformula=dformula), terms = terms(formula), nobs = nY) class(result) <- c("slrm", class(result)) return(result) } ################ UTILITY TO FIND AND RESHAPE DATA ################# slr.prepare <- local({ slr.prepare <- function(CallInfo, envir, data, dataAtPoints=NULL, splitby=NULL, clip=TRUE) { ## CallInfo is produced by slrm() ## envir is parent environment of model formula ## data is 'data' argument that takes precedence over 'envir' ## 'clip' is TRUE if the data should be clipped to the domain of Y Yname <- CallInfo$responsename ## varnames <- CallInfo$varnames covnames <- CallInfo$covnames dotargs <- CallInfo$dotargs ## ## Get the response point pattern Y Y <- 
getobj(Yname, envir, data) if(!is.ppp(Y)) stop(paste("The response", sQuote(Yname), "must be a point pattern")) ## if(!is.null(dataAtPoints)) { dataAtPoints <- as.data.frame(dataAtPoints) if(nrow(dataAtPoints) != npoints(Y)) stop(paste("dataAtPoints should have one row for each point in", dQuote(Yname))) } ## Find the covariates ncov <- length(covnames) covlist <- lapply(as.list(covnames), getobj, env = envir, dat=data) names(covlist) <- covnames ## Each covariate should be an image, a window, a function, or single number if(ncov == 0) { isim <- isowin <- ismask <- isfun <- isnum <- isspatial <- israster <- logical(0) } else { isim <- sapply(covlist, is.im) isowin <- sapply(covlist, is.owin) ismask <- sapply(covlist, is.mask) isfun <- sapply(covlist, is.function) isspatial <- isim | isowin | isfun israster <- isim | ismask isnum <- sapply(covlist, is.numeric) & (lengths(covlist) == 1) } if(!all(ok <- (isspatial | isnum))) { n <- sum(!ok) stop(paste(ngettext(n, "The argument", "Each of the arguments"), commasep(sQuote(covnames[!ok])), "should be either an image, a window, or a single number")) } ## 'splitby' if(!is.null(splitby)) { splitwin <- covlist[[splitby]] if(!is.owin(splitwin)) stop("The splitting covariate must be a window") ## ensure it is a polygonal window covlist[[splitby]] <- splitwin <- as.polygonal(splitwin) ## delete splitting covariate from lists to be processed issplit <- (covnames == splitby) isspatial[issplit] <- FALSE israster[issplit] <- FALSE } ## ## nnum <- sum(isnum) ## nspatial <- sum(isspatial) nraster <- sum(israster) ## numlist <- covlist[isnum] spatiallist <- covlist[isspatial] rasterlist <- covlist[israster] ## numnames <- names(numlist) spatialnames <- names(spatiallist) ## rasternames <- names(rasterlist) ## ######## CONVERT TO RASTER DATA ############################### ## determine spatial domain & common resolution: convert all data to it if(length(dotargs) > 0 || nraster == 0) { ## Pixel resolution is determined by explicit arguments if(clip) { ## Window extent is determined by response point pattern D <- as.owin(Y) } else { ## Window extent is union of domains of data domains <- lapply(append(spatiallist, list(Y)), as.owin) D <- do.call(union.owin, domains) } ## Create template mask W <- do.call.matched(as.mask, append(list(w=D), dotargs)) ## Convert all spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } else { ## Pixel resolution is determined implicitly by covariate data W <- do.call(commonGrid, rasterlist) if(clip) { ## Restrict data to spatial extent of response point pattern W <- intersect.owin(W, as.owin(Y)) } ## Adjust spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } ## images containing coordinate values xcoordim <- as.im(function(x,y){x}, W=W) ycoordim <- as.im(function(x,y){y}, W=W) ## ## create a list of covariate images, with names as in formula covimages <- append(list(x=xcoordim, y=ycoordim), spatiallist) basepixelarea <- W$xstep * W$ystep ######## ASSEMBLE DATA FRAME ############################### if(is.null(splitby)) { df <- slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, basepixelarea) sumYloga <- Y$n * log(basepixelarea) serial <- attr(df, "serial") Yserial <- attr(df, "Yserial") } else { ## fractional pixel areas pixsplit <- pixellate(splitwin, W) splitpixelarea <- as.vector(as.matrix(pixsplit)) ## determine which points of Y are inside/outside window ins <- inside.owin(Y$x, Y$y, splitwin) ## split processing dfIN <- slrAssemblePixelData(Y[ins], Yname, 
W, covimages, dataAtPoints[ins, ], splitpixelarea) serialIN <- attr(dfIN, "serial") YserialIN <- attr(dfIN, "Yserial") dfIN[[splitby]] <- TRUE dfOUT <- slrAssemblePixelData(Y[!ins], Yname, W, covimages, dataAtPoints[!ins, ], basepixelarea - splitpixelarea) serialOUT <- attr(dfOUT, "serial") YserialOUT <- attr(dfOUT, "Yserial") dfOUT[[splitby]] <- FALSE df <- rbind(dfIN, dfOUT) serial <- c(serialIN, serialOUT) Yserial <- c(YserialIN, YserialOUT) ## sum of log pixel areas associated with points Ysplit <- pixsplit[Y] sumYloga <- sum(log(ifelseXY(ins, Ysplit, basepixelarea - Ysplit))) } ## tack on any numeric values df <- do.call(cbind, append(list(df), numlist)) ### RETURN ALL Data <- list(response=Y, covariates=covlist, spatialnames=spatialnames, numnames=numnames, W=W, df=df, serial=serial, Yserial=Yserial, sumYloga=sumYloga, dataAtPoints=dataAtPoints) return(Data) } getobj <- function(nama, env, dat) { if(!is.null(dat) && !is.null(x <- dat[[nama]])) return(x) else return(get(nama, envir=env)) } convert <- function(x,W) { if(is.im(x) || is.function(x)) return(as.im(x,W)) if(is.owin(x)) return(as.im(x, W, value=TRUE, na.replace=FALSE)) return(NULL) } slr.prepare }) ## ............................................................. slrAssemblePixelData <- local({ slrAssemblePixelData <- function(Y, Yname, W, covimages, dataAtPoints, pixelarea) { #' pixellate point pattern PY <- pixellate(Y, W=W, savemap=TRUE) IY <- eval.im(as.integer(PY>0)) #' if(!is.null(dataAtPoints)) { #' overwrite pixel entries for data points using exact values #' spatial coordinates covimages[["x"]][Y] <- Y$x covimages[["y"]][Y] <- Y$y #' other values provided enames <- colnames(dataAtPoints) relevant <- enames %in% names(covimages) for(v in enames[relevant]) { cova <- covimages[[v]] cova[Y] <- dataAtPoints[, v, drop=TRUE] covimages[[v]] <- cova } } #' assemble list of all images Ylist <- list(IY) names(Ylist) <- Yname allimages <- append(Ylist, covimages) #' extract pixel values of each image, convert to data frame pixdata <- lapply(allimages, pixelvalues) df <- as.data.frame(pixdata) serial <- seq_len(nrow(df)) ## add log(pixel area) column if(length(pixelarea) == 1) { df <- cbind(df, logpixelarea=log(pixelarea)) } else { ok <- (pixelarea > 0) df <- cbind(df[ok, ], logpixelarea=log(pixelarea[ok])) serial <- serial[ok] } #' map rows of data frame to pixels attr(df, "serial") <- serial #' map original data points to pixels Yrowcol <- attr(PY, "map") attr(df, "Yserial") <- Yrowcol[,"row"] + (nrow(PY) - 1L) * Yrowcol[,"col"] return(df) } pixelvalues <- function(z) { v <- as.vector(as.matrix(z)) if(z$type != "factor") return(v) lev <- levels(z) return(factor(v, levels=seq_along(lev), labels=lev)) } slrAssemblePixelData }) ## ................. Methods ................................... is.slrm <- function(x) { inherits(x, "slrm") } coef.slrm <- function(object, ...) { coef(object$Fit$FIT) } print.slrm <- function(x, ...) { lk <- x$CallInfo$link switch(lk, logit= { splat("Fitted spatial logistic regression model") }, cloglog= { splat("Fitted spatial regression model (complementary log-log)") }, { splat("Fitted spatial regression model") splat("Link =", dQuote(lk)) }) cat("Formula:\t") print(x$CallInfo$formula) splat("Fitted coefficients:") print(coef(x)) return(invisible(NULL)) } summary.slrm <- function(object, ...) 
{ y <- object$CallInfo[c("link", "formula", "callstring")] co <- coef(object) se <- sqrt(diag(vcov(object))) two <- qnorm(0.975) lo <- co - two * se hi <- co + two * se zval <- co/se pval <- 2 * pnorm(abs(zval), lower.tail = FALSE) psig <- cut(pval, c(0, 0.001, 0.01, 0.05, 1), labels = c("***", "**", "*", " "), include.lowest = TRUE) y$coefs.SE.CI <- data.frame(Estimate = co, S.E. = se, CI95.lo = lo, CI95.hi = hi, Ztest = psig, Zval = zval) class(y) <- c(class(y), "summary.slrm") return(y) } print.summary.slrm <- function(x, ...) { switch(x$link, logit= { splat("Fitted spatial logistic regression model") }, cloglog= { splat("Fitted spatial regression model (complementary log-log)") }, { splat("Fitted spatial regression model") splat("Link =", dQuote(x$link)) }) cat("Call:\t") print(x$callstring) cat("Formula:\t") print(x$formula) splat("Fitted coefficients:\t") print(x$coefs.SE.CI) return(invisible(NULL)) } coef.summary.slrm <- function(object, ...) { object$coefs.SE.CI } logLik.slrm <- function(object, ..., adjust=TRUE) { FIT <- object$Fit$FIT ll <- -deviance(FIT)/2 if(adjust) { sumYloga <- object$Data$sumYloga ll <- ll - sumYloga } attr(ll, "df") <- length(coef(object)) class(ll) <- "logLik" return(ll) } fitted.slrm <- function(object, ...) { if(length(list(...)) > 0) warning("second argument (and any subsequent arguments) ignored") predict(object, type="probabilities") } intensity.slrm <- function(X, ...) { Z <- predict(X, type="intensity", ..., newdata=NULL, window=NULL) if(is.stationary(X)) Z <- mean(Z) return(Z) } predict.slrm <- function(object, ..., type="intensity", newdata=NULL, window=NULL) { type <- pickoption("type", type, c(probabilities="probabilities", link="link", intensity="intensity", lambda="intensity")) FIT <- object$Fit$FIT link <- object$CallInfo$link splitby <- object$CallInfo$splitby Yname <- object$CallInfo$responsename W <- object$Data$W df <- object$Data$df loga <- df$logpixelarea if(!is.null(window)) window <- as.owin(window) if(is.null(newdata) && is.null(window) && is.null(splitby)) { # fitted pixel values from existing fit switch(type, probabilities={ values <- fitted(FIT) }, link={ values <- predict(FIT, type="link") }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { linkvalues <- predict(FIT, type="link") values <- exp(linkvalues - loga) } else { probs <- fitted(FIT) values <- -log(1-probs)/exp(loga) } } ) out <- im(values, xcol=W$xcol, yrow=W$yrow, unitname=unitname(W)) return(out) } else { ## prediction from new data and/or at new locations if(is.null(newdata)) { ## prediction using existing covariates, at new locations newdata <- object$Data$covariates } else { ## prediction with completely new data stopifnot(is.list(newdata)) } ## ensure newdata includes response pattern to placate internal code if(!(Yname %in% names(newdata))) newdata[[Yname]] <- ppp(window=window %orifnull% W) ## Update arguments that may affect pixel resolution CallInfo <- object$CallInfo CallInfo$dotargs <- resolve.defaults(list(...), CallInfo$dotargs) ## prevent pixel splitting CallInfo$splitby <- NULL ## process new data newData <- slr.prepare(CallInfo, environment(CallInfo$formula), newdata, clip=!is.null(window)) newdf <- newData$df newW <- newData$W newloga <- newdf$logpixelarea ## avoid NA etc npixel <- nrow(newdf) ok <- complete.cases(newdf) if(!all(ok)) { newdf <- newdf[ok, , drop=FALSE] newloga <- newloga[ok] } ## compute link values linkvalues <- predict(FIT, newdata=newdf, type="link") ## transform to desired scale 
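## (Background for the branches below, under the standard slrm pixel
##  approximation: a pixel of area a is occupied with probability
##  p = 1 - exp(-lambda * a). With the cloglog link the linear predictor
##  eta therefore satisfies lambda = exp(eta - log a) exactly; for other
##  links the same identity is inverted directly as lambda = -log(1 - p)/a.)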
linkinv <- family(FIT)$linkinv switch(type, probabilities={ values <- linkinv(linkvalues) }, link={ values <- linkvalues }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { values <- exp(linkvalues - newloga) } else { probs <- linkinv(linkvalues) values <- -log(1-probs)/exp(newloga) } } ) ## form image v <- rep.int(NA_real_, npixel) v[ok] <- values out <- im(v, xcol=newW$xcol, yrow=newW$yrow, unitname=unitname(W)) return(out) } } plot.slrm <- function(x, ..., type="intensity") { xname <- short.deparse(substitute(x)) y <- predict(x, type=type) dont.complain.about(y) do.call(plot.im, resolve.defaults(list(x=quote(y)), list(...), list(main=xname))) } formula.slrm <- function(x, ...) { f <- x$CallInfo$formula return(f) } terms.slrm <- function(x, ...) { terms(formula(x), ...) } labels.slrm <- function(object, ...) { # extract fitted trend coefficients co <- coef(object) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } deviance.slrm <- function(object, ...) { deviance(object$Fit$FIT, ...) } extractAIC.slrm <- function (fit, scale = 0, k = 2, ...) { edf <- length(coef(fit)) aic <- AIC(fit) c(edf, aic + (k - 2) * edf) } model.frame.slrm <- function(formula, ...) { FIT <- formula$Fit$FIT mf <- model.frame(FIT, ...) return(mf) } model.matrix.slrm <- function(object,..., keepNA=TRUE) { FIT <- object$Fit$FIT mm <- model.matrix(FIT, ...) if(!keepNA) return(mm) df <- object$Data$df comp <- complete.cases(df) if(all(comp)) return(mm) if(sum(comp) != nrow(mm)) stop("Internal error in patching NA's") mmplus <- matrix(NA, nrow(df), ncol(mm)) mmplus[comp, ] <- mm colnames(mmplus) <- colnames(mm) return(mmplus) } model.images.slrm <- function(object, ...) { mm <- model.matrix(object, ...) mm <- as.data.frame(mm) Data <- object$Data W <- Data$W serial <- Data$serial splitby <- object$CallInfo$splitby blank <- as.im(NA_real_, W) assignbyserial <- function(values, serial, template) { Z <- template Z$v[serial] <- values return(Z) } if(is.null(splitby)) { result <- lapply(as.list(mm), assignbyserial, serial=serial, template=blank) } else { df <- Data$df IN <- as.logical(df[[splitby]]) OUT <- !IN mmIN <- mm[IN, , drop=FALSE] mmOUT <- mm[OUT, , drop=FALSE] resultIN <- lapply(as.list(mmIN), assignbyserial, serial=serial[IN], template=blank) resultOUT <- lapply(as.list(mmOUT), assignbyserial, serial=serial[OUT], template=blank) names(resultIN) <- paste(names(resultIN), splitby, "TRUE", sep="") names(resultOUT) <- paste(names(resultOUT), splitby, "FALSE", sep="") result <- c(resultIN, resultOUT) } return(as.solist(result)) } update.slrm <- function(object, fmla, ..., evaluate=TRUE, env=parent.frame()) { if(!missing(fmla) && is.ppp(X <- fmla)) { ## idiom 'update(model, Pattern)' covlist <- object$Data$covariates Yname <- object$CallInfo$responsename newdata <- append(covlist, setNames(list(X), Yname)) ## call this method again with new arguments object <- update(object, data=newdata, ..., env=env) return(object) } ## resume normal service z <- getCall(object) extras <- match.call(expand.dots = FALSE)$... 
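## (This is the call-surgery idiom of stats::update.default: modify the
##  stored call 'z', overwriting arguments that already exist and appending
##  the rest, then optionally re-evaluate it. A hedged usage sketch, assuming
##  a fitted slrm model 'fit' and a covariate image 'Z' already exist:
##    fit2 <- update(fit, . ~ . + Z)   # add a term to the trend formula
## )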
if(!missing(fmla)) z$formula <- update(formula(object), fmla) if(!missing(env)) environment(z$formula) <- env if(length(extras)) { existing <- !is.na(match(names(extras), names(z))) for(a in names(extras)[existing]) z[[a]] <- extras[[a]] if (any(!existing)) { z <- c(as.list(z), extras[!existing]) z <- as.call(z) } } if(evaluate) eval(z, envir=env) else z } updateData.slrm <- function(model, X, ...) { covlist <- model$Data$covariates Yname <- model$CallInfo$responsename newdata <- append(covlist, setNames(list(X), Yname)) update(model, data=newdata) } anova.slrm <- local({ anova.slrm <- function(object, ..., test=NULL) { objex <- append(list(object), list(...)) if(!all(unlist(lapply(objex, is.slrm)))) stop("Some arguments are not of class slrm") fitz <- lapply(objex, getFIT) do.call(anova, append(fitz, list(test=test))) } getFIT <- function(z) {z$Fit$FIT} anova.slrm }) vcov.slrm <- function(object, ..., what=c("vcov", "corr", "fisher", "Fisher")) { stopifnot(is.slrm(object)) what <- match.arg(what) vc <- vcov(object$Fit$FIT) result <- switch(what, vcov = vc, corr = { sd <- sqrt(diag(vc)) vc / outer(sd, sd, "*") }, fisher=, Fisher={ solve(vc) }) return(result) } unitname.slrm <- function(x) { return(unitname(x$Data$response)) } "unitname<-.slrm" <- function(x, value) { unitname(x$Data$response) <- value return(x) } domain.slrm <- Window.slrm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, ..., from=from) } as.owin.slrm <- function(W, ..., from=c("points", "covariates")) { from <- match.arg(from) U <- switch(from, points = W$Data$response, covariates = W$Data$W) V <- as.owin(U, ...) return(V) } is.stationary.slrm <- function(x) { trend <- rhs.of.formula(formula(x)) return(identical.formulae(trend, ~1)) } is.poisson.slrm <- function(x) { TRUE } is.marked.slrm <- is.multitype.slrm <- function(X, ...) { FALSE } reach.slrm <- function(x, ...) { 0 } ## pseudoR2.slrm is defined in ppmclass.R Kmodel.slrm <- function(model, ...) { function(r) { pi * r^2 } } pcfmodel.slrm <- function(model, ...) { function(r) { rep.int(1, length(r)) } } parameters.slrm <- function(model, ...) { list(trend=coef(model)) } ## ............ SIMULATION .............................. simulate.slrm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) { check.1.integer(nsim) stopifnot(nsim >= 0) if(nsim == 0) return(simulationresult(list())) # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } starttime <- proc.time() # determine simulation window and compute intensity if(!is.null(window)) stopifnot(is.owin(window)) lambda <- predict(object, type="intensity", newdata=covariates, window=window) # max lambda (for efficiency) summ <- summary(lambda) lmax <- summ$max + 0.05 * diff(summ$range) # run out <- list() verbose <- verbose && (nsim > 1) if(verbose) { cat(paste("Generating", nsim, "simulations... 
")) pstate <- list() } for(i in 1:nsim) { out[[i]] <- rpoispp(lambda, lmax=lmax) if(verbose) pstate <- progressreport(i, nsim, state=pstate) } #' pack up out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } ## ------------------ residuals -------------------------------- residuals.slrm <- function(object, type=c("raw", "deviance", "pearson", "working", "response", "partial", "score"), ...) { type <- match.arg(type) otype <- if(type %in% c("raw", "score")) "response" else type FIT <- object$Fit$FIT W <- object$Data$W res <- residuals(FIT, type=otype, ...) if(type == "score") { M <- model.matrix(object) res <- res * M colnames(res) <- colnames(M) } R <- wrangle2image(res, W) return(R) } ## ------------------ leverage and influence ------------------- leverage.slrm <- function(model, ...) { slrmInfluence(model, "leverage", ...)[["leverage"]] } influence.slrm <- function(model, ...) { slrmInfluence(model, "influence", ...)[["influence"]] } dfbetas.slrm <- function(model, ...) { slrmInfluence(model, "dfbetas", ...)[["dfbetas"]] } dffit.slrm <- function(object, ...) { slrmInfluence(object, "dffit", ...)[["dffit"]] } slrmInfluence <- function(model, what=c("all", "leverage", "influence", "dfbetas", "dffit"), ...) { stopifnot(is.slrm(model)) what <- match.arg(what, several.ok=TRUE) if("all" %in% what) what <- c("leverage", "influence", "dfbetas", "dffit") FIT <- model$Fit$FIT W <- model$Data$W ## nr <- nrow(W) ## nc <- ncol(W) result <- list() if("leverage" %in% what) { h <- hatvalues(FIT, ...) result$leverage <- wrangle2image(h, W) } if("influence" %in% what) { h <- hatvalues(FIT, ...) rP <- rstandard(FIT, type="pearson", ...) p <- length(coef(model)) s <- (1/p) * rP^2 * h/(1-h) result$influence <- wrangle2image(s, W) } if("dfbetas" %in% what) { dfb <- dfbetas(FIT, ...) result$dfbetas <- wrangle2image(dfb, W) } if("dffit" %in% what) { dfb <- dfbeta(FIT, ...) #sic X <- model.matrix(model) # sic if(is.null(dim(X)) || is.null(dim(dfb)) || !all(dim(X) == dim(dfb))) stop("Internal error: model.matrix dimensions incompatible with dfbeta") dff <- rowSums(X * dfb) result$dffit <- wrangle2image(dff, W) } return(result) } valid.slrm <- function(object, warn=TRUE, ...) { verifyclass(object, "slrm") coeffs <- coef(object) ok <- all(is.finite(coeffs)) return(ok) } emend.slrm <- local({ tracemessage <- function(depth, ...) 
{ if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") splat(paste0(spacer, marker, " ", paste(...))) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } emend.slrm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "slrm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.slrm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # Fitted coefficients coef.orig <- coeffs <- coef(object) ## coefnames <- names(coeffs) # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") ## istrend <- (coef2term > 0) # Identify non-finite trend coefficients bad <- !is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.slrm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- emend.slrm(object.i, fatal=fatal, trace=tdnext) # evaluate log likelihood logL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log likelihood = ", logL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } emend.slrm }) spatialCovariateUnderModel.slrm <- function(model, covariate, ...) { P <- predict(model, type="probabilities") loc <- rasterxy.im(P, drop=TRUE) Zvalues <- evaluateCovariateAtPoints(covariate, loc, ...) 
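## (a weighted empirical distribution of the covariate under the fitted
##  model: each pixel contributes its covariate value, weighted by the
##  fitted presence probability extracted next)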
wtvalues <- P[drop=TRUE] df <- data.frame(Z=Zvalues, wt=wtvalues) return(df) } spatstat.model/R/psstA.R0000644000176200001440000001171714366621174014653 0ustar liggesusers# # psstA.R # # Pseudoscore residual for unnormalised F (area-interaction) # # $Revision: 1.10 $ $Date: 2023/02/02 02:39:33 $ # ################################################################################ # psstA <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", correction="all", truecoef=NULL, hi.res=NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) { if(is.ppm(object)) fit <- object else if(is.ppp(object) || is.quad(object)) { # convert to quadscheme if(is.ppp(object)) object <- quadscheme(object, ...) # fit model if(!is.null(model)) { stopifnot(is.ppm(model)) ## update 'model' using new point pattern 'object' e <- list2env(list(object=object), parent=model$callframe) fit <- update(model, Q=object, forcefit=TRUE, envir=e) } else if(ppmcorrection == "border") { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, correction=ppmcorrection, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) bX <- bdist.points(X) USEDX <- (bX > rbord) } else { USED <- rep.int(TRUE, U$n) USEDX <- rep.int(TRUE, X$n) } # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # determine breakpoints for r values rmaxdefault <- rmax.rule("F", Win, lambda) if(rfixed) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) else { # create fairly coarse 'r' values r <- seq(0, rmaxdefault, length=nr) breaks <- breakpts.from.r(r) } rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw", drop=FALSE, new.coef=truecoef, quad=hi.res) # rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[A](r), NULL), "theo", . 
~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[A]") # # for efficiency, compute the largest value of distance transform Dmax <- 0 for(i in 1:npts) { Di <- distmap(X[-i]) Dimax <- summary(Di)$max Dmax <- max(Dmax, Dimax) } Rmax <- min(max(rvals), Dmax * 1.1) nontrivial <- (rvals <= Rmax) trivialzeroes <- numeric(sum(!nontrivial)) # pseudosum Ax <- areaLoss.grid(X, rvals[nontrivial], subset=USEDX, ngrid=ngrid) C1 <- apply(Ax, 2, sum) C1 <- c(C1, trivialzeroes) # pseudocompensator OK <- USED & !Z Au <- areaGain.grid(U[OK], X, rvals[nontrivial], W=Win, ngrid=ngrid) lamu <- matrix(wc[OK], nrow=nrow(Au), ncol=ncol(Au)) C2 <- apply(lamu * Au, 2, sum) C2 <- c(C2, trivialzeroes) # pseudoscore residual Ctot <- C1 - C2 # tack on ans <- bind.fv(ans, data.frame(dat=C1, com=C2, res=Ctot), c("Sigma~Delta~V[A](r)", "bold(C)~Delta~V[A](r)", "%s(r)"), c("data pseudosum (contribution to %s)", "model pseudocompensator (contribution to %s)", "pseudoscore residual %s"), "res") # # pseudovariance # (skipped if called by envelope() etc) # if(correction == "all") { lamX <- matrix(wc[USED & Z], nrow=nrow(Ax), ncol=ncol(Ax)) Var <- apply(lamu * Au^2, 2, sum) + apply(lamX * Ax^2, 2, sum) Var <- c(Var, trivialzeroes) # two-sigma limits TwoSig <- 2 * sqrt(Var) # tack on ans <- bind.fv(ans, data.frame(var=Var, up=TwoSig, lo=-TwoSig), c("bold(C)^2~Delta~V[A](r)", "%s[up](r)", "%s[lo](r)"), c("pseudovariance of %s", "upper 2sigma critical limit for %s", "lower 2sigma critical limit for %s"), "res") fvnames(ans, ".") <- c("res", "up", "lo", "theo") } unitname(ans) <- unitname(fit) # return(ans) } spatstat.model/R/bc.R0000644000176200001440000000405314331173072014127 0ustar liggesusers#' bc.R #' #' Bias correction techniques #' #' $Revision: 1.3 $ $Date: 2022/01/04 05:30:06 $ bc <- function(fit, ...) { UseMethod("bc") } bc.ppm <- function(fit, ..., nfine=256) { stopifnot(is.ppm(fit)) # theta0 <- coef(fit) nc <- length(theta0) # X <- data.ppm(fit) Z <- is.data(quad.ppm(fit)) # evaluate sufficient statistic at data points sufX <- model.matrix(fit)[Z, ] if(ncol(sufX) != nc) stop("Internal error: model.matrix does not match coef(model)") # predict on fine grid finemask <- as.mask(as.owin(fit), dimyx=nfine) lamF <- predict(fit, type="cif", locations=finemask) sufF <- model.images(fit, W=finemask) if(length(sufF) != nc) stop("Internal error: model.images does not match coef(model)") # edge correction if(fit$correction == "border" && ((rbord <- fit$rbord) > 0)) { b <- bdist.pixels(finemask) bX <- bdist.points(X) excludeU <- eval.im(b < rbord) retainX <- (bX >= rbord) sufX <- sufX[retainX, , drop=FALSE] } else { excludeU <- FALSE } # compute fine approximation to score scoreX <- colSums(sufX) scoreW <- numeric(nc) for(k in seq_len(nc)) { S <- sufF[[k]] # infinite values of S may occur and correspond to zero cif Slam <- eval.im(ifelse(is.infinite(S) | excludeU, 0, S * lamF)) scoreW[k] <- integral.im(Slam) } score <- scoreX - scoreW # Newton-Raphson Iinv <- vcov(fit, hessian=TRUE) theta <- theta0 + Iinv %*% score theta <- theta[ , 1L, drop=TRUE] # # return(list(theta0=theta0, theta=theta)) return(theta) } # Richardson extrapolation (generic) rex <- function(x, r=2, k=1, recursive=FALSE) { # x should be a matrix # whose columns are successive estimates of a parameter vector # obtained using "grid step sizes" t, t/r, t/r^2, ... 
# Estimate from step size t is assumed to converge at rate t^k if(!is.matrix(x)) x <- matrix(x, nrow=1) if(ncol(x) <= 1) return(x) rk <- r^k y <- (rk * x[, -1L, drop=FALSE] - x[, -ncol(x), drop=FALSE])/(rk - 1) if(recursive) y <- rex(y, r=r, k=k+1, recursive=TRUE) return(y) } spatstat.model/R/leverage.R0000644000176200001440000012565214424137177015357 0ustar liggesusers# # leverage.R # # leverage and influence # # $Revision: 1.124 $ $Date: 2023/02/28 01:56:34 $ # leverage <- function(model, ...) { UseMethod("leverage") } leverage.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="leverage", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$leverage) } influence.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="influence", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$influence) } dfbetas.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="dfbetas", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$dfbetas) } ppmInfluence <- function(fit, what=c("leverage", "influence", "dfbetas"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, fitname=NULL) { stuff <- ppmInfluenceEngine(fit, what=what, ..., iScore=iScore, iHessian=iHessian, iArgs=iArgs, drop=drop, fitname=fitname) fnam <- c("fitname", "fit.is.poisson") result <- list() if("lev" %in% names(stuff)) { lev <- stuff[c(fnam, "lev")] class(lev) <- "leverage.ppm" result$leverage <- lev } if("infl" %in% names(stuff)) { infl <- stuff[c(fnam, "infl")] class(infl) <- "influence.ppm" result$influence <- infl } if(!is.null(dfb <- stuff$dfbetas)) { attr(dfb, "info") <- stuff[fnam] result$dfbetas <- dfb } other <- setdiff(names(stuff), c("lev", "infl", "dfbetas")) result[other] <- stuff[other] class(result) <- "ppmInfluence" return(result) } leverage.ppmInfluence <- function(model, ...) { model$leverage } influence.ppmInfluence <- function(model, ...) { model$influence } dfbetas.ppmInfluence <- function(model, ...) { model$dfbetas } ## ............... main workhorse .................................... ppmInfluenceEngine <- function(fit, what=c("leverage", "influence", "dfbetas", "score", "derivatives", "increments", "all"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, method=c("C", "interpreted"), fine=FALSE, precomputed=list(), sparseOK=TRUE, fitname=NULL, multitypeOK=FALSE, entrywise = TRUE, matrix.action = c("warn", "fatal", "silent"), dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps","grow.frame","shrink.frame"), geomsmooth = TRUE) { if(is.null(fitname)) fitname <- short.deparse(substitute(fit)) ## type of calculation to be performed method <- match.arg(method) what <- match.arg(what, several.ok=TRUE) if("all" %in% what) what <- c("leverage", "influence", "dfbetas", "score", "derivatives", "increments") matrix.action <- match.arg(matrix.action) influencecalc <- any(what %in% c("leverage", "influence", "dfbetas")) hesscalc <- influencecalc || any(what == "derivatives") sparse <- sparseOK target <- paste(what, collapse=",") ## discretisation rule.eps <- match.arg(rule.eps) ## ........... collect information about the model ................. 
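## (Notation for the computation below: m(u) is the canonical sufficient
##  statistic, H the negative Hessian of the log composite likelihood, and
##  e(u) the total effect of adding/deleting a point at u. The leverage
##  computed at the end is h(u) = lambda(u) * m(u)' H^{-1} e(u); influence
##  and dfbetas are the corresponding quadratic and linear forms.)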
stopifnot(is.ppm(fit)) #' ensure object contains GLM fit if(!hasglmfit(fit)) { fit <- update(fit, forcefit=TRUE) precomputed <- list() } #' type of interpoint interaction fit.is.poisson <- is.poisson(fit) hasInf <- !fit.is.poisson && !identical(fit$interaction$hasInf, FALSE) #' estimating function fitmethod <- fit$method logi <- (fitmethod == "logi") pseudo <- (fitmethod == "mpl") #' detect 'improved' fits improv <- fit$improve.type %orifnull% "none" if((!logi && !pseudo) || improv != "none") { info <- c(if(!logi && !pseudo) paste("method =", dQuote(fitname)) else NULL, if(improv != "none") paste("improve.type =", dQuote(improv)) else NULL) fitblurb <- paste(info, collapse=" and ") warning(paste("Model was fitted with", fitblurb, "but is treated as having been fitted by maximum", if(fit.is.poisson) "likelihood" else if(pseudo) "pseudolikelihood" else "logistic likelihood", "for leverage/influence calculation"), call.=FALSE) if(!logi && !pseudo) pseudo <- TRUE } ## Detect presence of irregular parameters if(is.null(iArgs)) iArgs <- fit$covfunargs gotScore <- !is.null(iScore) gotHess <- !is.null(iHessian) needHess <- gotScore && hesscalc # may be updated later if(!gotHess && needHess) stop("Must supply iHessian", call.=FALSE) #' ................... evaluate basic terms .................... ## extract values from model, using precomputed values if given theta <- precomputed$coef %orifnull% coef(fit) lampos <- precomputed$lambda %orifnull% fitted(fit, ignore.hardcore=hasInf, check=FALSE) mom <- precomputed$mom %orifnull% model.matrix(fit, splitInf=hasInf) ## 'lampos' is positive part of cif ## 'lam' is full model cif including zeroes lam <- lampos zerocif <- attr(mom, "-Inf") %orifnull% logical(nrow(mom)) anyzerocif <- any(zerocif) if(hasInf && anyzerocif) lam[zerocif] <- 0 p <- length(theta) Q <- quad.ppm(fit) w <- w.quad(Q) loc <- union.quad(Q) isdata <- is.data(Q) mt <- is.multitype(loc) if(length(w) != length(lam)) stop(paste("Internal error: length(w) = ", length(w), "!=", length(lam), "= length(lam)"), call.=FALSE) ## smoothing bandwidth and resolution for smoothed images of densities smallsigma <- if(!mt) avenndist(loc) else max(sapply(split(loc), avenndist)) ## previously used 'maxnndist' instead of 'avenndist' if(is.null(dimyx) && is.null(eps)) eps <- sqrt(prod(sidelengths(Frame(loc))))/256 #' ............... evaluate Hessian of regular parameters ................ ## domain of composite likelihood ## (e.g. eroded window in border correction) inside <- getglmsubset(fit) %orifnull% rep(TRUE, npoints(loc)) ## extract negative Hessian matrix of regular part of log composite likelihood ## hess = negative Hessian H ## fgrad = Fisher-scoring-like gradient G = estimate of E[H] if(logi) { ## .............. logistic composite likelihood ...................... 
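## (Under the logistic device each quadrature point u is treated as a
##  Bernoulli trial with success probability
##  p(u) = lambda(u) / (lambda(u) + rho), where rho is the dummy intensity;
##  this 'logiprob' drives the weights used throughout the logistic branches.)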
## Intensity of dummy points rho <- fit$Q$param$rho %orifnull% intensity(as.ppp(fit$Q)) logiprob <- lampos / (lampos + rho) vclist <- vcov(fit, what = "internals", fine=fine, matrix.action="silent") hess <- vclist$Slog fgrad <- vclist$fisher invhess <- if(is.null(hess)) NULL else checksolve(hess, "silent") invfgrad <- if(is.null(fgrad)) NULL else checksolve(fgrad, "silent") if(is.null(invhess) || is.null(invfgrad)) { #' use more expensive estimate of variance terms vclist <- vcov(fit, what = "internals", fine=TRUE, matrix.action=matrix.action) hess <- vclist$Slog fgrad <- vclist$fisher #' try again - exit if really singular invhess <- checksolve(hess, matrix.action, "Hessian", target) invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } # vc <- invhess %*% (vclist$Sigma1log+vclist$Sigma2log) %*% invhess } else { ## .............. likelihood or pseudolikelihood .................... invfgrad <- vcov(fit, hessian=TRUE, fine=fine, matrix.action="silent") fgrad <- hess <- if(is.null(invfgrad) || anyNA(invfgrad)) NULL else checksolve(invfgrad, "silent") if(is.null(fgrad)) { invfgrad <- vcov(fit, hessian=TRUE, fine=TRUE, matrix.action=matrix.action) fgrad <- hess <- checksolve(invfgrad, matrix.action, "Hessian", target) } } #' ............... augment Hessian ................... ## evaluate additional (`irregular') components of score, if any iscoremat <- ppmDerivatives(fit, "gradient", iScore, loc, covfunargs=iArgs) gotScore <- !is.null(iscoremat) needHess <- gotScore && hesscalc if(!gotScore) { REG <- 1:ncol(mom) } else { ## count regular and irregular parameters nreg <- ncol(mom) nirr <- ncol(iscoremat) ## add extra columns to model matrix mom <- cbind(mom, iscoremat) REG <- 1:nreg IRR <- nreg + 1:nirr ## evaluate additional (`irregular') entries of Hessian ihessmat <- if(!needHess) NULL else ppmDerivatives(fit, "hessian", iHessian, loc, covfunargs=iArgs) if(gotHess <- !is.null(ihessmat)) { ## recompute negative Hessian of log PL and its mean fgrad <- hessextra <- matrix(0, ncol(mom), ncol(mom)) } else if(needHess && length(iArgs)) { nami <- names(iArgs) stop(paste("Unable to compute iHess, the", ngettext(length(nami), "component", "components"), "of the Hessian matrix for the irregular", ngettext(length(nami), "parameter", "parameters"), commasep(sQuote(names(iArgs)))), call.=FALSE) } if(pseudo) { ## .............. likelihood or pseudolikelihood .................... 
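## (Both branches below evaluate the quadrature approximation to the
##  Fisher-scoring gradient, G = sum_i w_i lambda_i m_i m_i', either in
##  interpreted R for clarity or in C via sumouter() for speed; the extra
##  Hessian entries for irregular parameters are accumulated alongside.)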
switch(method, interpreted = { for(i in seq(loc$n)) { # weight for integrand wti <- lam[i] * w[i] if(all(is.finite(wti))) { # integral of outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * wti if(all(is.finite(v1))) fgrad <- fgrad + v1 # integral of Hessian # contributions nonzero for irregular parameters if(gotHess) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) * wti if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } } # subtract sum over data points if(gotHess) { for(i in which(isdata)) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] - v2 } hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { wlam <- lam * w fgrad <- sumouter(mom, wlam) if(gotHess) { # integral term isfin <- is.finite(wlam) & matrowall(is.finite(ihessmat)) vintegral <- if(all(isfin)) wlam %*% ihessmat else wlam[isfin] %*% ihessmat[isfin,, drop=FALSE] # sum over data points vdata <- .colSums(ihessmat[isdata, , drop=FALSE], sum(isdata), ncol(ihessmat), na.rm=TRUE) vcontrib <- vintegral - vdata hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } else { ## .............. logistic composite likelihood .................... switch(method, interpreted = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) for(i in seq(loc$n)) { ## outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * oweight[i] if(all(is.finite(v1))) fgrad <- fgrad + v1 ## Hessian term ## contributions nonzero for irregular parameters if(gotHess) { v2 <- hweight[i] * matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } if(gotHess) { hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) fgrad <- sumouter(mom, oweight) if(gotHess) { # Hessian term isfin <- is.finite(hweight) & matrowall(is.finite(ihessmat)) vcontrib <- if(all(isfin)) hweight %*% ihessmat else hweight[isfin] %*% ihessmat[isfin,, drop=FALSE] hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } if(!needHess) { if(pseudo){ hess <- fgrad invhess <- invfgrad } } # ok <- NULL if(drop) { ok <- complete.cases(mom) if(all(ok)) { ok <- NULL } else { if((nbad <- sum(isdata[!ok])) > 0) warning(paste("NA value of canonical statistic at", nbad, ngettext(nbad, "data point", "data points")), call.=FALSE) Q <- Q[ok] mom <- mom[ok, , drop=FALSE] loc <- loc[ok] lam <- lam[ok] w <- w[ok] isdata <- isdata[ok] inside <- inside[ok] } } # ........ start assembling result ..................... 
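## (The composite score below is a residual sum: the raw residuals
##  isdata - lambda*w, or isdata - logiprob in the logistic case, are
##  multiplied into the model matrix, giving the sum of m(x_i) over data
##  points minus its quadrature-approximated expectation.)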
result <- list(fitname=fitname, fit.is.poisson=fit.is.poisson) if(any(c("score", "derivatives") %in% what)) { ## calculate the composite score rawmean <- if(logi) logiprob else (lam * w) rawresid <- isdata - rawmean score <- matrix(rawresid, nrow=1) %*% mom if("score" %in% what) result$score <- score if("derivatives" %in% what) result$deriv <- list(mom=mom, score=score, fgrad=fgrad, invfgrad=invfgrad, hess=hess, invhess=invhess) if(all(what %in% c("score", "derivatives"))) return(result) } ## ::::::::::::::: compute second order terms ::::::::::::: ## >>> set model matrix to zero outside the domain <<< mom[!inside, ] <- 0 ## compute effect of adding/deleting each quadrature point if(fit.is.poisson) { ## ........ Poisson case .................................. eff <- mom ddS <- ddSintegrand <- NULL } else { ## ........ Gibbs case .................................... ## initialise eff <- mom ## second order interaction terms ## columns index the point being added/deleted ## rows index the points affected ## goal is to compute these effect matrices: eff.data <- eff.back <- matrix(0, nrow(eff), ncol(eff), dimnames=dimnames(eff)) ## U <- union.quad(Q) nU <- npoints(U) ## decide whether to split into blocks nX <- Q$data$n nD <- Q$dummy$n bls <- quadBlockSizes(nX, nD, p, announce=TRUE) nblocks <- bls$nblocks nperblock <- bls$nperblock ## if(nblocks > 1 && ("increments" %in% what)) { warning("Oversize quadrature scheme: cannot return array of increments", call.=FALSE) what <- setdiff(what, "increments") } R <- reach(fit) ## indices into original quadrature scheme whichok <- if(!is.null(ok)) which(ok) else seq_len(nX+nD) whichokdata <- whichok[isdata] whichokdummy <- whichok[!isdata] ## {{{{{{{{{{{{{ L O O P }}}}}}}}}}}}} ## loop for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) # corresponding subset of original quadrature scheme if(!is.null(ok) || nblocks > 1) { ## subset for which we will compute the effect current <- c(whichokdata, whichokdummy[first:last]) ## find neighbours, needed for calculation other <- setdiff(seq_len(nU), current) crx <- crosspairs(U[current], U[other], R, what="indices") nabers <- other[unique(crx$j)] ## subset actually requested requested <- c(current, nabers) ## corresponding stuff ('B' for block) isdataB <- isdata[requested] changesignB <- ifelse(isdataB, -1, 1) zerocifB <- zerocif[requested] anyzerocifB <- any(zerocifB) momB <- mom[requested, , drop=FALSE] lamB <- lam[requested] #' unused: #' insideB <- inside[requested] #' lamposB <- lampos[requested] if(logi) logiprobB <- logiprob[requested] wB <- w[requested] currentB <- seq_along(current) } else { requested <- NULL isdataB <- isdata changesignB <- ifelse(isdataB, -1, 1) zerocifB <- zerocif anyzerocifB <- anyzerocif momB <- mom lamB <- lam #' unused: #' insideB <- inside #' lamposB <- lampos if(logi) logiprobB <- logiprob wB <- w } ## compute second order terms ## ddS[i,j, ] = Delta_i Delta_j S(x) ddS <- deltasuffstat(fit, restrict = "first", dataonly=FALSE, quadsub=requested, sparseOK=sparse, splitInf=hasInf, force=TRUE, warn.forced=TRUE) ## if(is.null(ddS)) { warning("Second order interaction terms are not implemented", " for this model; they are treated as zero", call.=FALSE) break } else { sparse <- inherits(ddS, "sparse3Darray") if(hasInf) { deltaInf <- attr(ddS, "deltaInf") hasInf <- !is.null(deltaInf) if(hasInf) sparse <- sparse && inherits(deltaInf, "sparseMatrix") } if(gotScore) { ## add extra planes of zeroes to second-order model matrix ## 
(zero because the irregular components are part of the trend) paddim <- c(dim(ddS)[1:2], nirr) if(!sparse) { ddS <- abind::abind(ddS, array(0, dim=paddim), along=3) } else { ddS <- bind.sparse3Darray(ddS, sparse3Darray(dims=paddim), along=3) } } } ## ^^^^^^^^^^^^^^^^^ second term in DeltaScore ^^^^^^^^^^^^^^^^^^^^ ## effect of addition/deletion of U[j] ## on score contribution from data points (sum automatically restricted to ## interior for border correction by earlier call to ## deltasuffstat(..., restrict = "first")) ddSX <- ddS[isdataB, , , drop=FALSE] eff.data.B <- marginSumsSparse(ddSX, c(2,3)) ## check if any quadrature points have zero conditional intensity; ## these do not contribute to this term if(anyzerocifB) eff.data.B[zerocifB, ] <- 0 ## save results for current subset of quadrature points if(is.null(requested)) { eff.data <- eff.data.B } else { eff.data[current,] <- as.matrix(eff.data.B[currentB,,drop=FALSE]) } ## rm(ddSX, eff.data.B) ## ^^^^^^^^^^^^^^^^^ third term in DeltaScore ^^^^^^^^^^^^^^^^^^^^ ## effect of addition/deletion of U[j] on integral term in score if(!sparse) { ## ::::::::::::::: full arrays, simpler code ::::::::::::::::::: if(pseudo) { ## --------------- likelihood or pseudolikelihood ----------- ## model matrix after addition/deletion of each U[j] ## mombefore[i,j,] <- mom[i,] di <- dim(ddS) mombefore <- array(apply(momB, 2, rep, times=di[2]), dim=di) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] momafter <- mombefore + momchange ## effect of addition/deletion of U[j] on lambda(U[i], X) if(gotScore) { lamratio <- exp(tensor::tensor(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else { lamratio <- exp(tensor::tensor(momchange, theta, 3, 1)) } lamratio <- array(lamratio, dim=dim(momafter)) if(!hasInf) { #' cif is positive ddSintegrand <- lamB * (momafter * lamratio - mombefore) } else { #' cif can be zero zerobefore <- matrix(zerocifB, di[1], di[2]) zerochange <- deltaInf * 1L zerochange[ , isdataB] <- - zerochange[ , isdataB] zeroafter <- zerobefore + zerochange momZbefore <- mombefore momZbefore[ , zerocifB, ] <- 0 IJK <- unname(which(array(zeroafter == 1, dim=di), arr.ind=TRUE)) momZafter <- momafter momZafter[IJK] <- 0 momZchange <- momZafter- momZbefore ddSintegrand <- lamB * (momZafter * lamratio - momZbefore) } rm(momchange, mombefore, momafter, lamratio) } else { ## --------------- logistic composite likelihood ----------- stop("Non-sparse method is not implemented for method = 'logi'!") } gc() } else { ## :::::::::::::::::: sparse arrays :::::::::::::::::::::::: if(logi) { ## --------------- logistic composite likelihood ----------- ## Delta suff. stat. 
with sign change for data/dummy (sparse3Darray) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## Evaluate theta^T %*% ddS (with sign -1/+1 according to data/dummy) ## as triplet sparse matrix if(gotScore){ momchangeeffect <- tensorSparse(momchange[,,REG,drop=FALSE], theta, 3, 1) } else{ momchangeeffect <- tensorSparse(momchange, theta, 3, 1) } ## Copy to each slice momchangeeffect <- expandSparse(momchangeeffect, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(momchangeeffect) ## Entrywise calculations below momchange <- as.numeric(momchange[ijk]) mombefore <- mom[cbind(ijk$i,ijk$k)] momafter <- mombefore + momchange ## Transform to change in probability expchange <- exp(momchangeeffect$x) lamBi <- lamB[ijk$i] logiprobBi <- logiprobB[ijk$i] changesignBj <- changesignB[ijk$j] pchange <- changesignBj*(lamBi * expchange / (lamBi*expchange + rho) - logiprobBi) ## Note: changesignBj * momchange == as.numeric(ddS[ijk]) if(!hasInf) { #' cif is positive ddSintegrand <- momafter * pchange + logiprobBi * changesignBj * momchange } else { #' cif can be zero isdataBj <- isdataB[ijk$j] zerobefore <- as.logical(zerocifB[ijk$i]) zerochange <- as.logical(deltaInf[cbind(ijk$i, ijk$j)]) zerochange[isdataBj] <- - zerochange[isdataBj] zeroafter <- zerobefore + zerochange momZbefore <- ifelse(zerobefore, 0, mombefore) momZafter <- ifelse(zeroafter, 0, momafter) momZchange <- momZafter - momZbefore ddSintegrand <- momZafter * pchange + logiprobBi * changesignBj * momZchange } ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else{ ## --------------- likelihood or pseudolikelihood ----------- if(entrywise) { ## ...... sparse arrays, using explicit indices ...... momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] if(gotScore){ lamratiominus1 <- expm1(tensorSparse(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tensorSparse(momchange, theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(lamratiominus1) ## Everything entrywise with ijk now: # lamratiominus1 <- lamratiominus1[cbind(ijk$i, ijk$j)] lamratiominus1 <- as.numeric(lamratiominus1$x) momchange <- as.numeric(momchange[ijk]) mombefore <- momB[cbind(ijk$i, ijk$k)] momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- lamB[ijk$i] if(!hasInf) { #' cif is positive ddSintegrand <- lamarray * (momafter * lamratiominus1 + momchange) } else { #' cif can be zero isdataBj <- isdataB[ijk$j] zerobefore <- as.logical(zerocifB[ijk$i]) zerochange <- as.logical(deltaInf[cbind(ijk$i, ijk$j)]) zerochange[isdataBj] <- - zerochange[isdataBj] zeroafter <- zerobefore + zerochange momZbefore <- ifelse(zerobefore, 0, mombefore) momZafter <- ifelse(zeroafter, 0, momafter) momZchange <- momZafter - momZbefore ddSintegrand <- lamarray*(momZafter*lamratiominus1 + momZchange) } ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else { ## ...... sparse array code ...... ## Entries are required only for pairs i,j which interact. 
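## (Pairs farther apart than R = reach(fit) do not interact, so these sparse
##  3D arrays hold entries only for close pairs, avoiding storage of order
##  nU^2 * p dense values.)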
## mombefore[i,j,] <- mom[i,] mombefore <- mapSparseEntries(ddS, 1, momB, conform=TRUE, across=3) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## momafter <- evalSparse3Dentrywise(mombefore + momchange) momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- mapSparseEntries(ddS, 1, lamB, conform=TRUE, across=3) if(gotScore){ lamratiominus1 <- expm1(tensorSparse(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tensorSparse(momchange,theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ## ddSintegrand <- evalSparse3Dentrywise(lamarray * (momafter* lamratiominus1 + momchange)) if(!hasInf) { #' cif is positive ddSintegrand <- lamarray * (momafter* lamratiominus1 + momchange) } else { #' cif has zeroes zerobefore <- mapSparseEntries(ddS, 1, zerocifB, conform=TRUE, across=3) zerochange <- mapSparseEntries(ddS, 1, deltaInf, conform=TRUE, across=2) zerochange[,isdataB,] <- - zerochange[,isdataB,] zeroafter <- zerobefore + zerochange momZbefore <- mombefore momZafter <- momafter momZbefore[ , zerocifB , ] <- 0 IJK <- SparseIndices(zeroafter) momZafter[IJK] <- 0 momZchange <- momZafter - momZbefore ddSintegrand <- lamarray*(momZafter*lamratiominus1 + momZchange) } } rm(lamratiominus1, lamarray, momafter) } rm(momchange, mombefore) } if(anyzerocifB) { ddSintegrand[zerocifB,,] <- 0 ddSintegrand[,zerocifB,] <- 0 } ## integrate if(logi){ # eff.back.B <- tensorSparse(ddSintegrand, rep(1, length(wB)), 1, 1) eff.back.B <- marginSumsSparse(ddSintegrand, c(2,3)) } else{ eff.back.B <- changesignB * tensorSparse(ddSintegrand, wB, 1, 1) } ## save contribution if(is.null(requested)) { eff.back <- eff.back.B } else { eff.back[current,] <- as.matrix(eff.back.B[currentB, , drop=FALSE]) } } ## {{{{{{{{{{{{{ E N D O F L O O P }}}}}}}}}}}}} ## total eff <- eff + eff.data - eff.back eff <- as.matrix(eff) } if("increments" %in% what) { result$increm <- list(ddS=ddS, ddSintegrand=ddSintegrand, isdata=isdata, wQ=w) } if(!any(c("leverage", "influence", "dfbetas") %in% what)) return(result) # ............ compute leverage, influence, dfbetas .............. if(!is.matrix(invhess)) stop("Internal error: inverse Hessian not available", call.=FALSE) # compute basic contribution from each quadrature point nloc <- npoints(loc) switch(method, interpreted = { b <- numeric(nloc) for(i in seq(nloc)) { effi <- eff[i,, drop=FALSE] momi <- mom[i,, drop=FALSE] b[i] <- momi %*% invhess %*% t(effi) } }, C = { b <- bilinearform(mom, invhess, eff) }) # .......... leverage ............. 
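# (Pointwise leverage h(u) = b(u) * lambda(u), with b(u) = m(u)' H^{-1} e(u)
#  computed just above; values at dummy points are then kernel-smoothed, and
#  also mapped to the nearest pixel, to form the diagnostic images.)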
if("leverage" %in% what) { ## values of leverage (diagonal) at points of 'loc' h <- b * lam ok <- is.finite(h) geomsmooth <- geomsmooth && all(h[!isdata & ok] >= 0) if(mt) h <- data.frame(leverage=h, type=marks(loc)) levval <- (loc %mark% h)[ok] levvaldum <- levval[!isdata[ok]] if(!mt) { levsmo <- Smooth(levvaldum, sigma=smallsigma, geometric=geomsmooth, dimyx=dimyx, eps=eps, rule.eps=rule.eps) levnearest <- nnmark(levvaldum, dimyx=dimyx, eps=eps) } else { levsplitdum <- split(levvaldum, reduce=TRUE) levsmo <- Smooth(levsplitdum, sigma=smallsigma, geometric=geomsmooth, dimyx=dimyx, eps=eps) levnearest <- solapply(levsplitdum, nnmark, dimyx=dimyx, eps=eps, rule.eps=rule.eps) } ## mean level if(fit.is.poisson) { a <- area(Window(loc)) * markspace.integral(loc) levmean <- p/a } else { levmean <- if(!mt) mean(levnearest) else mean(sapply(levnearest, mean)) } lev <- list(val=levval, smo=levsmo, ave=levmean, nearest=levnearest) result$lev <- lev } # .......... influence ............. if("influence" %in% what) { if(logi){ X <- loc effX <- as.numeric(isdata) * eff - mom * (inside * logiprob) } else{ # values of influence at data points X <- loc[isdata] effX <- eff[isdata, ,drop=FALSE] } M <- (1/p) * quadform(effX, invhess) if(logi || is.multitype(X)) { # result will have several columns of marks M <- data.frame(influence=M) if(logi) M$isdata <- factor(isdata, levels = c(TRUE, FALSE), labels = c("data", "dummy")) if(is.multitype(X)) M$type <- marks(X) } V <- X %mark% M result$infl <- V } # .......... dfbetas ............. if("dfbetas" %in% what) { if(logi){ M <- as.numeric(isdata) * eff - mom * (inside * logiprob) M <- t(invhess %*% t(M)) Mdum <- M Mdum[isdata,] <- 0 Mdum <- Mdum / w.quad(Q) DFB <- msr(Q, M[isdata, ], Mdum) } else { vex <- invhess %*% t(mom) dex <- invhess %*% t(eff) switch(method, interpreted = { dis <- con <- matrix(0, nloc, ncol(mom)) for(i in seq(nloc)) { vexi <- vex[,i, drop=FALSE] dexi <- dex[,i, drop=FALSE] dis[i, ] <- if(isdata[i]) dexi else 0 con[i, ] <- if(inside[i]) (- lam[i] * vexi) else 0 } }, C = { dis <- t(dex) dis[!isdata,] <- 0 con <- - lam * t(vex) con[(lam == 0 | !inside), ] <- 0 }) colnames(dis) <- colnames(con) <- colnames(mom) DFB <- msr(Q, dis[isdata, ], con) } #' add smooth component DFB <- augment.msr(DFB, sigma=smallsigma, dimyx=dimyx, eps=eps, rule.eps=rule.eps) result$dfbetas <- DFB } return(result) } ## >>>>>>>>>>>>>>>>>>>>>>> HELPER FUNCTIONS <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ## extract derivatives from covariate functions ## WARNING: these are not the score components in general ppmDerivatives <- function(fit, what=c("gradient", "hessian"), Dcovfun=NULL, loc, covfunargs=list()) { what <- match.arg(what) if(!is.null(Dcovfun)) { ## use provided function Dcov to compute derivatives Dvalues <- mpl.get.covariates(Dcovfun, loc, covfunargs=covfunargs) result <- as.matrix(as.data.frame(Dvalues)) return(result) } ## any irregular parameters? 
if(length(covfunargs) == 0) return(NULL) ## Try to extract derivatives from covariate functions ## This often works if the functions were created by symbolic differentiation fvalues <- mpl.get.covariates(fit$covariates, loc, covfunargs=covfunargs, need.deriv=TRUE) Dlist <- attr(fvalues, "derivatives")[[what]] if(length(Dlist) == 0) return(NULL) switch(what, gradient = { result <- do.call(cbind, unname(lapply(Dlist, as.data.frame))) result <- as.matrix(result) }, hessian = { ## construct array containing Hessian matrices biga <- do.call(blockdiagarray, Dlist) ## flatten matrices result <- matrix(biga, nrow=dim(biga)[1L]) }) return(result) } ## >>>>>>>>>>>>>>>> PLOT METHODS <<<<<<<<<<<<<<<<<<<<< plot.leverage.ppm <- function(x, ..., what=c("smooth", "nearest", "exact"), showcut=TRUE, args.cut=list(drawlabels=FALSE), multiplot=TRUE) { what <- match.arg(what) fitname <- x$fitname defaultmain <- paste("Leverage for", fitname) y <- x$lev if(what == "exact") { #' plot exact quadrature locations and leverage values yval <- y$val dont.complain.about(yval) z <- do.call(plot, resolve.defaults(list(x=quote(yval), multiplot=multiplot), list(...), list(main=defaultmain))) return(invisible(z)) } smo <- as.im(x, what=what) if(is.null(smo)) return(invisible(NULL)) ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- do.call(harmonise.im, unname(smo)) ## smo <- Reduce("+", smo) smo <- im.apply(smo, sum, check=FALSE) defaultmain <- c(defaultmain, "(sum over all types of point)") } args.contour <- resolve.defaults(args.cut, list(levels=ave)) cutinfo <- list(addcontour=showcut, args.contour=args.contour) if(is.im(smo)) { do.call(plot.im, resolve.defaults(list(quote(smo)), cutinfo, list(...), list(main=defaultmain))) } else if(inherits(smo, "imlist")) { do.call(plot.solist, resolve.defaults(list(quote(smo)), cutinfo, list(...), list(main=defaultmain))) } invisible(NULL) } persp.leverage.ppm <- function(x, ..., what=c("smooth", "nearest"), main, zlab="leverage") { if(missing(main)) main <- short.deparse(substitute(x)) what <- match.arg(what) y <- as.im(x, what=what) if(is.null(y)) return(invisible(NULL)) if(is.im(y)) return(persp(y, main=main, ..., zlab=zlab)) pa <- par(ask=TRUE) on.exit(par(pa)) lapply(y, persp, main=main, ..., zlab=zlab) return(invisible(NULL)) } contour.leverage.ppm <- function(x, ..., what=c("smooth", "nearest"), showcut=TRUE, args.cut=list(col=3, lwd=3, drawlabels=FALSE), multiplot=TRUE) { defaultmain <- paste("Leverage for", x$fitname) smo <- as.im(x, what=what) y <- x$lev ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- do.call(harmonise.im, unname(smo)) ## smo <- Reduce("+", smo) smo <- im.apply(smo, sum, check=FALSE) defaultmain <- c(defaultmain, "(sum over all types of point)") } argh1 <- resolve.defaults(list(...), list(main=defaultmain)) argh2 <- resolve.defaults(args.cut, list(levels=ave), list(...)) if(is.im(smo)) { #' single panel out <- do.call(contour, append(list(x=smo), argh1)) if(showcut) do.call(contour, append(list(x=smo, add=TRUE), argh2)) } else if(inherits(smo, "imlist")) { #' multiple panels argh <- append(list(x=smo, plotcommand ="contour"), argh1) if(showcut) { argh <- append(argh, list(panel.end=function(i, y, ...) 
contour(y, ...), panel.end.args=argh2)) } out <- do.call(plot.solist, argh) } else { warning("Unrecognised format") out <- NULL } return(invisible(out)) } plot.influence.ppm <- function(x, ..., multiplot=TRUE) { fitname <- x$fitname defaultmain <- paste("Influence for", fitname) y <- x$infl if(multiplot && isTRUE(ncol(marks(y)) > 1)) { # apart from the influence value, there may be additional columns of marks # containing factors: {type of point}, { data vs dummy in logistic case } ma <- as.data.frame(marks(y)) fax <- sapply(ma, is.factor) nfax <- sum(fax) if(nfax == 1) { # split on first available factor, and remove this factor y <- split(y, reduce=TRUE) } else if(nfax > 1) { # several factors: split according to them all, and remove them all f.all <- do.call(interaction, ma[fax]) z <- y %mark% ma[,!fax] y <- split(z, f.all) } } do.call(plot, resolve.defaults(list(quote(y)), list(...), list(main=defaultmain, multiplot=multiplot, which.marks=1))) } ## >>>>>>>>>>>>>>>> CONVERSION METHODS <<<<<<<<<<<<<<<<<<<<< as.im.leverage.ppm <- function(X, ..., what=c("smooth", "nearest")) { what <- match.arg(what) y <- switch(what, smooth = X$lev$smo, nearest = X$lev$nearest) if(is.null(y)) warning(paste("Data for", sQuote(what), "image are not available:", "please recompute the leverage using the current spatstat"), call.=FALSE) return(y) # could be either an image or a list of images } as.function.leverage.ppm <- function(x, ...) { X <- x$lev$val S <- ssf(unmark(X), marks(X)) return(as.function(S)) } as.ppp.influence.ppm <- function(X, ...) { return(X$infl) } as.owin.leverage.ppm <- function(W, ..., fatal=TRUE) { y <- as.im(W) if(inherits(y, "imlist")) y <- y[[1L]] as.owin(y, ..., fatal=fatal) } as.owin.influence.ppm <- function(W, ..., fatal=TRUE) { as.owin(as.ppp(W), ..., fatal=fatal) } domain.leverage.ppm <- domain.influence.ppm <- Window.leverage.ppm <- Window.influence.ppm <- function(X, ...) { as.owin(X) } ## >>>>>>>>>>>>>>>> PRINT METHODS <<<<<<<<<<<<<<<<<<<<< print.leverage.ppm <- function(x, ...) { splat("Point process leverage function") fitname <- x$fitname splat("for model:", fitname) lev <- x$lev splat("\nExact values:") print(lev$val) splat("\nSmoothed values:") print(lev$smo) ## for compatibility we retain the x$fit usage if(x$fit.is.poisson %orifnull% is.poisson(x$fit)) splat("\nAverage value:", lev$ave) return(invisible(NULL)) } print.influence.ppm <- function(x, ...) { splat("Point process influence measure") fitname <- x$fitname splat("for model:", fitname) splat("\nExact values:") print(x$infl) return(invisible(NULL)) } ## >>>>>>>>>>>>>>>> SUBSET METHODS <<<<<<<<<<<<<<<<<<<<< "[.leverage.ppm" <- function(x, i, ..., update=TRUE) { if(missing(i)) return(x) y <- x$lev smoi <- if(is.im(y$smo)) y$smo[i, ...] else solapply(y$smo, "[", i=i, ...) if(!inherits(smoi, c("im", "imlist"))) return(smoi) y$smo <- smoi y$val <- y$val[i, ...] if(update) y$ave <- if(is.im(smoi)) mean(smoi) else mean(sapply(smoi, mean)) x$lev <- y return(x) } "[.influence.ppm" <- function(x, i, ...) { if(missing(i)) return(x) y <- x$infl[i, ...] if(!is.ppp(y)) return(y) x$infl <- y return(x) } ## >>>>>>>>>>>>>>>> SMOOTHING, INTEGRATION <<<<<<<<<<<<<<<<<<<<< integral.leverage.ppm <- function(f, domain=NULL, ...) { y <- as.im(f, what="nearest") z <- if(is.im(y)) { integral(y, domain=domain, ...) } else if(is.solist(y)) { sapply(y, integral, domain=domain, ...) } else stop("Internal format is not understood") if(length(dim(z))) z <- t(z) return(z) } integral.influence.ppm <- function(f, domain=NULL, ...) 
{ if(!is.null(domain)) { if(is.tess(domain)) { z <- sapply(tiles(domain), integral, f=f) if(length(dim(z))) z <- t(z) return(z) } f <- f[domain] } #' actual computation y <- as.ppp(f) return(colSums(as.matrix(marks(y)))) } mean.leverage.ppm <- function(x, ...) { y <- as.im(x, what="nearest") mean(y, ...) } Smooth.leverage.ppm <- function(X, ...) Smooth(X$lev$val, ...) Smooth.influence.ppm <- function(X, ...) Smooth(as.ppp(X), ...) ## >>>>>>>>>>>>>>>> GEOMETRICAL OPERATIONS <<<<<<<<<<<<<<<<<<<<< shift.leverage.ppm <- function(X, ...) { vec <- getlastshift(shift(as.owin(X), ...)) X$lev$val <- shift(X$lev$val, vec=vec) smo <- X$lev$smo X$lev$smo <- if(is.im(smo)) shift(smo, vec=vec) else solapply(smo, shift, vec=vec) return(putlastshift(X, vec)) } shift.influence.ppm <- function(X, ...) { X$infl <- shift(X$infl, ...) return(putlastshift(X, getlastshift(X$infl))) } spatstat.model/R/plot.plotppm.R0000644000176200001440000001076714331173073016225 0ustar liggesusers# # plot.plotppm.R # # engine of plot method for ppm # # $Revision: 1.23 $ $Date: 2022/11/04 10:46:08 $ # # plot.plotppm <- function(x,data=NULL,trend=TRUE,cif=TRUE,se=TRUE, pause=interactive(), how=c("persp","image","contour"), ..., pppargs=list()) { verifyclass(x,"plotppm") # determine main plotting actions superimposed <- !is.null(data) if(!missing(trend) && (trend & is.null(x[["trend"]]))) stop("No trend to plot.\n") trend <- trend & !is.null(x[["trend"]]) if(!missing(cif) && (cif & is.null(x[["cif"]]))) stop("No cif to plot.\n") cif <- cif & !is.null(x[["cif"]]) if(!missing(se) && (se & is.null(x[["se"]]))) stop("No SE to plot.\n") se <- se & !is.null(x[["se"]]) surftypes <- c("trend", "cif", "se")[c(trend, cif, se)] # marked point process? mrkvals <- attr(x,"mrkvals") marked <- (length(mrkvals) > 1) if(marked) data.marks <- marks(data) if(marked & superimposed) { data.types <- levels(data.marks) if(any(sort(data.types) != sort(mrkvals))) stop(paste("Data marks are different from mark", "values for argument x.\n")) } # plotting style howmat <- outer(how, c("persp", "image", "contour"), "==") howmatch <- matrowany(howmat) if (any(!howmatch)) stop(paste("unrecognised option", how[!howmatch])) # no pause required for single display if(missing(pause) || is.null(pause)) { nplots <- length(surftypes) * length(mrkvals) pause <- interactive() && (nplots != 1) } # start plotting if(pause) { oldpar <- par(ask = TRUE) on.exit(par(oldpar)) } for(ttt in surftypes) { xs <- x[[ttt]] for (i in seq_along(mrkvals)) { xsi <- xs[[i]] level <- mrkvals[i] main <- paste(if(ttt == "se") "Estimated" else "Fitted", ttt, if(marked) paste("\n mark =", level) else NULL) dont.complain.about(xsi) for (style in how) { switch(style, persp = { do.call(persp, resolve.defaults(list(quote(xsi)), list(...), spatstat.options("par.persp"), list(xlab="x", zlab=ttt, main=main))) }, image = { do.call(image, resolve.defaults(list(quote(xsi)), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data dont.complain.about(X) do.call(plot.ppp, append(list(x=quote(X), add=TRUE), pppargs)) } }, contour = { do.call(contour, resolve.defaults(list(quote(xsi)), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data dont.complain.about(X) do.call(plot.ppp, append(list(x=quote(X), add=TRUE), pppargs)) } }, { stop(paste("Unrecognised plot style", style)) }) } } } return(invisible()) } print.plotppm <- function(x, ...) 
{ verifyclass(x, "plotppm") trend <- x$trend cif <- x$cif mrkvals <- attr(x, "mrkvals") ntypes <- length(mrkvals) unmarked <- (ntypes == 1 ) cat(paste("Object of class", sQuote("plotppm"), "\n")) if(unmarked) cat("Computed for an unmarked point process\n") else { cat("Computed for a marked point process, with mark values:\n") print(mrkvals) } cat("Contains the following components:\n") if(!is.null(trend)) { cat("\n$trend:\tFitted trend.\n") if(unmarked) { cat("A list containing 1 image\n") print(trend[[1]], ...) } else { cat(paste("A list of", ntypes, "images\n")) cat("Typical details:\n") print(trend[[1]], ...) } } if(!is.null(cif)) { cat("\n$cif:\tFitted conditional intensity.\n") if(unmarked) { cat("A list containing 1 image\n") print(cif[[1]], ...) } else { cat(paste("A list of", ntypes, "images\n")) cat("Typical details:\n") print(cif[[1]], ...) } } invisible(NULL) } spatstat.model/R/concom.R0000644000176200001440000000742514243551505015032 0ustar liggesusers# # # concom.R # # $Revision: 1.5 $ $Date: 2018/03/15 07:37:41 $ # # The connected component interaction # # Concom() create an instance of the connected component interaction # [an object of class 'interact'] # # ------------------------------------------------------------------- # Concom <- local({ connectedlabels <- function(X, R) { connected(X, R, internal=TRUE) } countcompo <- function(X, R) { length(unique(connectedlabels(X, R))) } # change in number of components when point i is deleted cocoDel <- function(X, R, subset=seq_len(npoints(X))) { n <- length(subset) ans <- integer(n) if(n > 0) { cX <- countcompo(X, R) for(i in 1:n) ans[i] = countcompo(X[-subset[i]], R) - cX } return(ans) } # change in number of components when new point is added cocoAdd <- function(U, X, R) { U <- as.ppp(U, W=as.owin(X)) nU <- npoints(U) cr <- crosspairs(U, X, R, what="indices") lab <- connectedlabels(X, R) hitcomp <- tapply(X=lab[cr$j], INDEX=factor(cr$i, levels=1:nU), FUN=unique, simplify=FALSE) nhit <- unname(lengths(hitcomp)) change <- 1L - nhit return(change) } # connected component potential cocopot <- function(X,U,EqualPairs,pars,correction, ...) 
{
    bad <- !(correction %in% c("border", "none"))
    if((nbad <- sum(bad)) > 0)
      warning(paste("The",
                    ngettext(nbad, "correction", "corrections"),
                    commasep(sQuote(correction[bad])),
                    ngettext(nbad, "is", "are"),
                    "not implemented"))
    n <- U$n
    answer <- numeric(n)
    r <- pars$r
    if(is.null(r)) stop("internal error: r parameter not found")
    dummies <- !(seq_len(n) %in% EqualPairs[,2L])
    if(sum(dummies) > 0)
      answer[dummies] <- -cocoAdd(U[dummies], X, r)
    ii <- EqualPairs[,1L]
    jj <- EqualPairs[,2L]
    answer[jj] <- cocoDel(X, r, subset=ii)
    return(answer + 1L)
  }

  # template object without family, par, version
  BlankCoco <- list(
    name     = "Connected component process",
    creator  = "Concom",
    family   = "inforder.family", # evaluated later
    pot      = cocopot,
    par      = list(r = NULL), # to be filled in
    parnames = "distance threshold",
    hasInf   = FALSE,
    init     = function(self) {
      r <- self$par$r
      if(!is.numeric(r) || length(r) != 1L || r <= 0)
        stop("distance threshold r must be a positive number")
    },
    update = NULL, # default OK
    print = NULL,  # default OK
    interpret = function(coeffs, self) {
      logeta <- as.numeric(coeffs[1L])
      eta <- exp(logeta)
      return(list(param=list(eta=eta),
                  inames="interaction parameter eta",
                  printable=signif(eta)))
    },
    valid = function(coeffs, self) {
      eta <- ((self$interpret)(coeffs, self))$param$eta
      return(is.finite(eta))
    },
    project = function(coeffs, self) {
      if((self$valid)(coeffs, self)) return(NULL)
      return(Poisson())
    },
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      if(anyNA(coeffs)) return(Inf)
      logeta <- coeffs[1L]
      if(abs(logeta) <= epsilon) return(0) else return(Inf)
    },
    version=NULL # to be added
  )
  class(BlankCoco) <- "interact"

  Concom <- function(r) {
    instantiate.interact(BlankCoco, list(r=r))
  }

  Concom <- intermaker(Concom, BlankCoco)

  Concom
})
spatstat.model/R/multistrhard.R0000644000176200001440000003135714331173073016275 0ustar liggesusers#
#
#    multistrhard.S
#
#    $Revision: 2.40 $  $Date: 2022/03/07 02:20:59 $
#
#    The multitype Strauss/hardcore process
#
#    MultiStraussHard()
#                 create an instance of the multitype Strauss/hardcore
#                 point process
#                 [an object of class 'interact']
#
# -------------------------------------------------------------------
#
doMultiStraussHard <- local({

  # ........  define potential ......................
MSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be counted # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be counted # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { # apply the relevant interaction distance to each pair of points rxu <- r[ tx, tu ] str <- (d < rxu) str[is.na(str)] <- FALSE # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } return(z) } # ............... end of potential function ................... # .......... auxiliary functions ................. delMSH <- function(which, types, iradii, hradii, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified MultiStraussHard with fewer gamma parameters return(MultiStraussHard(types, iradii, hradii)) } else if(any(!ihc)) { # no gamma interactions left, but some active hard cores return(MultiHard(types, hradii)) } else return(Poisson()) } # ........................................................... 
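  ## Illustrative use of the creator defined below (a sketch, not
  ## executed; the radii are toy values for a bivariate pattern such
  ## as 'amacrine' from spatstat.data):
  if(FALSE) {
    r <- matrix(c(0.08, 0.10, 0.10, 0.08), 2, 2)  ## interaction radii
    h <- matrix(0.02, 2, 2)                       ## hardcore radii
    fit <- ppm(amacrine ~ marks, MultiStraussHard(iradii=r, hradii=h))
  }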
# Set up basic object except for family and parameters BlankMSHobject <- list( name = "Multitype Strauss Hardcore process", creator = "MultiStraussHard", family = "pairwise.family", # evaluated later pot = MSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL), # to be added parnames = c("possible types", "interaction distances", "hardcore distances"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiStraussHard(types=types,hradii=hradii,iradii=self$par$iradii) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii # hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii")) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii")) } ina <- is.na(iradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) if(!is.null(hradii)) { hna <- is.na(hradii) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") } }, update = NULL, # default OK print = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii nt <- nrow(iradii) if(waxlyrical('gory')) { splat(nt, "types of points") if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } splat("Interaction radii:") dig <- getOption("digits") print(signif(iradii, dig)) if(!is.null(hradii)) { splat("Hardcore radii:") print(signif(hradii, dig)) } else splat("Hardcore radii: not yet determined") invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii # list all relevant unordered pairs of types uptri <- (row(r) <= col(r)) & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # Check that we managed to estimate all required parameters required <- !is.na(iradii) if(!all(is.finite(gamma[required]))) return(FALSE) # Check that the model is integrable # inactive hard cores 
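      # (a pair of types with an active hard core is integrable for any
      #  gamma; pairs whose hard core is absent or zero must have
      #  gamma <= 1)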
      ihc <- (is.na(hradii) | hradii == 0)
      # .. must have gamma <= 1
      return(all(gamma[required & ihc] <= 1))
    },
    project = function(coeffs, self) {
      # types
      types <- self$par$types
      # interaction radii r[i,j]
      iradii <- self$par$iradii
      # hard core radii r[i,j]
      hradii <- self$par$hradii
      # interaction parameters gamma[i,j]
      gamma <- (self$interpret)(coeffs, self)$param$gammas
      # required gamma parameters
      required <- !is.na(iradii)
      # active hard cores
      activehard <- !is.na(hradii) & (hradii > 0)
      ihc <- !activehard
      # problems
      gammavalid <- is.finite(gamma) & (activehard | gamma <= 1)
      naughty <- required & !gammavalid
      if(!any(naughty)) return(NULL)
      #
      if(spatstat.options("project.fast")) {
        # remove ALL naughty terms simultaneously
        return(delMSH(naughty, types, iradii, hradii, ihc))
      } else {
        # present a list of candidates
        rn <- row(naughty)
        cn <- col(naughty)
        uptri <- (rn <= cn)
        upn <- uptri & naughty
        rowidx <- as.vector(rn[upn])
        colidx <- as.vector(cn[upn])
        # matindex <- function(v) { matrix(c(v, rev(v)),
        #                                  ncol=2, byrow=TRUE) }
        mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex)
        inters <- lapply(mats, delMSH, types=types,
                         iradii=iradii, hradii=hradii, ihc=ihc)
        return(inters)
      }
    },
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      r <- self$par$iradii
      h <- self$par$hradii
      ractive <- !is.na(r)
      hactive <- !is.na(h)
      if(any(!is.na(coeffs))) {
        gamma <- (self$interpret)(coeffs, self)$param$gammas
        gamma[is.na(gamma)] <- 1
        ractive <- ractive & (abs(log(gamma)) > epsilon)
      }
      if(!any(c(ractive,hactive))) return(0) else
        return(max(c(r[ractive],h[hactive])))
    },
    hardcore = function(self, coeffs=NA, epsilon=0, ...) {
      h <- self$par$hradii
      active <- !is.na(h)
      return(max(0, h[active]))
    },
    version=NULL # to be added
  )
  class(BlankMSHobject) <- "interact"

  matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) }

  # Finally define MultiStraussHard function
  doMultiStraussHard <- function(iradii, hradii=NULL, types=NULL) {
    iradii[iradii == 0] <- NA
    if(!is.null(hradii)) hradii[hradii == 0] <- NA
    out <- instantiate.interact(BlankMSHobject,
                                list(types=types,
                                     iradii = iradii,
                                     hradii = hradii))
    if(!is.null(types)) {
      dn <- list(types, types)
      dimnames(out$par$iradii) <- dn
      if(!is.null(out$par$hradii))
        dimnames(out$par$hradii) <- dn
    }
    return(out)
  }

  doMultiStraussHard
})

MultiStraussHard <- local({

  MultiStraussHard <- function(iradii, hradii, types=NULL) {
    ## try new syntax
    newcall <- match.call()
    newcall[[1]] <- as.name('doMultiStraussHard')
    out <- try(eval(newcall, parent.frame()), silent=TRUE)
    if(is.interact(out)) return(out)
    ## try old syntax
    oldcall <- match.call(function(types=NULL, iradii, hradii) {})
    oldcall[[1]] <- as.name('doMultiStraussHard')
    out <- try(eval(oldcall, parent.frame()), silent=TRUE)
    if(is.interact(out)) return(out)
    ## Syntax is wrong: generate error using new syntax rules
    if(missing(hradii)) hradii <- NULL
    doMultiStraussHard(iradii=iradii, hradii=hradii, types=types)
  }

  BlankMSHobject <- get("BlankMSHobject",
                        envir=environment(doMultiStraussHard))

  MultiStraussHard <- intermaker(MultiStraussHard, BlankMSHobject)

  MultiStraussHard
})
spatstat.model/R/envelope.R0000644000176200001440000001753314476233602015371 0ustar liggesusers#
#   envelope.R
#
#   computes simulation envelopes
#
#   $Revision: 2.125 $  $Date: 2023/08/15 08:07:52 $
#
## Code for envelope() and envelope.ppp() is moved to spatstat.explore

envelope.ppm <- function(Y, fun=Kest, nsim=99, nrank=1, ...,
                         funargs=list(), funYargs=funargs,
                         simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
                         verbose=TRUE, clipdata=TRUE,
                         start=NULL,
control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- data.ppm(Y) if(is.null(simulate)) { # ................................................... # Simulated realisations of the fitted model Y # will be generated pois <- is.poisson(Y) csr <- is.stationary(Y) && pois type <- if(csr) "csr" else "rmh" # Set up parameters for rmh rmodel <- rmhmodel(Y, verbose=FALSE) if(is.null(start)) start <- list(n.start=npoints(X)) rstart <- rmhstart(start) rcontr <- rmhcontrol(control) if(fix.marks) { rcontr <- update(rcontr, fixall=TRUE, p=1, expand=1) nst <- if(is.multitype(X)) table(marks(X)) else npoints(X) rstart <- update(rstart, n.start=nst) constraints <- "with fixed number of points of each type" } else if(fix.n) { rcontr <- update(rcontr, p=1, expand=1) rstart <- update(rstart, n.start=X$n) constraints <- "with fixed number of points" } else constraints <- "" # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=FALSE) # expression that will be evaluated simexpr <- expression(rmhEngine(rmhinfolist, verbose=FALSE)) dont.complain.about(rmhinfolist) # evaluate in THIS environment simrecipe <- simulrecipe(type = type, expr = simexpr, envir = envir.here, csr = csr, pois = pois, constraints = constraints) } else { # ................................................... 
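    ## Typical call, shown for illustration only (a sketch, not executed):
    if(FALSE) {
      fit <- ppm(cells ~ 1, Strauss(0.07))
      E <- envelope(fit, Kest, nsim=39, fix.n=TRUE)
      plot(E)
    }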
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.kppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- Y$X if(is.null(simulate)) { # Simulated realisations of the fitted model Y # will be generated using simulate.kppm kmodel <- Y # expression that will be evaluated simexpr <- expression(simulate(kmodel)[[1L]]) dont.complain.about(kmodel) # evaluate in THIS environment simrecipe <- simulrecipe(type = "kppm", expr = simexpr, envir = envir.here, csr = FALSE, pois = FALSE) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.slrm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- response(Y) if(is.null(simulate)) { # Simulated realisations of the fitted model Y # will be generated using simulate.slrm smodel <- Y # expression that will be evaluated simexpr <- expression(simulate(smodel)[[1L]]) dont.complain.about(smodel) # evaluate in THIS environment simrecipe <- simulrecipe(type = "slrm", expr = simexpr, envir = envir.here, csr = FALSE, pois = FALSE) } else { # ................................................... 
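    ## The 'simulate' argument may be an expression or a list of point
    ## patterns; a sketch ('fit' is hypothetical):
    if(FALSE) {
      E <- envelope(fit, Kest, nsim=19,
                    simulate=expression(rpoispp(100)))
    }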
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } spatstat.model/R/augment.msr.R0000644000176200001440000000524514331173072016007 0ustar liggesusers#' #' augment.msr.R #' #' Given a measure, compute a pixel image of the smoothed density #' and insert it in the object. #' #' $Revision: 1.2 $ $Date: 2020/12/19 05:25:06 $ augment.msr <- function(x, ..., sigma, recompute=FALSE) { ## add a pixel image of the smoothed density component stopifnot(inherits(x, "msr")) if(!recompute && !is.null(attr(x, "smoothdensity"))) return(x) d <- ncol(as.matrix(x$val)) xloc <- x$loc W <- as.owin(xloc) mt <- is.multitype(xloc) if(missing(sigma)) { sigma <- if(!mt) avenndist(xloc) else max(sapply(split(xloc), avenndist)) if(sigma == 0) sigma <- max(bw.scott(xloc))/5 } if(mt) { ## multitype case - split by type, extract smoothed part, then sum y <- lapply(split(x), augment.msr, sigma=sigma, ...) z <- lapply(y, attr, which="smoothdensity") if((nc <- ncol(x)) == 1) { ## scalar valued smo <- im.apply(z, sum) ## WAS: z <- do.call(harmonise, unname(z)) ## smo <- Reduce("+", z) } else { ## vector valued smo <- vector(mode="list", length=nc) for(j in 1:nc) { zj <- lapply(z, "[[", i=j) smo[[j]] <- im.apply(zj, sum) ## WAS: zj <- do.call(harmonise, unname(zj)) ## smo[[j]] <- Reduce("+", zj) } smo <- as.solist(smo) } attr(smo, "sigma") <- sigma attr(x, "smoothdensity") <- smo return(x) } ## Single-type xdensity <- as.matrix(x$density) ## first weed out Inf, NA, NaN if(!all(ok <- complete.cases(xdensity))) xdensity <- ok * xdensity ## smooth density unless constant ra <- apply(xdensity, 2, range) varble <- apply(as.matrix(ra), 2, diff) > sqrt(.Machine$double.eps) ## if(d == 1) { if(!varble) { smo <- as.im(mean(xdensity), W=W) } else { xmd <- xloc %mark% xdensity dont.complain.about(xmd) smo <- do.call(Smooth, resolve.defaults(list(X=quote(xmd)), list(...), list(sigma=sigma))) } } else { smo <- vector(mode="list", length=d) names(smo) <- colnames(x) if(any(varble)) { xmdv <- xloc %mark% xdensity[,varble, drop=FALSE] dont.complain.about(xmdv) smo[varble] <- do.call(Smooth, resolve.defaults(list(X=quote(xmdv)), list(...), list(sigma=sigma))) } if(any(!varble)) smo[!varble] <- solapply(apply(xdensity[, !varble, drop=FALSE], 2, mean), as.im, W=W) } attr(smo, "sigma") <- sigma attr(x, "smoothdensity") <- smo return(x) } spatstat.model/R/Kres.R0000644000176200001440000000526714331173072014457 0ustar liggesusers# # Kres.R # # Residual K # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Kres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad K <- Kcom(object, ...) 
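    ## Illustrative usage of Kres() (a sketch, not executed):
    if(FALSE) {
      fit <- ppm(cells ~ 1, Strauss(0.07))
      plot(Kres(fit))   ## residual K function of the fitted model
    }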
} else { # case where 'object' is the output of 'Kcom' a <- attr(object, "maker") if(is.null(a) || a != "Kcom") stop("fv object was not created by Kcom") K <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=K$r, theo=numeric(length(K$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(K)(r), NULL), "theo", . ~ r , attr(K, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="K") # add residual functions nam <- names(K) if("border" %in% nam) ans <- bind.fv(ans, data.frame(bres=with(K, border-bcom)), "bold(R)~hat(%s)[bord](r)", "residual function %s based on border correction", "bres") if(all(c("trans","tcom") %in% nam)) ans <- bind.fv(ans, data.frame(tres=with(K, trans-tcom)), "bold(R)~hat(%s)[trans](r)", "residual function %s based on translation correction", "tres") if(all(c("iso","icom") %in% nam)) ans <- bind.fv(ans, data.frame(ires=with(K, iso-icom)), "bold(R)~hat(%s)[iso](r)", "residual function %s based on isotropic correction", "ires") if("ivar" %in% nam) { savedotnames <- fvnames(ans, ".") ans <- bind.fv(ans, as.data.frame(K)[, c("ivar", "isd", "ihi", "ilo")], c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of isotropic-corrected residual %s", "pseudo-SD of isotropic-corrected residual %s", "upper critical band for isotropic-corrected residual %s", "lower critical band for isotropic-corrected residual %s"), "ires") ans <- bind.fv(ans, data.frame(istdres=with(ans, ires/isd)), "bold(T)~hat(%s)[iso](r)", "standardised isotropic-corrected residual %s", "ires") fvnames(ans, ".") <- c(savedotnames, c("ihi", "ilo")) } unitname(ans) <- unitname(K) return(ans) } spatstat.model/R/hierstrauss.R0000644000176200001440000002110614243551505016120 0ustar liggesusers## ## hierstrauss.R ## ## $Revision: 1.10 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical Strauss process ## ## HierStrauss() create an instance of the hierarchical Strauss process ## [an object of class 'interact'] ## ## ------------------------------------------------------------------- ## HierStrauss <- local({ # ......... 
define interaction potential HSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
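  ## Illustrative use of the creator defined at the end of this block
  ## (a sketch, not executed; the radii are invented values for a
  ## bivariate hierarchical pattern such as 'ants' from spatstat.data):
  if(FALSE) {
    R <- matrix(c(150, 150, 60, 150), 2, 2)
    fit <- ppm(ants ~ 1, HierStrauss(radii=R, archy=c(1,2)))
  }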
delHS <- function(which, types, radii, archy) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(HierStrauss(types=types, radii=radii, archy=archy)) } # Set up basic object except for family and parameters BlankHSobject <- list( name = "Hierarchical Strauss process", creator = "HierStrauss", family = "hierpair.family", # evaluated later pot = HSpotential, par = list(types=NULL, radii=NULL, archy=NULL), # filled in later parnames = c("possible types", "interaction distances", "hierarchical order"), hasInf = FALSE, selfstart = function(X, self) { if(!is.null(self$par$types) && !is.null(self$par$archy)) return(self) types <- self$par$types %orifnull% levels(marks(X)) archy <- self$par$archy %orifnull% types HierStrauss(types=types,radii=self$par$radii,archy=archy) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii"), asymmok=TRUE) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(radii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") splat("Interaction radii:") print(hiermat(radii, self$par$archy)) invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(round(gammas, 4), self$par$archy))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interaction parameters must be non-explosive d <- diag(rep(TRUE, nrow(radii))) return(all(gamma[required & d] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types archy <- self$par$archy # problems? 
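      # (i.e. required interaction terms whose fitted gamma is
      #  non-finite, or exceeds 1 where the hierarchy does not permit
      #  it; such terms are deleted by the projection below)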
uptri <- archy$relation required <- !is.na(radii) & uptri okgamma <- !uptri | (is.finite(gamma) & (gamma <= 1)) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHS(naughty, types, radii, archy)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHS, types=types, radii=radii, archy=archy) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$radii active <- !is.na(r) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankHSobject) <- "interact" # finally create main function HierStrauss <- function(radii, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } radii[radii == 0] <- NA out <- instantiate.interact(BlankHSobject, list(types=types, radii=radii, archy=archy)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } HierStrauss <- intermaker(HierStrauss, BlankHSobject) HierStrauss }) spatstat.model/R/diagnoseppm.R0000644000176200001440000003457114331173072016061 0ustar liggesusers# # diagnoseppm.R # # Makes diagnostic plots based on residuals or energy weights # # $Revision: 1.44 $ $Date: 2019/10/02 10:33:21 $ # diagnose.ppm.engine <- function(object, ..., type="eem", typename, opt, sigma=NULL, rbord=reach(object), compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, rv=NULL, oldstyle=FALSE, splineargs = list(spar=0.5), verbose=TRUE) { if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # quadrature points Q <- quad.ppm(object) U <- union.quad(Q) Qweights <- w.quad(Q) # -------------- Calculate residuals/weights ------------------- # Discretised residuals if(type == "eem") { residval <- if(!is.null(rv)) rv else eem(object, check=FALSE) residval <- as.numeric(residval) X <- data.ppm(object) Y <- X %mark% residval } else { if(!is.null(rv) && !inherits(rv, "msr")) stop("rv should be a measure (object of class msr)") residobj <- if(!is.null(rv)) rv else residuals.ppm(object, type=type, check=FALSE) residval <- with(residobj, "increment") if(ncol(as.matrix(residval)) > 1L) stop("Not implemented for vector-valued residuals; use [.msr to split into separate components") Y <- U %mark% residval } # Atoms and density of measure Ymass <- NULL Ycts <- NULL Ydens <- NULL if(compute.cts) { if(type == "eem") { Ymass <- Y Ycts <- U %mark% (-1) Ydens <- as.im(-1, Y$window) } else { atoms <- with(residobj, "is.atom") masses <- with(residobj, "discrete") cts <- with(residobj, "density") if(!is.null(atoms) && !is.null(masses) && !is.null(cts)) { Ymass <- (U %mark% masses)[atoms] Ycts <- U %mark% cts # remove NAs (as opposed to zero cif points) if(!all(ok <- is.finite(cts))) { U <- U[ok] Ycts <- Ycts[ok] cts <- cts[ok] Qweights <- Qweights[ok] } # interpolate continuous part to yield an image for plotting if(type == "inverse" && all(cts != 0)) { Ydens <- as.im(-1, Y$window) } else if(is.stationary.ppm(object) && 
is.poisson.ppm(object)) { # all values of `cts' will be equal Ydens <- as.im(cts[1L], Y$window) } else { smallsigma <- maxnndist(Ycts) Ujitter <- U Ujitter$x <- U$x + runif(U$n, -smallsigma, smallsigma) Ujitter$y <- U$y + runif(U$n, -smallsigma, smallsigma) Ydens <- Smooth(Ujitter %mark% marks(Ycts), sigma=smallsigma, weights=Qweights, edge=TRUE, ...) } } } } #---------------- Erode window --------------------------------- # ## Compute windows W <- Y$window # Erode window if required clip <- !is.null(rbord) && is.finite(rbord) && (rbord > 0) if(clip) { Wclip <- erosion.owin(W, rbord) Yclip <- Y[Wclip] Qweightsclip <- Qweights[inside.owin(U, , Wclip)] if(!is.null(Ycts)) Ycts <- Ycts[Wclip] if(!is.null(Ydens)) Ydens <- Ydens[Wclip, drop=FALSE] } else { Wclip <- W Yclip <- Y } # ------------ start collecting results ------------------------- result <- list(type=type, clip=clip, Y=Y, W=W, Yclip=Yclip, Ymass=Ymass, Ycts=Ycts, Ydens=Ydens) # ------------- smoothed field ------------------------------ Z <- NULL if(opt$smooth | opt$xcumul | opt$ycumul | opt$xmargin | opt$ymargin) { if(is.null(sigma)) sigma <- 0.1 * diameter(Wclip) Z <- density.ppp(Yclip, sigma, weights=Yclip$marks, edge=TRUE, ...) } if(opt$smooth) { result$smooth <- list(Z = Z, sigma=sigma) if(type == "pearson") result$smooth$sdp <- 1/(2 * sigma * sqrt(pi)) } # -------------- marginals of smoothed field ------------------------ if(opt$xmargin) { xZ <- apply(Z$v, 2, sum, na.rm=TRUE) * Z$xstep if(type == "eem") ExZ <- colSums(!is.na(Z$v)) * Z$xstep else ExZ <- numeric(length(xZ)) result$xmargin <- list(x=Z$xcol, xZ=xZ, ExZ=ExZ) } if(opt$ymargin) { yZ <- apply(Z$v, 1L, sum, na.rm=TRUE) * Z$ystep if(type == "eem") EyZ <- rowSums(!is.na(Z$v)) * Z$ystep else EyZ <- numeric(length(yZ)) result$ymargin <- list(y=Z$yrow, yZ=yZ, EyZ=EyZ) } # -------------- cumulative (lurking variable) plots -------------- ## precompute simulated patterns for envelopes if(identical(envelope, TRUE)) envelope <- simulate(object, nsim=nsim, progress=verbose) if(opt$xcumul) result$xcumul <- lurking(object, covariate=expression(x), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="x coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) if(opt$ycumul) result$ycumul <- lurking(object, covariate=expression(y), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="y coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) 
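  ## Illustrative call to the user-level wrapper (a sketch, not executed):
  if(FALSE) {
    fit <- ppm(cells ~ x)
    diagnose.ppm(fit, type="pearson")
  }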
# -------------- summary numbers -------------- if(opt$sum) result$sum <- list(marksum=sum(Yclip$marks, na.rm=TRUE), areaWclip=area(Wclip), areaquad=if(clip) sum(Qweightsclip) else sum(Qweights), range=if(!is.null(Z)) range(Z) else NULL) return(invisible(result)) } ######################################################################## diagnose.ppm <- function(object, ..., type="raw", which="all", sigma=NULL, rbord =reach(object), cumulative=TRUE, plot.it = TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) { asked.newstyle <- !missing(oldstyle) && !oldstyle if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # check whether model originally came from replicated data is.subfit <- (object$method == "mppm") Coefs <- coef(object) if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } else if(compute.sd && is.null(getglmfit(object))) { object <- update(object, forcefit=TRUE, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } # ------------- Interpret arguments -------------------------- # Edge-effect avoidance if(missing(rbord) && !is.finite(rbord)) { ## Model has infinite reach ## Use correction rule employed when fitting rbord <- if(object$correction == "border") object$rbord else 0 } # match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") # 'which' is multiple choice with exact matching optionlist <- c("all", "marks", "smooth", "x", "y", "sum") if(!all(m <- which %in% optionlist)) stop(paste("Unrecognised choice(s) of", paste(sQuote("which"), ":", sep=""), paste(which[!m], collapse=", "))) opt <- list() opt$all <- "all" %in% which opt$marks <- ("marks" %in% which) | opt$all opt$smooth <- ("smooth" %in% which) | opt$all opt$xmargin <- (("x" %in% which) | opt$all) && !cumulative opt$ymargin <- (("y" %in% which) | opt$all) && !cumulative opt$xcumul <- (("x" %in% which) | opt$all) && cumulative opt$ycumul <- (("y" %in% which) | opt$all) && cumulative opt$sum <- ("sum" %in% which) | opt$all # compute and plot estimated standard deviations? # yes for Poisson, no for other models, unless overridden if(!missing(compute.sd)) plot.sd <- compute.sd else plot.sd <- list(...)$plot.sd if(is.null(plot.sd)) plot.sd <- is.poisson.ppm(object) if(missing(compute.sd)) compute.sd <- plot.sd # default for mppm objects is oldstyle=TRUE if(compute.sd && is.subfit) { if(!asked.newstyle) { # silently change default oldstyle <- TRUE } else { stop(paste("Variance calculation for a subfit of an mppm object", "is only implemented for oldstyle=TRUE"), call.=FALSE) } } # interpolate the density of the residual measure? 
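  # (the continuous density is needed only when the mark plot displays
  #  the negative part as an image or contour; plot.neg="discrete"
  #  avoids this computation)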
if(missing(compute.cts)) { plot.neg <- resolve.defaults(list(...), formals(plot.diagppm)["plot.neg"])$plot.neg # only if it is needed for the mark plot compute.cts <- opt$marks && (plot.neg != "discrete") } # ------- DO THE CALCULATIONS ----------------------------------- RES <- diagnose.ppm.engine(object, type=type, typename=typename, opt=opt, sigma=sigma, rbord=rbord, compute.sd=compute.sd, compute.cts=compute.cts, envelope=envelope, nsim=nsim, nrank=nrank, rv=rv, oldstyle=oldstyle, splineargs=splineargs, ...) RES$typename <- typename RES$opt <- opt RES$compute.sd <- compute.sd RES$compute.cts <- compute.cts class(RES) <- "diagppm" # ------- PLOT -------------------------------------------------- if(plot.it) plot(RES, ...) return(RES) } plot.diagppm <- function(x, ..., which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) { opt <- x$opt plot.neg <- match.arg(plot.neg) plot.smooth <- match.arg(plot.smooth) if(!missing(which)) { witches <- c("all", "marks", "smooth", "x", "y", "sum") unknown <- is.na(match(which, witches)) if(any(unknown)) warning(paste("Unrecognised", ngettext(sum(unknown), "option", "options"), "which =", commasep(sQuote(which[unknown])), ": valid options are", commasep(sQuote(witches))), call.=FALSE) oldopt <- opt newopt <- list() newopt$all <- "all" %in% which newopt$marks <- ("marks" %in% which) | newopt$all newopt$smooth <- ("smooth" %in% which) | newopt$all newopt$xmargin <- (("x" %in% which) | newopt$all) && oldopt$xmargin newopt$ymargin <- (("y" %in% which) | newopt$all) && oldopt$ymargin newopt$xcumul <- (("x" %in% which) | newopt$all) && oldopt$xcumul newopt$ycumul <- (("y" %in% which) | newopt$all) && oldopt$ycumul newopt$sum <- ("sum" %in% which) | newopt$all illegal <- (unlist(newopt) > unlist(oldopt)) if(any(illegal)) { offending <- paste(names(newopt)[illegal], collapse=", ") whinge <- paste("cannot display the following components;\n", "they were not computed: - \n", offending, "\n") stop(whinge) } opt <- newopt } if(missing(plot.sd)) { plot.sd <- x$compute.sd } else if(plot.sd && !(x$compute.sd)) { warning("can't plot standard deviations; they were not computed") plot.sd <- FALSE } if(!(x$compute.cts) && (plot.neg != "discrete") && (opt$marks || opt$all)) { if(!missing(plot.neg)) warning("can't plot continuous component of residuals; it was not computed") plot.neg <- "discrete" } if(opt$all) resid4plot(RES=x, plot.neg=plot.neg, plot.smooth=plot.smooth, spacing=spacing, outer=outer, srange=srange, monochrome=monochrome, main=main, ...) else resid1plot(RES=x, opt=opt, plot.neg=plot.neg, plot.smooth=plot.smooth, srange=srange, monochrome=monochrome, main=main, ...) } print.diagppm <- function(x, ...) 
{ opt <- x$opt typename <- x$typename splat("Model diagnostics", paren(typename)) splat("Diagnostics available:") optkey <- list(all="four-panel plot", marks=paste("mark plot", if(!x$compute.cts) "(discrete representation only)" else NULL), smooth="smoothed residual field", xmargin="x marginal density", ymargin="y marginal density", xcumul="x cumulative residuals", ycumul="y cumulative residuals", sum="sum of all residuals") avail <- unlist(optkey[names(opt)[unlist(opt)]]) names(avail) <- NULL cat(paste("\t", paste(avail, collapse="\n\t"), "\n", sep="")) if(opt$sum) { xs <- x$sum windowname <- if(x$clip) "clipped window" else "entire window" splat("sum of", typename, "in", windowname, "=", signif(sum(xs$marksum),4)) splat("area of", windowname, "=", signif(xs$areaWclip, 4)) splat("quadrature area =", signif(xs$areaquad, 4)) } if(opt$smooth) { splat("range of smoothed field = ", prange(signif(range(x$smooth$Z),4))) if(!is.null(sdp <- x$smooth$sdp)) splat("Null standard deviation of smoothed Pearson residual field:", signif(sdp, 4)) } return(invisible(NULL)) } spatstat.model/R/addvar.R0000644000176200001440000003072214377275735015031 0ustar liggesusers# # addvar.R # # added variable plot # # $Revision: 1.14 $ $Date: 2022/05/20 04:12:31 $ # addvar <- function(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) { if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- n.quad(Q) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Pearson residuals from point process model yr <- residuals(model, type="Pearson") yresid <- with(yr, "increment") # averaged (then sum with weight 'wts') yresid <- yresid/wts ################################################################# # Covariates # # covariate data frame df <- getglmdata(model) if(!all(c("x", "y") %in% names(df))) { xy <- as.data.frame(quadpoints) notxy <- !(colnames(df) %in% c("x", "y")) other <- df[, notxy] df <- cbind(xy, other) } # avail.covars <- names(df) # covariates used in model used.covars <- model.covariates(model) fitted.covars <- model.covariates(model, offset=FALSE) # ################################################################# # identify the covariate # if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate if(covname %in% fitted.covars) stop(paste("covariate named", dQuote(covname), "is already used in model")) covvalues <- evaluateCovariate(covariate, quadpoints) # validate covvalues if(is.null(covvalues)) stop("Unable to extract covariate values") else if(length(covvalues) != npoints(quadpoints)) 
stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) # tack onto data frame covdf <- data.frame(covvalues) names(covdf) <- covname df <- cbind(df, covdf) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # if(covname %in% fitted.covars) stop(paste("covariate", dQuote(covname), "already used in model")) # if(!(covname %in% avail.covars)) stop(paste("covariate", dQuote(covname), "not available")) # covvalues <- df[, covname] } ################################################################ # Pearson residuals from weighted linear regression of new covariate on others rhs <- formula(model) fo <- as.formula(paste(covname, paste(rhs, collapse=" "))) fit <- lm(fo, data=df, weights=lam * wts, na.action=na.exclude) xresid <- residuals(fit, type="pearson")/sqrt(wts) if(crosscheck) { message("Cross-checking...") X <- model.matrix(fo, data=df) V <- diag(lam * wts) sqrtV <- diag(sqrt(lam * wts)) Info <- t(X) %*% V %*% X H <- sqrtV %*% X %*% solve(Info) %*% t(X) %*% sqrtV nQ <- length(lam) Id <- diag(1, nQ, nQ) xresid.pearson <- (Id - H) %*% sqrtV %*% covvalues xresid.correct <- xresid.pearson/sqrt(wts) abserr <- max(abs(xresid - xresid.correct), na.rm=TRUE) relerr <- abserr/diff(range(xresid.correct, finite=TRUE)) if(is.finite(relerr) && relerr > 0.01) { warning("Large relative error in residual computation") } message("Done.") } # experiment suggests residuals(fit, "pearson") == xresid.correct # and residuals(fit) equivalent to # covvalues - X %*% solve(t(X) %*% V %*% X) %*% t(X) %*% V %*% covvalues ################################################################# # check for NA's etc # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg <- !is.finite(xresid) | !is.finite(yresid) if(any(offending <- nbg & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values ok <- !nbg & operative Q <- Q[ok] xresid <- xresid[ok] yresid <- yresid[ok] covvalues <- covvalues[ok] df <- df[ok, ] lam <- lam[ok] wts <- wts[ok] Z <- Z[ok] insubregion <- insubregion[ok] #################################################### # assemble data for smoothing xx <- xresid yy <- yresid ww <- wts if(makefrom <- is.null(from)) from <- min(xresid) if(maketo <- is.null(to)) to <- max(xresid) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(xx, weights=yy * ww, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(xx[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
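# Note: in this branch the bandwidth is first selected from the covariate
# values at the DATA points only, weighted by 1/lambda, and the numerator
# is then recomputed over all quadrature points with that bandwidth held
# fixed.  (unnormdensity() returns an unnormalised weighted kernel estimate
# sum_i w_i K_h(t - x_i) on a regular grid; its component $bw records the
# bandwidth actually used.)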
}) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator xx <- xx[insubregion] yy <- yy[insubregion] ww <- ww[insubregion] lam <- lam[insubregion] Z <- Z[insubregion] if(makefrom) from <- min(xx) if(maketo) to <- max(xx) numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(xx,weights=ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) #################################################### # Determine recommended plot range xr <- range(xresid[Z], finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x ratio <- function(y, x) { ifelseXB(x != 0, y/x, NA) } yyy <- ratio(numfun(xxx), denfun(xxx)) # Null variance estimation # smooth with weight 1 and smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(xx,weights=ww, bw=tau,adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) vvv <- ratio(varnumfun(xxx), 2 * sigma * sqrt(pi) * denfun(xxx)^2) safesqrt <- function(x) { ok <- is.finite(x) & (x >= 0) y <- rep.int(NA_real_, length(x)) y[ok] <- sqrt(x[ok]) return(y) } twosd <- 2 * safesqrt(vvv) # pack into fv object rslt <- data.frame(rcov=xxx, rpts=yyy, theo=0, var=vvv, hi=twosd, lo=-twosd) nuc <- length(used.covars) if(nuc == 0) { given <- givenlab <- 1 } else if(nuc == 1) { given <- givenlab <- used.covars } else { given <- commasep(used.covars, ", ") givenlab <- paste("list", paren(given)) } given <- paste("|", given) xlab <- sprintf("r(paste(%s, '|', %s))", covname, givenlab) ylab <- sprintf("r(paste(points, '|', %s))", givenlab) yexpr <- parse(text=ylab)[[1L]] desc <- c(paste("Pearson residual of covariate", covname, given), paste("Smoothed Pearson residual of point process", given), "Null expected value of point process residual", "Null variance of point process residual", "Upper limit of pointwise 5%% significance band", "Lower limit of pointwise 5%% significance band") rslt <- fv(rslt, argu="rcov", ylab=yexpr, valu="rpts", fmla= (. ~ rcov), alim=alim, labl=c(xlab, "%s", "0", "bold(var) ~ %s", "%s[hi]", "%s[lo]"), desc=desc, fname=ylab) attr(rslt, "dotnames") <- c("rpts", "theo", "hi", "lo") # data associated with quadrature points reserved <- (substr(colnames(df), 1L, 4L) == ".mpl") isxy <- colnames(df) %in% c("x", "y") dfpublic <- cbind(df[, !(reserved | isxy)], data.frame(xresid, yresid)) attr(rslt, "spatial") <- union.quad(Q) %mark% dfpublic # auxiliary data attr(rslt, "stuff") <- list(covname = covname, xresid = xresid, yresid = yresid, covvalues = covvalues, wts = wts, bw = bw, adjust = adjust, sigma = sigma, used.covars = used.covars, modelcall = modelcall, callstring = callstring, xlim = c(from, to), xlab = xlab, ylab = ylab, lmcoef = coef(fit), bw.input = bw.input, bw.restrict = bw.restrict, restricted = !is.null(subregion)) # finish class(rslt) <- c("addvar", class(rslt)) return(rslt) } print.addvar <- function(x, ...) 
{ cat("Added variable plot diagnostic (class addvar)\n") s <- attr(x, "stuff") mc <- paste(s$modelcall, collapse="") cat(paste("for the covariate", dQuote(s$covname), "for the fitted model:", if(nchar(mc) <= 30) "" else "\n\t", mc, "\n\n")) if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n\n")) NextMethod("print") } plot.addvar <- function(x, ..., do.points=FALSE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") # covname <- s$covname xresid <- s$xresid yresid <- s$yresid # adjust y limits if intending to plot points as well ylimcover <- if(do.points) range(yresid, finite=TRUE) else NULL # do.call(plot.fv, resolve.defaults(list(quote(x)), list(...), list(main=xname, shade=c("hi", "lo"), legend=FALSE, ylim.covers=ylimcover))) # plot points if(do.points) do.call(points, resolve.defaults(list(x=xresid, y=yresid), list(...), list(pch=3, cex=0.5))) return(invisible(x)) } spatstat.model/R/ord.family.R0000644000176200001440000001020214331173073015601 0ustar liggesusers# # # ord.family.S # # $Revision: 1.17 $ $Date: 2015/10/21 09:06:57 $ # # The Ord model (family of point process models) # # ord.family: object of class 'isf' defining Ord model structure # # # ------------------------------------------------------------------- # ord.family <- list( name = "ord", print = function(self) { cat("Ord model family\n") }, eval = function(X, U, EqualPairs, pot, pars, ...) { # # This auxiliary function is not meant to be called by the user. # It computes the distances between points, # evaluates the pair potential and applies edge corrections. # # Arguments: # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function function(d, p) # pars auxiliary parameters for pot list(......) # ... IGNORED # # Value: # matrix of values of the potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # # Note: # The potential function 'pot' will be called as # pot(M, pars) where M is a vector of tile areas. # It must return a vector of the same length as M # or a matrix with number of rows equal to the length of M ########################################################################## nX <- npoints(X) nU <- length(U$x) # number of data + dummy points seqX <- seq_len(nX) seqU <- seq_len(nU) # determine which points in the combined list are data points if(length(EqualPairs) > 0) is.data <- seqU %in% EqualPairs[,2] else is.data <- rep.int(FALSE, nU) ############################################################################# # First compute Dirichlet tessellation of data # and its total potential (which could be vector-valued) ############################################################################# marks(X) <- NULL Wdata <- dirichletWeights(X) # sic - these are the tile areas. 
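# (Example of the contract for 'pot': the OrdThresh model, defined in
#  ordthresh.R later in this package, supplies
#      pot = function(d, par) { (d <= par$r) }
#  i.e. an indicator that a Dirichlet tile area d is below the threshold r.)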
Pdata <- pot(Wdata, pars) summa <- function(P) { if(is.matrix(P)) matrowsum(P) else if(is.vector(P) || length(dim(P))==1 ) sum(P) else stop("Don't know how to take row sums of this object") } total.data.potential <- summa(Pdata) # Initialise V dimpot <- dim(Pdata)[-1] # dimension of each value of the potential function # (= numeric(0) if potential is a scalar) dimV <- c(nU, dimpot) if(length(dimV) == 1) dimV <- c(dimV, 1) V <- array(0, dim=dimV) rowV <- array(seqU, dim=dimV) #################### Next, evaluate V for the data points. ############### # For each data point, compute Dirichlet tessellation # of the data with this point removed. # Compute difference of total potential. ############################################################################# for(j in seq_len(nX)) { # Dirichlet tessellation of data without point j Wminus <- dirichletWeights(X[-j]) # regressor is the difference in total potential V[rowV == j] <- total.data.potential - summa(pot(Wminus, pars)) } #################### Next, evaluate V for the dummy points ################ # For each dummy point, compute Dirichlet tessellation # of (data points together with this dummy point) only. # Take difference of total potential. ############################################################################# for(j in seqU[!is.data]) { Xplus <- superimpose(X, list(x=U$x[j], y=U$y[j]), W=X$window) # compute Dirichlet tessellation (of these points only!) Wplus <- dirichletWeights(Xplus) # regressor is difference in total potential V[rowV == j] <- summa(pot(Wplus, pars)) - total.data.potential } return(V) } ######### end of function $eval ) ######### end of list class(ord.family) <- "isf" spatstat.model/R/rppm.R0000644000176200001440000001214414372606517014533 0ustar liggesusers#' #' rppm.R #' #' Recursive Partitioning for Point Process Models #' #' $Revision: 1.24 $ $Date: 2023/02/14 04:25:45 $ rppm <- function(..., rpargs=list()) { ## do the equivalent of ppm(...) cl <- match.call() cl[[1]] <- as.name('ppm') if("rpargs" %in% names(cl)) cl$rpargs <- NULL cl$forcefit <- TRUE pfit <- eval(cl, envir=parent.frame()) ## if(!is.poisson(pfit)) warning("Interpoint interaction will be ignored", call.=FALSE) df <- getglmdata(pfit) gf <- getglmfit(pfit) sf <- getglmsubset(pfit) rp <- do.call(rpart, resolve.defaults(list(formula=formula(gf), data=df, subset=sf, weights=df$.mpl.W), rpargs, list(method="poisson"))) result <- list(pfit=pfit, rp=rp, rpargs=rpargs) class(result) <- c("rppm", class(result)) return(result) } # undocumented as.ppm.rppm <- function(object) { object$pfit } print.rppm <- function(x, ...)
{ splat("Point process model with recursive partitioning") splat("Data:", sQuote(x$pfit$Qname)) splat("Covariates:", commasep(sQuote(variablesinformula(formula(x$pfit))))) splat("Regression tree:") print(x$rp) invisible(NULL) } plot.rppm <- local({ argsPlotRpart <- c("x", "uniform", "branch", "compress", "margin", "minbranch") argsTextRpart <- c("splits", "label", "FUN", "all", "pretty", "digits", "use.n", "fancy", "fwidth", "fheight", "bg", "minlength") plot.rppm <- function(x, ..., what=c("tree", "spatial"), treeplot=NULL) { xname <- short.deparse(substitute(x)) what <- match.arg(what) switch(what, tree = { xrp <- x$rp if(is.function(treeplot)) return(treeplot(xrp, ...)) dont.complain.about(xrp) out <- do.call.matched(plot, list(x=quote(xrp), ...), funargs=argsPlotRpart, extrargs=graphicsPars("plot")) # note: plot.rpart does not pass arguments to 'lines' do.call.matched(text, list(x=quote(xrp), ...), funargs=argsTextRpart, extrargs=graphicsPars("text")) }, spatial = { p <- predict(x) dont.complain.about(p) out <- do.call("plot", resolve.defaults(list(x=quote(p)), list(...), list(main=xname))) }) return(invisible(out)) } plot.rppm }) #' prune method prune.rppm <- function(tree, ...) { tree$rp <- rpart::prune(tree$rp, ...) return(tree) } #' predict method predict.rppm <- function(object, ...) { model <- object$pfit tree <- object$rp #' assemble covariates for prediction, using rules of predict.ppm co <- predict(model, ..., type="covariates", check=FALSE, repair=FALSE) newdata <- co$newdata masque <- co$mask #' perform prediction using the tree pred <- predict(tree, newdata=co$newdata) #' pack up appropriately if(is.null(masque)) return(pred) imago <- as.im(masque, value=1.0) if(!is.marked(model)) { out <- imago out[] <- pred } else { lev <- levels(marks(data.ppm(model))) nlev <- length(lev) out <- rep(list(imago), nlev) names(out) <- lev splitpred <- split(pred, newdata$marks) for(i in seq_len(nlev)) out[[i]][] <- splitpred[[i]] out <- as.solist(out) } return(out) } fitted.rppm <- function(object, ...) { predict(object, locations=response(object)) } response.rppm <- function(object) { data.ppm(as.ppm(object)) } is.poisson.rppm <- function(x) { is.poisson(as.ppm(x)) } is.marked.rppm <- function(X, ...) { is.marked(as.ppm(X)) } is.multitype.rppm <- function(X, ...) { is.multitype(as.ppm(X)) } residuals.rppm <- function(object, type=c("raw", "inverse", "Pearson"), ...) { type <- match.arg(type) Q <- quad.ppm(as.ppm(object)) lambda <- predict(object) residualMeasure(Q, lambda, type) } terms.rppm <- function(x, ...) { terms(x$pfit) } update.rppm <- function(object, ..., envir=environment(terms(object))) { pfit <- update(object$pfit, ..., envir=envir) if(!is.poisson(pfit)) warning("Interpoint interaction will be ignored", call.=FALSE) df <- getglmdata(pfit) gf <- getglmfit(pfit) sf <- getglmsubset(pfit) rpargs <- resolve.1.default("rpargs", ...) 
%orifnull% object$rpargs rp <- do.call(rpart, resolve.defaults(list(formula=formula(gf), data=df, subset=sf, weights=df$.mpl.W), rpargs, list(method="poisson"))) result <- list(pfit=pfit, rp=rp, rpargs=rpargs) class(result) <- c("rppm", class(result)) return(result) } spatstat.model/R/detpointprocfamilyfun.R0000644000176200001440000004521614331173072020176 0ustar liggesusers## detpointprocfamilyfun.R ## ## $Revision: 1.8 $ $Date: 2021/07/14 09:44:04 $ ## ## This file contains the function `detpointprocfamilyfun' ## to define new DPP model family functions ## and a print method for class `detpointprocfamilyfun' ## as well as the currently defined ## - dppBessel ## - dppCauchy ## - dppGauss ## - dppMatern ## - dppPowerExp detpointprocfamilyfun <- local({ names_formals <- function(f, dots = FALSE){ nam <- names(formals(f)) if(!dots) nam <- nam[nam!="..."] return(nam) } detpointprocfamilyfun <- function(kernel=NULL, specden=NULL, basis="fourierbasis", convkernel=NULL, Kfun=NULL, valid=NULL, intensity=NULL, dim=2, name="User-defined", isotropic=TRUE, range=NULL, parbounds=NULL, specdenrange=NULL, startpar=NULL, ...) { ## Check which functions are given, check them for sanity and ## extract argument names and other stuff given <- NULL if(!is.null(kernel)){ if(!is.function(kernel)) stop("If kernel is given it must be a function.") given <- "kernel" kernelnames <- names_formals(kernel) if(length(kernelnames)<1L) stop("kernel function must have at least one argument") kernelnames <- kernelnames[-1L] } if(!is.null(specden)){ if(!is.function(specden)) stop("If specden is given it must be a function.") given <- c(given, "specden") specdennames <- names_formals(specden) if(length(specdennames)<1L) stop("specden function must have at least one argument") specdennames <- specdennames[-1L] } if(is.null(given)) stop("At least one of kernel or specden must be provided.") if(length(given)==2){ if(!setequal(kernelnames,specdennames)) stop("argument names of kernel and specden must match.") } if(is.element("kernel",given)){ parnames <- kernelnames } else{ parnames <- specdennames } if(!is.null(convkernel)){ given <- c(given,"convkernel") if(!is.function(convkernel)||length(formals(convkernel))<2) stop("If convkernel is given it must be a function with at least two arguments.") if(!setequal(parnames,names_formals(convkernel)[-(1:2)])) stop("argument names of convkernel must match argument names of kernel and/or specden.") } if(!is.null(Kfun)){ given <- c(given,"Kfun") if(!is.function(Kfun)||length(formals(Kfun))<1L) stop("If Kfun is given it must be a function with at least one argument.") if(!setequal(parnames,names_formals(Kfun)[-1L])) stop("argument names of Kfun must match argument names of kernel and/or specden.") } if(!is.null(valid)){ if(!(is.function(valid)&&setequal(parnames,names_formals(valid)))) stop("argument names of valid must match argument names of kernel and/or specden.") } else{ warning("No function for checking parameter validity provided.
ANY numerical value for the parameters will be accepted.") } if(!is.null(intensity)&&!(is.character(intensity)&&length(intensity)==1L&&is.element(intensity, parnames))) stop("argument intensity must be NULL or have length one, be of class character and match a parameter name") if(!(is.character(dim)|is.numeric(dim))|length(dim)!=1L) stop("argument dim must have length one and be of class character or numeric") if(is.character(dim)){ if(!is.element(dim, parnames)) stop("When dim is a character it must agree with one of the parameter names of the model") } else{ dim <- round(dim) if(dim<1L) stop("When dim is a numeric it must be a positive integer") } ## Catch extra unknown args (will be appended to output object below). dots <- list(...) ## Create output object. out <- function(...){ caller <- match.call()[[1L]] caller <- eval(substitute(caller), parent.frame()) fixedpar <- list(...) nam <- names(fixedpar) if(length(fixedpar)>0&&is.null(nam)) stop(paste("Named arguments are required. Please supply parameter values in a", sQuote("tag=value"), "form")) match <- is.element(nam, parnames) if(sum(!match)>0) warning(paste("Not all supplied argument(s) make sense. Valid arguments are: ", paste(parnames, collapse = ", "), ". The following supplied argument(s) will be ignored: ", paste(nam[!match], collapse = ", "), sep = "")) fixedpar <- fixedpar[match] ## Code to always fix the dimension to a numeric when calling the function ####### if(is.character(dim) && !is.element(dim,names(fixedpar))){ dimpar <- structure(list(2), .Names=dim) fixedpar <- c(fixedpar, dimpar) } ## Detect inhomogeneous intensity (an image), and replace by max and an image for thinning thin <- NULL if(!is.null(intensity)){ lambda <- getElement(fixedpar, intensity) if(is.im(lambda)){ lambdamax <- max(lambda) thin <- lambda/lambdamax fixedpar[[intensity]] <- lambdamax } } obj <- list(fixedpar = fixedpar, freepar = parnames[!is.element(parnames,names(fixedpar))], kernel = kernel, specden = specden, convkernel = convkernel, intensity = intensity, thin = thin, dim = dim, name = name, range = range, valid = valid, parbounds = parbounds, specdenrange = specdenrange, startpar = startpar, isotropic = isotropic, caller = caller, basis = basis ) obj <- append(obj, dots) class(obj) <- "detpointprocfamily" return(obj) } class(out) <- c("detpointprocfamilyfun", "pointprocfamilyfun", class(out)) attr(out, "parnames") <- parnames attr(out, "name") <- name return(out) } detpointprocfamilyfun } ) print.detpointprocfamilyfun <- function(x, ...){ cat(paste(attr(x, "name"), "determinantal point process model family\n")) cat("The parameters of the family are:\n") cat(attr(x, "parnames"), sep = ", ") cat("\n") invisible(NULL) } dppBessel <- detpointprocfamilyfun( name="Bessel", kernel=function(x, lambda, alpha, sigma, d){ a <- 0.5*(sigma+d) y <- abs(x/alpha) # Kernel: lambda*2^a*gamma(a+1)*besselJ(2*y*sqrt(a),a) / (2*y*sqrt(a))^a logrslt <- log(lambda) + a*log(2) + lgamma(a+1) - a*log(2*y*sqrt(a)) rslt <- exp(logrslt) * besselJ(2*y*sqrt(a), a) rslt[x==0] <- lambda return(rslt) }, Dpcf=function(x, lambda, alpha, sigma, d){ a <- 0.5*(sigma+d) z <- sqrt(a)*x/alpha dalpha <- ifelse(x==0, 0, -2*gamma(a+1)^2*besselJ(x = 2*z, nu=a)*alpha^{2*a-2}*(x*sqrt(a))^(-2*a)*( a*alpha*besselJ(x=2*z, nu=a) - x*sqrt(a)*(besselJ(x = 2*z, nu = a - 1) - besselJ(x = 2*z, nu = a + 1)))) dsigma <- ifelse(x == 0, 0, -2 * besselJ(x = 2*z, nu=a) * gamma(a+1)^2/z^a * ((0.5 * besselJ(x = 2*z, nu=a)*digamma(a+1) + 0.5 * x * 0.5 * (besselJ(x = 2*z, nu = a-1) - besselJ(x=2*z, 
nu=a+1))/(alpha*sqrt(a)))/z^a - (0.5 * z^a * (0.5 * log(a) + log(x) - log(alpha)) + x * 2*a * z^(a-1)/(8 * alpha * sqrt(a))) * besselJ(x = 2*z, nu=a)/z^(2*a))) return(c(lambda=0, alpha=dalpha, sigma=dsigma)) }, specden=function(x, lambda, alpha, sigma, d){ a <- sigma+d # specden: lambda*(2*pi)^(d/2)*alpha^d*gamma(0.5*a+1)/a^(d/2)/gamma(sigma/2+1)*(1-2*pi^2*alpha^2*x^2/a)^(sigma/2) logrslt <- log(lambda) + (d/2)*log(2*pi) + d*log(alpha) + lgamma(0.5*a+1) logrslt <- logrslt - (d/2)*log(a) - lgamma(sigma/2+1) tmp <- 1-2*pi^2*alpha^2*x^2/a suppressWarnings({ logrslt <- logrslt + ifelse(tmp<0, -Inf, (sigma/2)*log(tmp)) }) return(exp(logrslt)) }, specdenrange=function(model){ p <- model$fixedpar sqrt((p$sigma+p$d)/(2*pi^2*p$alpha^2)) }, valid=function(lambda, alpha, sigma, d){ a <- sigma+d OK <- lambda>0 && alpha>0 && d>=1 && sigma>=0 if(!OK) return(FALSE) ## Upper bound for alpha (using log-scale) lognum <- log(a^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log( lambda*(2*pi^(0.5*d))) + lgamma(0.5*a+1) logalphamax <- (1/d) * (lognum - logdenom) return(OK && log(alpha) <= logalphamax) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, sigma, d){ lognum <- log((sigma+d)^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log(2*pi^(0.5*d)) + lgamma(0.5*(sigma+d)+1) switch(name, lambda = c(0, exp(lognum - log( alpha^d) - logdenom)) , alpha = c(0, exp((1/d) * (lognum - log(lambda) - logdenom))), sigma = c(0, switch(as.character(d), "2"=Inf, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("d" %in% model$freepar){ model <- update(model, d=spatdim(X)) } if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("sigma" %in% model$freepar){ sigma <- 2 while(!is.na(OK <- valid(model <- update(model, sigma=sigma)))&&!OK) sigma <- sigma/2 rslt <- c(rslt, "sigma" = sigma) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppCauchy <- detpointprocfamilyfun( name="Cauchy", kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda * (1+(x/alpha)^2)^(-nu-d/2) rslt[x==0] <- lambda return(rslt) }, Dpcf=function(x, lambda, alpha, nu, d){ dalpha <- (-4*nu-2*d)*x^2*alpha^(-3)*((x/alpha)^2 + 1)^(-2*nu - d - 1) dnu <- 2*log1p((x/alpha)^2)*((x/alpha)^2 + 1)^(-2*nu - d) return(c(lambda=0, alpha=dalpha, nu=dnu)) }, specden=function(x, lambda, alpha, nu, d){ y <- 2*x*alpha*pi rslt <- lambda * y^nu * besselK(x = y, nu = nu) * (sqrt(pi)*alpha)^d * exp((1-nu)*log(2) - lgamma(nu+d/2)) rslt[x==0] <- lambda * exp(lgamma(nu) - lgamma(nu+d/2)) * (sqrt(pi)*alpha)^d return(rslt) }, Kfun = function(x, lambda, alpha, nu, d){ rslt <- pi*x^2 - pi*alpha^2/(2*nu+1) * (1 - (alpha^2/(alpha^2+x^2))^(2*nu+1)) rslt[rslt<0] <- 0 return(rslt) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! 
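    ## The final inequality is the existence condition (spectral density <= 1).
    ## A worked instance, taking d = 2, nu = 1, alpha = 0.02:
    ## the bound is gamma(2)/(gamma(1) * (sqrt(pi)*0.02)^2) = 1/(pi*4e-04),
    ## approximately 795.77, so only lambda <= 795.77 is accepted.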
lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha * sqrt((1-bound)^(-1/(2*nu+d))-1)) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu+d/2)-lgamma(nu))/lambda)^(1/d)/sqrt(pi)), ## nu bound only implemented for d = 2. nu = c(switch(as.character(d), "2"=pi*lambda*alpha^2, NA), Inf), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ rslt <- lambda*exp(-(x/alpha)^2) return(rslt) }, Dpcf=function(x, lambda, alpha, d){ dalpha <- -4*x^2/alpha^3*exp(-(x/alpha)^2)^2 return(c(lambda=0, alpha=dalpha)) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppMatern <- detpointprocfamilyfun( name="Whittle-Matern", kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda*2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) rslt[x==0] <- lambda return(rslt) }, Dpcf=function(x, lambda, alpha, nu, d){ s <- besselK(x = x/alpha, nu = nu) dalpha <- 2 * (2^(1 - nu) * x * s * (x/alpha)^nu/(alpha^2 * gamma(nu)^2) * (-2^(1 - nu) * (0.5 * (besselK(x = x/alpha, nu = nu + 1) + besselK(x = x/alpha, nu = nu - 1))) * (x/alpha)^nu + 2^(1 - nu) * nu * s * (x/alpha)^(nu - 1))) dnu <- -2/gamma(nu)^2 * (2^(1-nu)*besselK(x = x/alpha, nu = nu)*(x/alpha)^nu)^2*(log(x/(2*alpha)) + digamma(nu)) return(c(lambda=0, 
alpha=dalpha, nu=dnu)) }, specden=function(x, lambda, alpha, nu, d){ lambda * exp(lgamma(nu+d/2) - lgamma(nu)) * (2*sqrt(pi)*alpha)^d * (1+(2*x*alpha*pi)^2)^(-nu-d/2) }, convkernel=function(x, k, lambda, alpha, nu, d){ nu2 <- k*(nu+d/2)-d/2 logres <- (nu2)*log(x/alpha) + log(besselK(x = x/alpha, nu = nu2, expon.scaled = TRUE)) - x/alpha logres[x == 0] <- (nu2-1)*log(2) + lgamma(nu2) logres <- logres + k*log(lambda) + k*(lgamma(nu+d/2)-lgamma(nu)) + (d*k-d+1-nu2)*log(2) + d*(k-1)*log(sqrt(pi)*alpha) - lgamma(nu2+d/2) index <- which(logres == Inf) logres[index] <- -Inf return(exp(logres)) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99, exact = FALSE){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!is.logical(exact)) stop("Argument exact must be a logical.") if(!exact&&d==2) return(alpha * sqrt(8*nu)) ## range suggested by Haavard Rue et al. if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") fun <- function(x) sqrt(1-bound)-2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) return(uniroot(fun, c(sqrt(.Machine$double.eps),1e3*alpha*sqrt(nu)))$root) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu)-lgamma(nu+d/2))/lambda)^(1/d)/2/sqrt(pi)), ## nu bound only implemented for d = 2 and d = 4. nu = c(0, switch(as.character(d), "2"=1/(4*pi*lambda*alpha^2), "4"=sqrt(1/4+1/(lambda*16*pi*pi*alpha^4))-1/2, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppPowerExp <- detpointprocfamilyfun( name="Power Exponential Spectral", specden=function(x, lambda, alpha, nu, d){ lambda * gamma(d/2+1) * alpha^d / (pi^(d/2)*gamma(d/nu+1)) * exp(-(alpha*x)^nu) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! 
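    ## The final inequality keeps the spectral density below 1 (existence).
    ## A worked instance, taking d = 2, nu = 2, alpha = 0.02: the bound is
    ## pi^(1) * gamma(2) / (gamma(2) * 0.02^2) = pi/4e-04, approximately
    ## 7853.98, so only lambda <= 7853.98 is accepted.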
lambda>0 && alpha>0 && nu>0 && nu<=20 && d>=1 && lambda <= pi^(d/2)*gamma(d/nu+1) / (gamma(1+d/2)*alpha^d) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, pi^(d/2)*gamma(d/nu+1) / (gamma(d/2+1)*alpha^d)), alpha = c(0, (pi^(d/2)*gamma(d/nu+1) / (lambda * gamma(d/2+1)))^(1/d)), nu = c(NA, NA), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) spatstat.model/R/aaaa.R0000644000176200001440000000251214331173072014424 0ustar liggesusers#' #' aaaa.R #' #' Code that must be read before the rest of the R code in spatstat #' #' $Revision: 1.6 $ $Date: 2020/11/30 13:17:22 $ #' #' ................................................................... #' intermaker: #' Class structure for functions like 'Strauss' #' so they print a nice description. #' intermaker <- function(f, blank) { # f is the creator function like 'Strauss' class(f) <- c("intermaker", class(f)) # blank is the prototype interaction object: extract some fields desired <- c("creator", "name", "par", "parnames", "pardesc") avail <- desired[desired %in% names(blank)] attr(f, "b") <- blank[avail] return(f) } print.intermaker <- function(x, ...) { b <- attr(x, "b") argh <- names(formals(x)) explain <- NULL if(length(argh) > 0) { desc <- b$pardesc %orifnull% b$parnames namep <- names(b$par) if(length(desc) == length(namep) && all(argh %in% namep)) { names(desc) <- namep explain <- paste(", where", commasep(paste(sQuote(argh), "is the", desc[argh]))) } } blah <- paste0("Function ", b$creator, paren(paste(argh, collapse=", ")), ": creates the interpoint interaction of the ", b$name, explain) splat(blah) return(invisible(NULL)) } spatstat.model/R/is.R0000644000176200001440000000013014374302014014143 0ustar liggesusers## is.R ## original for spatstat.core is.lppm <- function(x) { inherits(x, "lppm") } spatstat.model/R/dgs.R0000644000176200001440000000725414331173072014326 0ustar liggesusers# # # dgs.R # # $Revision: 1.14 $ $Date: 2022/05/21 08:53:38 $ # # Diggle-Gates-Stibbard process # # # ------------------------------------------------------------------- # DiggleGatesStibbard <- local({ # .......... auxiliary functions ................ dgsTerms <- function(X, Y, idX, idY, rho) { stopifnot(is.numeric(rho)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C(SM_Ediggatsti, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE="spatstat.model") answer <- numeric(nX) answer[oX] <- out$values return(answer) } # ...... template object ...................... 
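  # An 'interact' template is a list of standard fields (name, pot, par,
  # init, irange, Mayer, ...) which instantiate.interact() later fills in
  # with actual parameter values.  Typical use of the finished creator
  # (a sketch, using the 'swedishpines' data from spatstat.data):
  #     fit <- ppm(swedishpines ~ 1, DiggleGatesStibbard(rho=10))
  # The fitted pairwise interaction at distance d <= rho is
  # sin^2((pi/2) * d/rho), i.e. exp() of the log-potential defined below.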
BlankDGS <- list( name = "Diggle-Gates-Stibbard process", creator = "DiggleGatesStibbard", family = "pairwise.family", # evaluated later pot = function(d, par) { rho <- par$rho v <- log(sin((pi/2) * d/rho)^2) v[ d > par$rho ] <- 0 attr(v, "IsOffset") <- TRUE v }, par = list(rho = NULL), # to be filled in later parnames = "interaction range", hasInf = TRUE, init = function(self) { rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L || rho <= 0) stop("interaction range rho must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { rho <- self$par$rho return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { # fast evaluator for DiggleGatesStibbard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGatesStibbard") dont.complain.about(splitInf) rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] v <- dgsTerms(U, X, idU, idX, rho) v <- matrix(v, ncol=1L) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho return((pi/2 - 2/pi) * rho^2) } ) class(BlankDGS) <- "interact" DiggleGatesStibbard <- function(rho) { instantiate.interact(BlankDGS, list(rho = rho)) } DiggleGatesStibbard <- intermaker(DiggleGatesStibbard, BlankDGS) DiggleGatesStibbard }) spatstat.model/R/hardcore.R0000644000176200001440000001013514331173073015331 0ustar liggesusers# # # hardcore.S # # $Revision: 1.16 $ $Date: 2022/03/07 02:06:12 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hardcore <- local({ BlankHardcore <- list( name = "Hard core process", creator = "Hardcore", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 0 * d v[ d <= par$hc ] <- (-Inf) attr(v, "IsOffset") <- TRUE v }, par = list(hc = NULL), # filled in later parnames = "hard core distance", hasInf = TRUE, selfstart = function(X, self) { # self starter for Hardcore nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Hardcore model")) return(self) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Hardcore(hc = hcX) }, init = function(self) { hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc) && !(is.numeric(hc) && hc > 0)) stop("hard core distance hc must be a positive number, or NA") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { 
return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { return(self$par$hc) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) { return(self$par$hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { # fast evaluator for Hardcore interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Hardcore") hc <- potpars$hc # call evaluator for Strauss process counts <- strausscounts(U, X, hc, EqualPairs) forbid <- (counts != 0) if(!splitInf) { ## usual case v <- matrix(ifelseAB(forbid, -Inf, 0), ncol=1L) } else { ## separate hard core v <- matrix(0, nrow=npoints(U), ncol=1L) attr(v, "-Inf") <- forbid } attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral hc <- self$par$hc return(pi * hc^2) }, Percy=function(d, coeffs, par, ...) { ## term used in Percus-Yevick type approximation H <- par$hc t <- abs(d/(2*H)) t <- pmin.int(t, 1) y <- 2 * H^2 * (pi - acos(t) + t * sqrt(1 - t^2)) return(y) } ) class(BlankHardcore) <- "interact" Hardcore <- function(hc=NA) { instantiate.interact(BlankHardcore, list(hc=hc)) } Hardcore <- intermaker(Hardcore, BlankHardcore) Hardcore }) spatstat.model/R/model.depends.R0000644000176200001440000000730314331173073016266 0ustar liggesusers# # Determine which 'canonical variables' depend on a supplied covariate # # $Revision: 1.11 $ $Date: 2021/03/29 08:11:44 $ # model.depends <- function(object) { ## supplied covariates fo <- formula(object) if(length(as.list(fo)) == 3) { ## formula has a response: strip it fo <- fo[-2] } covars <- variablesinformula(fo) mm <- model.matrix(object) depends <- matrix(FALSE, ncol(mm), length(covars), dimnames=list(colnames(mm), covars)) ## model term labels tt <- terms(object) lab <- attr(tt, "term.labels") ## map from canonical covariates to term labels ass <- attr(mm, "assign") %orifnull% object[["assign"]] ## determine which canonical covariate depends on which supplied covariate if(length(ass) == ncol(mm)) { for(i in seq(along=ass)) { if(ass[i] == 0) # 0 is the intercept term depends[i,] <- FALSE else { turm <- lab[ass[i]] depends[i, ] <- covars %in% all.vars(parse(text=turm)) } } } else { warning("model.depends: unable to determine the dependence structure", call.=FALSE) } ## detect offsets if(!is.null(oo <- attr(tt, "offset")) && ((noo <- length(oo)) > 0)) { # entries of 'oo' index the list of variables in terms object vv <- attr(tt, "variables") offdep <- matrix(FALSE, noo, length(covars)) offnms <- character(noo) for(i in seq_len(noo)) { offseti <- languageEl(vv, oo[i] + 1) offdep[i, ] <- covars %in% all.vars(offseti) offnms[i] <- deparse(offseti) } rownames(offdep) <- offnms colnames(offdep) <- covars attr(depends, "offset") <- offdep } return(depends) } model.is.additive <- function(object) { dep <- model.depends(object) hit <- t(dep) %*% dep diag(hit) <- 0 ok <- all(hit == 0) return(ok) } model.covariates <- function(object, fitted=TRUE, offset=TRUE) { md <- model.depends(object) nm <- colnames(md) keep <- rep.int(FALSE, length(nm)) # variables used in formula with coefficients if(fitted) keep <- apply(md, 2, any) # variables used in offset if(offset) { oo <- attr(md, "offset") if(!is.null(oo)) keep <- keep | apply(oo, 2, any) } return(nm[keep]) } has.offset.term <- 
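# Does the model formula contain an offset() term?
# (has.offset, defined just below, additionally detects an offset supplied
#  through the model frame rather than in the formula.)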
function(object) { # model terms tt <- terms(object) oo <- attr(tt, "offset") return(!is.null(oo) && (length(oo) > 0)) } has.offset <- function(object) { has.offset.term(object) || !is.null(model.offset(model.frame(object))) } check.separable <- function(dmat, covname, isconstant, fatal=TRUE) { #' Determine whether the effect of 'covname' is separable from other terms. #' dmat = model.depends(model) #' Find covariates entangled with 'covname' in the model relevant <- dmat[, covname] othercov <- (colnames(dmat) != covname) conflict <- dmat[relevant, othercov, drop=FALSE] if(!any(conflict)) return(TRUE) #' names of entangled covariates entangled <- colnames(conflict)[matcolany(conflict)] #' not problematic if constant if(is.null(names(isconstant))) names(isconstant) <- colnames(dmat) ok <- unlist(isconstant[entangled]) conflict[ , ok] <- FALSE if(!any(conflict)) return(TRUE) #' there are conflicts conflictterms <- matrowany(conflict) conflictcovs <- matcolany(conflict) whinge <- paste("The covariate", sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms]))) if(fatal) stop(whinge, call.=FALSE) warning(whinge, call.=FALSE) return(FALSE) } spatstat.model/R/enet.R0000644000176200001440000001151714331173072014501 0ustar liggesusers#' #' enet.R #' #' Fit ppm using elastic net #' #' Original by Achmad Choiruddin #' plus original by Suman Rakshit #' modified by Adrian Baddeley #' #' Copyright (c) Achmad Choiruddin, Suman Rakshit and Adrian Baddeley 2022 #' GNU Public Licence >= 2.0 #' #' $Revision: 1.15 $ $Date: 2022/06/20 07:50:42 $ enet.engine <-function(model, ..., standardize=TRUE, lambda=NULL, alpha=1, adaptive=FALSE) { #' alpha=1, enet #' alpha=0, ridge #' 0= intercept.position, , drop=FALSE]) } ## Tuning parameter selection stuff <- apply(beta.estimates, 2, function(beta) { eta <- mm %*% beta + off v <- switch(CL, mpl = exp(eta), logi = log(1+exp(eta))) loglike <- sum(wts * (yy * eta - v)) beta.strip <- if(has.intercept) beta[-intercept.position] else beta p <- sum(beta.strip != 0) bic <- -2*loglike + p * log(nX) return(c(loglike=loglike, bic=bic)) }) jopt <- which.min(stuff["bic", ]) optim.bic <- stuff["bic", jopt] optim.loglike <- stuff["loglike", jopt] optim.beta <- beta.estimates[,jopt] optim.lambda <- lambda[jopt] ## update fitted model object newmodel <- model newmodel$coef <- optim.beta newmodel$maxlogpl <- optim.loglike newmodel$improve.type <- "enet" ## recompute/update internals newmodel$fisher <- NULL newmodel$varcov <- NULL newmodel$fitin <- NULL newmodel$fitin <- fitin(newmodel) ## save glmnet information newmodel$internal$glmnet <- list(fit=gfit, lambda=lambda, alpha=alpha, standardize=standardize, adaptive=adaptive, intercept=has.intercept, estimates=beta.estimates, criteria=stuff) ## remember original estimates newmodel$coef.orig <- beta.ini newmodel$maxlogpl.orig <- model$maxlogpl return(newmodel) } spatstat.model/R/varcount.R0000644000176200001440000001104414334115572015406 0ustar liggesusers#' #' varcount.R #' #' Variance of N(B) #' #' $Revision: 1.18 $ $Date: 2022/11/08 04:21:32 $ #' varcount <- function(model, B=Window(model), ..., dimyx=NULL, relative=FALSE) { if(is.null(B)) B <- Window(model) stopifnot(is.owin(B) || is.im(B) || is.function(B)) if(is.owin(B)) { f <- NULL lambdaB <- predict(model, locations=B, ngrid=dimyx, type="intensity") } else { f <- 
if(is.im(B)) B else as.im(B, W=as.owin(model), ..., dimyx=dimyx) B <- as.owin(f) lambdaB <- predict(model, locations=B, type="intensity") } ## important range of distances ## need to integrate over [0, R] R <- reach(model, epsilon=0.001) if(!isTRUE(is.finite(R))) R <- NULL if(!is.null(R)) { ## detect very small cluster radius (or very large window) diam <- diameter(Frame(B)) if((R < 0.001 * diam) && (area(erosion(B, R))/area(B) > 0.999)) { #' integrate by parts and ignore edge effect K <- Kmodel(model) if(is.function(K)) { excess <- K(diam) - pi * diam^2 if(is.null(f)) { E <- integral(lambdaB) V <- integral(lambdaB^2) } else { E <- integral(lambdaB * f) V <- integral((lambdaB * f)^2) } v <- E + V * excess if(relative) v <- V/E if(is.finite(v)) return(v) } } } g <- pcfmodel(model) if(!is.function(g)) stop("Pair correlation function is not available") v <- varcountEngine(g, B, lambdaB, f, R=R) if(relative) { E <- if(is.null(f)) integral(lambdaB) else integral(lambdaB * f) v <- v/E } return(v) } varcountEngine <- local({ varcountEngine <- function(g, B, lambdaB, f=1, R=NULL, what=c("variance","excess","pairs","squared")) { ## variance = var(N) ## excess = var(N) - E(N) ## pairs = E[ N(N-1) ] ## squared = E[N^2] what <- match.arg(what) g1 <- function(r) { g(r) - 1 } if(missing(f) || is.null(f) || identical(f, 1)) { v <- switch(what, variance = integral(lambdaB) + dublin(g1, B, lambdaB, R=R), excess = dublin(g1, B, lambdaB, R=R), pairs = dublin(g, B, lambdaB, R=R), squared = integral(lambdaB) + dublin(g, B, lambdaB, R=R)) } else if(min(f) >= 0) { ## nonnegative integrand v <- switch(what, variance = integral(lambdaB * f^2) + dublin(g1, B, lambdaB * f, R=R), excess = dublin(g1, B, lambdaB * f, R=R), pairs = dublin(g, B, lambdaB * f, R=R), squared = integral(lambdaB * f^2) + dublin(g, B, lambdaB * f, R=R)) } else if(max(f) <= 0) { ## nonpositive integrand v <- switch(what, variance=integral(lambdaB * f^2) + dublin(g1, B, lambdaB * (-f), R=R), excess = dublin(g1, B, lambdaB * (-f), R=R), pairs = dublin(g, B, lambdaB * (-f), R=R), squared =integral(lambdaB * f^2) + dublin(g, B, lambdaB * (-f), R=R)) } else { ## integrand has both positive and negative parts lamfplus <- eval.im(lambdaB * pmax(0, f)) lamfminus <- eval.im(lambdaB * pmax(0, -f)) h <- switch(what, variance = g1, excess = g1, pairs = g, squared = g) co <- (dublin(h, B, lamfplus, R=R) + dublin(h, B, lamfminus, R=R) - dublin(h, B, lamfplus, lamfminus, R=R) - dublin(h, B, lamfminus, lamfplus, R=R)) v <- switch(what, variance = integral(lambdaB * f^2) + co, excess = co, pairs = co, squared = integral(lambdaB * f^2) + co) } return(v) } dublin <- function(h, B, f, f2, R=NULL) { ## Double integral ## \int_B \int_B h(|u-v|) f(u) f(v) du dv ## or \int_B \int_B h(|u-v|) f(u) f2(v) du dv ## Assume h, f, f2 are nonnegative ## R = reach of model dr <- R/100 if(missing(f2)) { ## \int_B \int_B h(|u-v|) f(u) f(v) du dv M <- distcdf(B, dW=f, nr=NULL, delta=dr) ## integrate a <- integral(f)^2 * as.numeric(stieltjes(h, M)) } else { ## \int_B \int_B h(|u-v|) f(u) f2(v) du dv M <- distcdf(B, dW=f, dV=f2, nr=NULL, delta=dr) ## integrate a <- integral(f) * integral(f2) * as.numeric(stieltjes(h, M)) } return(a) } varcountEngine }) spatstat.model/R/poisson.R0000644000176200001440000000175114331173073015240 0ustar liggesusers# # # poisson.S # # $Revision: 1.9 $ $Date: 2022/03/07 03:58:22 $ # # The Poisson process # # Poisson() create an object of class 'interact' describing # the (null) interpoint interaction structure # of the Poisson process. 
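# For example (a sketch): ppm(cells ~ x) fits an inhomogeneous Poisson
# model, because Poisson() is the default interaction in ppm(); the
# explicit equivalent is ppm(cells ~ x, Poisson()).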
# # # ------------------------------------------------------------------- # Poisson <- local({ BlankPoisson <- list( name = "Poisson process", creator = "Poisson", family = NULL, order = 1, pot = NULL, par = NULL, parnames = NULL, init = function(...) { }, update = function(...) { }, print = function(self) { cat("Poisson process\n") invisible() }, valid = function(...) { TRUE }, project = function(...) NULL, irange = function(...) { 0 }, version=NULL ) class(BlankPoisson) <- "interact" Poisson <- function() { BlankPoisson } Poisson <- intermaker(Poisson, BlankPoisson) Poisson }) spatstat.model/R/clusterfunctions.R0000644000176200001440000000273414460146550017165 0ustar liggesusers## clusterfunctions.R ## ## Contains methods for the generic functions ## - clusterkernel ## - clusterfield ## - clusterradius. ## ## $Revision: 1.13 $ $Date: 2023/06/09 05:13:54 $ ## ## The generic clusterkernel() is now in spatstat.random ## The method clusterkernel.character is now in spatstat.random clusterkernel.kppm <- function(model, ...) { kernelR <- Kpcf.kppm(model, what = "kernel") f <- function(x, y = 0, ...){ kernelR(sqrt(x^2+y^2)) } return(f) } ## The generic clusterfield() is now in spatstat.random ## The method clusterfield.character is now in spatstat.random ## The method clusterfield.function is now in spatstat.random clusterfield.kppm <- function(model, locations = NULL, ...) { f <- clusterkernel(model) if(is.null(locations)){ if(!is.stationary(model)) stop("The model is non-stationary. The argument ", sQuote("locations"), " must be given.") locations <- centroid.owin(Window(model), as.ppp = TRUE) } clusterfield.function(f, locations, ..., mu = model$mu) } ## The generic clusterradius is now defined in spatstat.random ## The method clusterradius.character is now defined in spatstat.random clusterradius.kppm <- function(model, ..., thresh = NULL, precision = FALSE){ a <- list(model = model$clusters, thresh = thresh, precision = precision) a <- append(a, as.list(c(model$clustpar, model$clustargs))) do.call(clusterradius.character, a) } spatstat.model/R/ordthresh.R0000644000176200001440000000330714243551505015551 0ustar liggesusers# # # ordthresh.S # # $Revision: 1.12 $ $Date: 2018/03/15 07:37:41 $ # # Ord process with threshold potential # # OrdThresh() create an instance of the Ord process # [an object of class 'interact'] # with threshold potential # # # ------------------------------------------------------------------- # OrdThresh <- local({ BlankOrdThresh <- list( name = "Ord process with threshold potential", creator = "OrdThresh", family = "ord.family", pot = function(d, par) { (d <= par$r) }, par = list(r = NULL), parnames = "threshold distance", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("threshold distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) is.finite(loggamma) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(...) 
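      # The range of interaction is infinite: the potential depends on
      # the Dirichlet tile areas, and there is no fixed distance beyond
      # which an added point is guaranteed not to alter the tessellation.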
{ return(Inf) }, version=NULL ) class(BlankOrdThresh) <- "interact" OrdThresh <- function(r) { instantiate.interact(BlankOrdThresh, list(r=r)) } OrdThresh <- intermaker(OrdThresh, BlankOrdThresh) OrdThresh }) spatstat.model/R/predictmppm.R0000644000176200001440000004107214331173074016073 0ustar liggesusers# # predictmppm.R # # $Revision: 1.18 $ $Date: 2022/04/26 07:25:57 $ # # # ------------------------------------------------------------------- predict.mppm <- local({ predict.mppm <- function(object, ..., newdata=NULL, type=c("trend", "cif"), ngrid=40, locations=NULL, verbose=FALSE) { ## ## 'object' is the output of mppm() ## model <- object verifyclass(model, "mppm") ## isMulti <- is.multitype(model) modelsumry <- summary(model) depends.on.row <- modelsumry$depends.on.row fixedinteraction <- modelsumry$ikind$fixedinteraction ndata.old <- model$npat ## ## ...................................................................... if(verbose) cat("Inspecting arguments...") ## ...................................................................... ## ## hidden arguments selfcheck <- resolve.defaults(list(...), list(selfcheck=FALSE))$selfcheck ## ## Argument 'type' ## type <- pickoption("type", type, c(trend="trend", lambda="cif", cif="cif"), multi=TRUE) want.trend <- "trend" %in% type want.cif <- "cif" %in% type ## ## Argument 'newdata' ## use.olddata <- is.null(newdata) if(use.olddata) { newdata <- model$data newdataname <- "Original data" ndata.new <- ndata.old new.id <- NULL } else { stopifnot(is.data.frame(newdata) || is.hyperframe(newdata)) newdataname <- sQuote("newdata") ndata.new <- nrow(newdata) new.id <- NULL if(depends.on.row) { #' require row serial numbers 'id' new.id <- newdata$id if(is.null(new.id)) { #' no serial numbers given #' implicitly use the old serial numbers if(ndata.new != ndata.old) stop(paste("'newdata' must have the same number of rows", "as the original 'data' argument", paren(paste("namely", ndata.old)), "because the model depends on the row index"), call.=FALSE) } else { #' serial numbers given #' validate them if(!is.factor(new.id) && !is.integer(new.id)) stop("newdata$id should be a factor or integer vector", call.=FALSE) if(is.integer(new.id)) { new.id <- factor(new.id, levels=1:ndata.old) } else if(!identical(levels(new.id), as.character(1:ndata.old))) { stop(paste0("Levels of newdata$id must be 1:", ndata.old), call.=FALSE) } } } } ## ## Argument 'locations' ## if(is.hyperframe(locations)) locations <- locations[,1,drop=TRUE] if(is.list(locations)) cls <- unique(sapply(locations, class)) loctype <- if(is.null(locations)) "null" else if(is.data.frame(locations)) "data.frame" else if(is.list(locations)) { if(any(c("ppp", "quad") %in% cls)) "points" else if("owin" %in% cls) { if(all(sapply(locations, is.mask))) "mask" else "window" } else "unknown" } else "unknown" ## ...................................................................... if(verbose) cat("done.\nDeciding type of locations for prediction...") ## ...................................................................... 
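  ## Summary of the decision below (see the switch on 'loctype'):
  ##   locations NULL or owin  -> predict on a pixel grid (need.grid = TRUE)
  ##   locations data.frame    -> predict at the supplied coordinates
  ##   locations ppp or quad   -> predict at the supplied points
  ##   locations mask          -> predict at pixel centres, return images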
need.grid <- switch(loctype, null =TRUE, data.frame=FALSE, points =FALSE, mask =FALSE, window =TRUE, unknown =stop("Unrecognised format for locations")) make.image <- need.grid || (loctype == "mask") ## locationvars <- c("x", "y", "id", if(isMulti) "marks" else NULL) ## ## if(need.grid) { ## prediction on a grid is required if(is.data.frame(newdata)) stop(paste("Cannot predict model on a grid;", newdataname, "are a data frame")) } else { ## prediction at `locations' is required if(is.hyperframe(newdata)) { ## check consistency between locations and newdata nloc <- length(locations) if(nloc != ndata.new) stop(paste("Length of argument", sQuote("locations"), paren(nloc), "does not match number of rows in", newdataname, paren(ndata.new))) } else { ## newdata is a data frame if(!is.data.frame(locations)) stop(paste(newdataname, "is a data frame; locations must be a data frame")) else { stopifnot(nrow(locations) == nrow(newdata)) dup <- names(newdata) %in% names(locations) if(any(dup)) for(nam in names(newdata)[dup]) if(!isTRUE(all.equal(newdata[,nam], locations[,nam]))) stop(paste("The data frames newdata and locations", "both have a column called", sQuote(nam), "but the entries differ")) nbg <- !(locationvars %in% c(names(newdata),names(locations))) if(any(nbg)) stop(paste(ngettext(sum(nbg), "Variable", "Variables"), commasep(locationvars[nbg]), "not provided")) ## merge the two data frames newdata <- cbind(newdata[,!dup], locations) locations <- NULL } } } ## ...................................................................... if(verbose) cat("done.\nExtracting details of point process model...") ## ...................................................................... ## extract fitted glm/gam/glmm object FIT <- model$Fit$FIT MOADF <- model$Fit$moadf ## extract names of interaction variables Vnamelist <- model$Fit$Vnamelist vnames <- unlist(Vnamelist) ## determine which interaction is applicable on each row interactions <- model$Inter$interaction hyperinter <- is.hyperframe(interactions) ninter <- if(hyperinter) nrow(interactions) else 1L if(hyperinter && ninter > 1) { if(fixedinteraction) { interactions <- interactions[1L, ] } else { ## interaction depends on row if(!is.null(new.id)) { ## row sequence specified; extract the relevant rows interactions <- interactions[as.integer(new.id), ] } else { ## rows of newdata implicitly correspond to rows of original data if(ninter != ndata.new) stop(paste("Number of rows of newdata", paren(ndata.new), "does not match number of interactions in model", paren(ninter))) } } } ## extract possible types, if model is multitype if(isMulti) { levlist <- unique(lapply(data.mppm(model), levelsofmarks)) if(length(levlist) > 1) stop("Internal error: the different point patterns have inconsistent marks", call.=FALSE) marklevels <- levlist[[1L]] } else marklevels <- list(NULL) # sic ## ...................................................................... if(verbose) { cat("done.\n") if(use.olddata) splat("Using original hyperframe of data") else splat("newdata is a", if(is.data.frame(newdata)) "data frame" else "hyperframe") } ## ...................................................................... 
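  ## Typical usage (a sketch; 'simba' is an example hyperframe in
  ## spatstat.data with a column 'Points' of point patterns):
  ##     fit <- mppm(Points ~ group, data=simba)
  ##     pred <- predict(fit)
  ## giving a hyperframe with columns 'trend' and 'cif' of pixel images,
  ## one row for each row of the original data.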
  ##
  if(is.data.frame(newdata)) {
    ##
    ## newdata is a DATA FRAME
    ##
    if(need.grid)
      stop("Cannot predict model on a grid; newdata is a data frame")
    if(verbose)
      cat("Computing prediction..")
    ## use newdata as covariates
    nbg <- !(locationvars %in% names(newdata))
    if(any(nbg))
      stop(paste(ngettext(sum(nbg), "variable", "variables"),
                 commasep(locationvars[nbg]),
                 "not provided"))
    ## create output data frame
    answer <- as.data.frame(matrix(, nrow=nrow(newdata), ncol=0),
                            row.names=row.names(newdata))
    if(want.trend) {
      ## add interaction components, set to zero (if any)
      if(length(vnames) > 0)
        newdata[, vnames] <- 0
      ## compute fitted values
      answer$trend <- Predict(FIT, newdata=newdata, type="response")
    }
    if(want.cif) {
      if(is.poisson(object)) {
        ## cif = trend
        answer$cif <-
          if(want.trend) answer$trend else
          Predict(FIT, newdata=newdata, type="response")
      } else {
        warning("Computation of the cif is not yet implemented when newdata is a data frame")
        ## split data frame by 'id'
        ## compute interaction components using existing point patterns
        ## compute fitted values
      }
    }
    if(verbose)
      cat("done.\n")
    return(answer)
  }
  ## ......................................................................
  ## newdata is a HYPERFRAME
  ##
  if(verbose)
    cat("Building data for prediction...")
  sumry.new <- summary(newdata)
  ndata.new <- sumry.new$ncases
  ## name of response point pattern in model
  Yname <- model$Info$Yname
  ##
  ## Determine response point patterns if known.
  ## Extract from newdata if available
  ## Otherwise from the original data if appropriate
  if(verbose)
    cat("(responses)...")
  Y <- if(Yname %in% sumry.new$col.names)
         newdata[, Yname, drop=TRUE, strip=FALSE] else
       if(ndata.new == ndata.old)
         model$data[, Yname, drop=TRUE, strip=FALSE] else NULL
  ##
  if(want.cif && is.null(Y))
    stop(paste("Cannot compute cif:",
               "newdata does not contain column", dQuote(Yname),
               "of response point patterns"))
  ##
  ## Determine windows for prediction
  if(verbose)
    cat("(windows)...")
  Wins <- if(!need.grid)
            lapply(locations, as.owin, fatal=FALSE)
          else if(!is.null(Y))
            lapply(Y, as.owin, fatal=FALSE)
          else NULL
  if(is.null(Wins) || any(sapply(Wins, is.null)))
    stop("Cannot determine windows where predictions should be made")
  ##
  ##
  if(is.null(Y)) {
    ## only want trend; empty patterns will do
    Y <- lapply(Wins, emptypattern)
  }
  ## ensure Y contains data points only
  if(is.quad(Y[[1]]))
    Y <- lapply(Y, getElement, name="data")
  ## Determine locations for prediction
  if(need.grid) {
    ## Generate grids of dummy locations
    if(verbose)
      cat("(grids)...")
    Gridded <- lapply(Wins, gridsample, ngrid=ngrid)
    Dummies <- lapply(Gridded, getElement, name="D")
    Templates <- lapply(Gridded, getElement, name="I")
  } else {
    ## locations are given somehow
    if(verbose)
      cat("(locations)...")
    switch(loctype,
           points = {
             Dummies <- locations
           },
           mask = {
             Dummies <- lapply(locations, punctify)
             Templates <- lapply(locations, as.im)
           },
           stop("Internal error: illegal loctype"))
  }
  ## ..........................................
  ## ............... PREDICTION ...............
  ## ..........................................
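  ## The strategy below: pack the prediction locations as dummy points into
  ## quadrature schemes, rebuild the Berman-Turner data frame by calling
  ## mppm(..., backdoor=TRUE) on the modified hyperframe, then evaluate the
  ## fitted glm/gam 'FIT' at every quadrature point. For a multitype model
  ## the loop repeats this once per mark level, yielding one image or marked
  ## pattern per level (named e.g. "trendA" and "cifA" for level "A").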
## initialise hyperframe of predicted values Answer <- newdata[,integer(0),drop=FALSE] if(depends.on.row) Answer$id <- factor(levels(MOADF$id)) ## Loop over possible types, or execute once: ## /////////////////////////////////////////// for(lev in marklevels) { ## Pack prediction locations into quadschemes if(verbose) { cat("Building quadschemes") if(isMulti) cat(paste("with mark", lev)) cat("...") } if(isMulti) { ## assign current mark level to all dummy points flev <- factor(lev, levels=marklevels) Dummies <- lapply(Dummies, "marks<-", value=flev) } Quads <- mapply(quad, data=Y, dummy=Dummies, SIMPLIFY=FALSE, USE.NAMES=FALSE) ## Insert quadschemes into newdata newdata[, Yname] <- Quads ## compute the Berman-Turner frame if(verbose) cat("done.\nStarting prediction...(Berman-Turner frame)...") moadf <- mppm(formula = model$formula, data = newdata, interaction = interactions, iformula = model$iformula, random = model$random, use.gam = model$Fit$use.gam, correction = model$Info$correction, rbord = model$Info$rbord, backdoor = TRUE) ## compute fitted values if(verbose) cat("(glm prediction)...") values <- moadf[, locationvars] if(want.cif) values$cif <- Predict(FIT, newdata=moadf, type="response") if(want.trend) { if(length(vnames) == 0) { ## Poisson model: trend = cif values$trend <- if(want.cif) values$cif else Predict(FIT, newdata=moadf, type="response") } else { ## zero the interaction components moadf[, vnames] <- 0 ## compute fitted values values$trend <- Predict(FIT, newdata=moadf, type="response") } } if(verbose) cat("done.\nReshaping results...") ## ## Reshape results ## separate answers for each image values <- split(values, values$id) ## Trends <- list() Lambdas <- list() if(!make.image) { if(verbose) cat("(marked point patterns)...") ## values become marks attached to locations for(i in seq(ndata.new)) { Val <- values[[i]] Loc <- Dummies[[i]] isdum <- !is.data(Quads[[i]]) if(selfcheck) if(length(isdum) != length(Val$trend)) stop("Internal error: mismatch between data frame and locations") if(want.trend) Trends[[i]] <- Loc %mark% (Val$trend[isdum]) if(want.cif) Lambdas[[i]] <- Loc %mark% (Val$cif[isdum]) } } else { if(verbose) cat("(pixel images)...") ## assign values to pixel images for(i in seq(ndata.new)) { values.i <- values[[i]] Q.i <- Quads[[i]] values.i <- values.i[!is.data(Q.i), ] Template.i <- Templates[[i]] ok.i <- !is.na(Template.i$v) if(sum(ok.i) != nrow(values.i)) stop("Internal error: mismatch between data frame and image") if(selfcheck) { dx <- rasterx.im(Template.i)[ok.i] - values.i$x dy <- rastery.im(Template.i)[ok.i] - values.i$y cat(paste("i=", i, "range(dx) =", paste(range(dx), collapse=", "), "range(dy) =", paste(range(dy), collapse=", "), "\n")) } if(want.trend) { Trend.i <- Template.i Trend.i$v[ok.i] <- values.i$trend Trends[[i]] <- Trend.i } if(want.cif) { Lambda.i <- Template.i Lambda.i$v[ok.i] <- values.i$cif Lambdas[[i]] <- Lambda.i } } } if(verbose) cat("done reshaping.\n") if(want.trend) { trendname <- paste0("trend", lev) Answer[,trendname] <- Trends } if(want.cif) { cifname <- paste0("cif", lev) Answer[,cifname] <- Lambdas } } ## /////////// end loop over possible types ////////////////// return(Answer) } ## helper functions emptypattern <- function(w) { ppp(numeric(0), numeric(0), window=w) } levelsofmarks <- function(X) { levels(marks(X)) } gridsample <- function(W, ngrid) { masque <- as.mask(W, dimyx=ngrid) xx <- raster.x(masque) yy <- raster.y(masque) xpredict <- xx[masque$m] ypredict <- yy[masque$m] Dummy <- ppp(xpredict, ypredict, window=W) 
Image <- as.im(masque) return(list(D=Dummy, I=Image)) } punctify <- function(M) { xx <- raster.x(M) yy <- raster.y(M) xpredict <- xx[M$m] ypredict <- yy[M$m] return(ppp(xpredict, ypredict, window=M)) } Predict <- function(object, newdata, type=c("link", "response")) { type <- match.arg(type) if(inherits(object, "glmmPQL")) { object <- stripGLMM(object) pred <- predict(object, newdata=newdata) if(type == "response") pred <- object$family$linkinv(pred) } else { pred <- predict(object, newdata=newdata, type=type) } return(as.numeric(pred)) } predict.mppm }) spatstat.model/R/plot.mppm.R0000644000176200001440000000122414377277502015503 0ustar liggesusers# # plot.mppm.R # # $Revision: 1.8 $ $Date: 2023/02/28 04:04:51 $ # # plot.mppm <- function(x, ..., trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp"), main) { if(missing(main)) main <- short.deparse(substitute(x)) how <- match.arg(how) subs <- subfits(x) if(trend) plot.anylist(x=subs, how=how, main=main, ..., trend=TRUE, cif=FALSE, se=FALSE) if(cif) plot.anylist(x=subs, how=how, main=main, ..., trend=FALSE, cif=TRUE, se=FALSE) if(se) plot.anylist(x=subs, how=how, main=main, ..., trend=FALSE, cif=FALSE, se=TRUE) invisible(NULL) } spatstat.model/R/parres.R0000644000176200001440000005061614377275735015070 0ustar liggesusers# # parres.R # # code to plot transformation diagnostic # # $Revision: 1.18 $ $Date: 2023/02/28 01:55:09 $ # parres <- function(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw="nrd0", adjust=1, from=NULL,to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname) { callstring <- paste(deparse(sys.call()), collapse = "") modelname <- short.deparse(substitute(model)) stopifnot(is.ppm(model)) if(missing(covariate)) { mc <- model.covariates(model) if(length(mc) == 1) covariate <- mc else stop("covariate must be provided") } if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(!is.null(subregion)) stopifnot(is.owin(subregion)) if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- npoints(quadpoints) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Inverse lambda residuals rx <- residuals(model, type="inverse") resid <- with(rx, "increment") ################################################################# # identify the covariate # if(length(covariate) == 0) stop("No covariate specified") covtype <- "unknown" if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate covtype <- "external" beta <- 0 covvalues <- evaluateCovariate(covariate, quadpoints) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # 'original covariates' orig.covars <- variablesinformula(formula(model)) # 'canonical covariates' canon.covars <- 
names(coef(model)) # offsets offset.covars <- offsetsinformula(formula(model)) # if(covname %in% orig.covars) { # one of the original covariates covtype <- "original" covvalues <- evaluateCovariate(findCovariate(covname, model), quadpoints) } else if(covname %in% canon.covars) { # one of the canonical covariates covtype <- "canonical" mm <- model.matrix(model) covvalues <- mm[, covname] ## extract the corresponding coefficient beta <- coef(model)[[covname]] } else if(covname %in% offset.covars) { # an offset term only covtype <- "offset" mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(!(covname %in% colnames(mf))) stop(paste("Internal error: offset term", covname, "not found in model frame")) covvalues <- mf[, covname] ## fixed coefficient (not an estimated parameter) beta <- 1 } else{ # must be an external covariate (i.e. not used in fitted model) covtype <- "external" beta <- 0 covvalues <- evaluateCovariate(findCovariate(covname, model), quadpoints) } } # validate covvalues # if(is.null(covvalues)) stop("Unable to extract covariate values") if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) vtype <- typeof(covvalues) switch(vtype, real=, double = { }, integer = { warning("Covariate is integer-valued") }, stop(paste("Cannot handle covariate of type", sQuote(vtype)))) ################################################################# # Compute covariate effect if(covtype != "original") { effect <- beta * covvalues mediator <- covtype effectfundata <- list(beta=beta) effectFun <- function(x) { (effectfundata$beta) * x } isoffset <- (covtype == "offset") names(isoffset) <- covname } else { ## `original' covariate (passed as argument to ppm) ## may determine one or more canonical covariates and/or offsets origcovdf <- getppmOriginalCovariates(model)[insubregion, , drop=FALSE] isconstant <- lapply(origcovdf, function(z) { length(unique(z)) == 1 }) ## ## Initialise termnames <- character(0) termbetas <- numeric(0) isoffset <- logical(0) mediator <- character(0) effect <- 0 effectFun <- function(x) { effectFun.can(x) + effectFun.off(x) } effectFun.can <- effectFun.off <- function(x) { 0 * x } ## Identify relevant canonical covariates dmat <- model.depends(model) if(!(covname %in% colnames(dmat))) stop("Internal error: cannot match covariate names") relevant <- dmat[, covname] if(any(relevant)) { # original covariate determines one or more canonical covariates mediator <- "canonical" ## check whether covariate is separable check.separable(dmat, covname, isconstant) ## Extract information about relevant model terms termnames <- rownames(dmat)[relevant] isoffset <- rep.int(FALSE, length(termnames)) names(isoffset) <- termnames ## Extract relevant canonical covariates mm <- model.matrix(model) termvalues <- mm[, relevant, drop=FALSE] ## extract corresponding coefficients termbetas <- coef(model)[relevant] ## evaluate model effect effect <- as.numeric(termvalues %*% termbetas) ## check length if(length(effect) != npoints(quadpoints)) stop(paste("Internal error: number of values of fitted effect =", length(effect), "!=", npoints(quadpoints), "= number of quadrature points")) ## Trap loglinear case if(length(termnames) == 1 && identical(termnames, covname)) { covtype <- "canonical" beta <- termbetas } ## construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] #' set 
interaction terms to zero if(length(Vnames <- model$internal$Vnames)) defaultdata[,Vnames] <- 0 gf <- getglmfit(model) effectfundata.can <- list(covname=covname, fmla = rhs.of.formula(formula(gf)), termbetas = termbetas, defaultdata = defaultdata, relevant = relevant, termnames = termnames) effectFun.can <- function(x) { d <- effectfundata.can # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model matrix m <- model.matrix(d$fmla, df) # check it conforms to expected structure if(!identical(colnames(m)[d$relevant], d$termnames)) stop("Internal error: mismatch in term names in effectFun") me <- m[, d$relevant, drop=FALSE] y <- me %*% as.matrix(d$termbetas, ncol=1) return(y) } } if(!is.null(offmat <- attr(dmat, "offset")) && any(relevant <- offmat[, covname])) { ## covariate appears in a model offset term mediator <- c(mediator, "offset") ## check whether covariate is separable check.separable(offmat, covname, isconstant) ## collect information about relevant offset offnames <- rownames(offmat)[relevant] termnames <- c(termnames, offnames) noff <- length(offnames) termbetas <- c(termbetas, rep.int(1, noff)) isoffset <- c(isoffset, rep.int(TRUE, noff)) names(termbetas) <- names(isoffset) <- termnames ## extract values of relevant offset mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(any(nbg <- !(offnames %in% colnames(mf)))) stop(paste("Internal error:", ngettext(sum(nbg), "offset term", "offset terms"), offnames[nbg], "not found in model frame")) effex <- mf[, offnames, drop=FALSE] effect <- effect + rowSums(effex) # # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] #' set interaction terms to zero if(length(Vnames <- model$internal$Vnames)) defaultdata[,Vnames] <- 0 gf <- getglmfit(model) effectfundata.off <- list(covname=covname, fmla = rhs.of.formula(formula(gf)), defaultdata = defaultdata, offnames = offnames) effectFun.off <- function(x) { d <- effectfundata.off # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model FRAME mf <- model.frame(d$fmla, df) # check it conforms to expected structure if(!all(d$offnames %in% colnames(mf))) stop("Internal error: mismatch in term names in effectFun") moff <- mf[, d$offnames, drop=FALSE] y <- rowSums(moff) return(y) } } if(length(termnames) == 0) { # Sanity clause # (everyone knows there ain't no Sanity Clause...) warning(paste("Internal error: could not find any", "canonical covariates or offset terms", "that depended on the covariate", sQuote(covname))) # Assume it's an external covariate (i.e. 
not used in fitted model) covtype <- "external" beta <- 0 effect <- beta * covvalues effectFun <- function(x) { 0 * x } isoffset <- FALSE names(isoffset) <- covname } } #### Canonical covariates and coefficients switch(covtype, original={ cancovs <- termnames canbeta <- termbetas }, offset = , canonical={ cancovs <- covname canbeta <- beta }, external={ cancovs <- canbeta <- NA }) ################################################################# # Validate covariate values # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg.cov <- !is.finite(covvalues) if(any(offending <- nbg.cov & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } nbg.eff <- !is.finite(effect) if(any(offending <- nbg.eff & operative)) { warning(paste(sum(offending), "out of", length(offending), "values of fitted effect discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values nbg <- nbg.cov | nbg.eff ok <- !nbg & operative if(sum(ok) < 2) { warning("Not enough data; returning NULL") return(NULL) } if(!all(ok)) { Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] resid <- resid[ok] lam <- lam[ok] effect <- effect[ok] insubregion <- insubregion[ok] Z <- Z[ok] wts <- wts[ok] } #################################################### # assemble data for smoothing x <- covvalues y <- resid/wts if(smooth.effect) y <- y + effect w <- wts # if(makefrom <- is.null(from)) from <- min(x) if(maketo <- is.null(to)) to <- max(x) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(x, weights=w*y, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(x[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator if(sum(insubregion) < 2) { warning("Not enough useable data in subregion; returning NULL") return(NULL) } x <- x[insubregion] y <- y[insubregion] w <- w[insubregion] Z <- Z[insubregion] lam <- lam[insubregion] if(makefrom) from <- min(x) if(maketo) to <- max(x) numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(x, weights=w, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
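  ## The curve computed below, from the 'numer' and 'denom' estimates above,
  ## is the ratio of two kernel estimates on a common grid (a Nadaraya-Watson
  ## style smoother):
  ##     h(z) = sum_j w_j y_j K_sigma(z - x_j) / sum_j w_j K_sigma(z - x_j)
  ## where x_j are covariate values at quadrature points, y_j the scaled
  ## residuals (plus the fitted effect if smooth.effect=TRUE), w_j the
  ## quadrature weights, and K_sigma the smoothing kernel (Gaussian by
  ## default) with the bandwidth sigma selected above.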
#################################################### # Determine recommended plot range alim <- c(from, to) nZ <- sum(Z) if(nZ > 5) { xr <- range(as.vector(x[Z]), finite=TRUE) alimx <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, alimx) } #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x yyy <- numfun(xxx)/denfun(xxx) # variance estimation # smooth 1/lambda(u) with smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(x, weights=w/lam, bw=tau, adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) varestxxx <- varnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sd <- sqrt(varestxxx) # alternative estimate of variance using data points only if(nZ > 1) { varXnumer <- unnormdensity(x[Z], weights=1/lam[Z]^2, bw=tau, adjust=1, n=n,from=from,to=to, ...) varXnumfun <- interpolate(varXnumer) varXestxxx <- varXnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sdX <- sqrt(varXestxxx) } else sdX <- rep(NA, length(xxx)) # fitted effect effxxx <- effectFun(xxx) # add fitted effect of covariate, if not added before smoothing if(!smooth.effect) yyy <- yyy + effxxx #################################################### # pack into fv object df <- data.frame(xxx=xxx, h =yyy, varh=varestxxx, hi=yyy+2*sd, lo=yyy-2*sd, hiX=yyy+2*sdX, loX=yyy-2*sdX, fit=effxxx) # remove any funny characters in name of covariate (e.g. if it is an offset) Covname <- make.names(covname) names(df)[1] <- Covname desc <- c(paste("covariate", sQuote(covname)), "Smoothed partial residual", "Variance", "Upper limit of pointwise 5%% significance band (integral)", "Lower limit of pointwise 5%% significance band (integral)", "Upper limit of pointwise 5%% significance band (sum)", "Lower limit of pointwise 5%% significance band (sum)", paste("Parametric fitted effect of", sQuote(covname))) rslt <- fv(df, argu=Covname, ylab=substitute(h(X), list(X=as.name(covname))), valu="h", fmla= as.formula(paste(". ~ ", Covname)), alim=alim, labl=c(covname, paste("%s", paren(covname), sep=""), paste("var", paren(covname), sep=""), paste("hi", paren(covname), sep=""), paste("lo", paren(covname), sep=""), paste("hiX", paren(covname), sep=""), paste("loX", paren(covname), sep=""), paste("fit", paren(covname), sep="")), desc=desc, fname="h", yexp=as.expression(substitute(hat(h)(X), list(X=covname)))) attr(rslt, "dotnames") <- c("h", "hi", "lo", "fit") fvnames(rslt, ".s") <- c("hi", "lo") # add special class data class(rslt) <- c("parres", class(rslt)) attr(rslt, "stuff") <- list(covname = paste(covname, collapse=""), covtype = covtype, mediator = mediator, cancovs = cancovs, canbeta = canbeta, isoffset = isoffset, modelname = modelname, modelcall = modelcall, callstring = callstring, sigma = sigma, smooth.effect = smooth.effect, restricted = !is.null(subregion), bw.input = bw.input) return(rslt) } print.parres <- function(x, ...) 
{ cat("Transformation diagnostic (class parres)\n") s <- attr(x, "stuff") cat(paste("for the", s$covtype, "covariate", sQuote(s$covname), if(s$covtype != "external") "in" else "for", "the fitted model", if(nchar(s$modelcall) < 30) "" else "\n\t", s$modelcall, "\n")) switch(s$covtype, original={ cancovs <- s$cancovs med <- s$mediator isoffset <- s$isoffset if(is.null(isoffset)) isoffset <- rep.int(FALSE, length(cancovs)) ncc <- length(cancovs) nfitted <- sum(!isoffset) noff <- sum(isoffset) explainfitted <- explainoff <- character(0) if(noff > 0) explainoff <- paste("offset", ngettext(noff, "term", "terms"), commasep(dQuote(cancovs[isoffset]))) if(nfitted > 0) explainfitted <- paste( paste(med[med != "offset"], collapse=" and "), ngettext(nfitted, "term", "terms"), commasep(dQuote(cancovs[!isoffset]))) splat("Fitted effect: ", if(ncc > 1) "sum of" else NULL, paste(c(explainfitted, explainoff), collapse=" and ")) }, external={ cat("Note: effect estimate not justified by delta method\n") }, offset={}, canonical={}) # earlier versions were equivalent to restricted=FALSE if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n")) # earlier versions were equivalent to smooth.effect=TRUE sme <- !identical(s$smooth.effect, FALSE) if(sme) { cat("Algorithm: smooth(effect + residual)\n\n") } else { cat("Algorithm: effect + smooth(residual)\n\n") } NextMethod("print") } plot.parres <- function(x, ...) { xname <- short.deparse(substitute(x)) force(x) do.call(plot.fv, resolve.defaults(list(quote(x)), list(...), list(main=xname, shade=c("hi", "lo")))) } spatstat.model/R/fitted.mppm.R0000644000176200001440000000404214331173074015772 0ustar liggesusers# # fitted.mppm.R # # method for 'fitted' for mppm objects # # $Revision: 1.4 $ $Date: 2021/12/27 10:20:31 $ # fitted.mppm <- function(object, ..., type="lambda", dataonly=FALSE) { # sumry <- summary(object) type <- pickoption("type", type, c(lambda="lambda", cif ="lambda", trend ="trend"), multi=FALSE, exact=FALSE) # extract fitted model object and data frame glmfit <- object$Fit$FIT glmdata <- object$Fit$moadf # interaction names Vnames <- unlist(object$Fit$Vnamelist) interacting <- (length(Vnames) > 0) # row identifier id <- glmdata$id ## secret arguments dotargs <- list(...) new.coef <- dotargs$new.coef dropcoef <- isTRUE(dotargs$dropcoef) # Modification of `glmdata' may be required if(interacting) switch(type, trend={ # zero the interaction statistics glmdata[ , Vnames] <- 0 }, lambda={ # Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) # exclude from predict.glm glmdata <- glmdata[!forbid, ] }) # Possibly updated coefficients coeffs <- adaptcoef(new.coef, coef(object), drop=dropcoef) # Compute predicted [conditional] intensity values values <- GLMpredict(glmfit, glmdata, coeffs, !is.null(new.coef), type="response") # Note: the `glmdata' argument is necessary in order to obtain # predictions at all quadrature points. If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. 
if(interacting && type=="lambda") { # reinsert zeroes vals <- numeric(length(forbid)) vals[forbid] <- 0 vals[!forbid] <- values values <- vals } names(values) <- NULL if(dataonly) { # extract only data values isdata <- (glmdata$.mpl.Y != 0) values <- values[isdata] id <- id[isdata] } return(split(values, id)) } spatstat.model/R/dffit.R0000644000176200001440000000212214331173072014632 0ustar liggesusers#' #' dffit.R #' #' $Revision: 1.1 $ $Date: 2018/04/19 05:04:59 $ #' #' Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2018 dffit <- function(object, ...) UseMethod("dffit") dffit.ppm <- function(object, ..., collapse=FALSE, dfb=NULL) { db <- dfb %orifnull% dfbetas(object, ...) Z <- model.matrix(object, drop=FALSE, irregular=TRUE) if(!all(dim(db) == dim(Z))) { #' internal error - mismatch in quadrature points - fall back U <- db$loc Z <- sapply(model.images(object, irregular=TRUE), "[", i=U) } #' ensure 0 * (-Inf) = 0 if(any(a <- (db$val == 0) & (Z == -Inf))) Z[a] <- 0 #' smoothed density must be handled separately sm <- attr(db, "smoothdensity") attr(db, "smoothdensity") <- NULL #' do the main calculation Y <- db * Z #' also calculate the smoothed density if(!is.null(sm)) { ZZ <- model.images(object, irregular=TRUE) HH <- mapply(harmonise, ZZ=ZZ, sm=sm) sm <- mapply("*", e1=HH$sm, e2=HH$ZZ, SIMPLIFY=FALSE) attr(Y, "smoothdensity") <- as.solist(sm) } if(collapse) Y <- Reduce("+", unstack(Y)) return(Y) } spatstat.model/R/multihard.R0000644000176200001440000001467514331173073015550 0ustar liggesusers# # # multihard.R # # $Revision: 1.21 $ $Date: 2022/05/23 02:33:06 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # MultiHard <- local({ # .... multitype hard core potential MHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(h) <= col(h)) & (!is.na(h)) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] # corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply the relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- rep.int(TRUE, npairs) return(z) } #### end of 'pot' function #### # ............ template object ................... BlankMH <- list( name = "Multitype Hardcore process", creator = "MultiHard", family = "pairwise.family", # evaluated later pot = MHpotential, par = list(types=NULL, hradii = NULL), # filled in later parnames = c("possible types", "hardcore distances"), pardesc = c("vector of possible types", "matrix of hardcore distances"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiHard(types=types,hradii=hradii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { h <- self$par$hradii nt <- length(types) if(!is.null(h)) MultiPair.checkmatrix(h, nt, sQuote("hradii")) if(length(types) == 0) stop(paste("The", sQuote("types"), "argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { h <- self$par$hradii if(waxlyrical('gory')) { if(!is.null(h)) splat(nrow(h), "types of points") types <- self$par$types if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } if(!is.null(h)) { splat("Hardcore radii:") print(signif(h, getOption("digits"))) } else splat("Hardcore radii:\t not yet determined") invisible() }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) 
{ h <- self$par$hradii return(h) }, version=NULL # fix later ) class(BlankMH) <- "interact" MultiHard <- function(hradii=NULL, types=NULL) { if((missing(hradii) || !is.matrix(hradii)) && is.matrix(types)) { ## old syntax: (types=NULL, hradii) hradii <- types types <- NULL } if(!is.null(hradii)) hradii[hradii == 0] <- NA out <- instantiate.interact(BlankMH, list(types=types, hradii = hradii)) if(!is.null(types)) dimnames(out$par$hradii) <- list(types, types) return(out) } MultiHard <- intermaker(MultiHard, BlankMH) MultiHard }) spatstat.model/R/pairwise.family.R0000644000176200001440000005337514331173073016662 0ustar liggesusers# # # pairwise.family.S # # $Revision: 1.75 $ $Date: 2022/11/03 11:08:33 $ # # The pairwise interaction family of point process models # # pairwise.family: object of class 'isf' defining pairwise interaction # # # ------------------------------------------------------------------- # pairwise.family <- list( name = "pairwise", order = 2, print = function(self) { cat("Pairwise interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(is.null(inter) || is.null(inter$family) || inter$family$name != "pairwise") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) { # compute potential function as `fv' object dd <- matrix(d, ncol=1) p <- pairpot(dd, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(quote(fun)), list(...), list(ylim=ylim))) return(invisible(fun)) } else{ # compute each potential and store in `fasp' object if(!is.factor(types)) types <- factor(types, levels=types) m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { # relevant position in matrix ijpos <- 
i + (j-1) * m which[i,j] <- ijpos # extract values of potential yy <- y[tx == types[i], j] # make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) # } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(quote(funz)), list(...), list(ylim=ylim))) return(invisible(funz)) } }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { # # This is the eval function for the `pairwise' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairwise' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # # The pair potential function 'pairpot' should be either # pairpot(d, par) [for potentials that don't depend on marks] # or # pairpot(d, tx, tu, par) [for potentials that do depend on mark] # where d is a matrix of interpoint distances, # tx is the vector of types for the data points, # tu is the vector of types for all quadrature points # and # par is a list of parameters for the potential. # # The additional argument 'splitInf' is also permitted. # # It must return a matrix with the same dimensions as d # or an array with its first two dimensions the same as the dimensions of d. 
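  ## Two conforming pair potentials, for illustration:
  ##   unmarked (this is the actual potential used by Strauss(), later in
  ##   this package):
  ##     function(d, par) { d <= par$r }
  ##   marked (same shape as the MultiHard potential above):
  ##     function(d, tx, tu, par) { ... }   # returns array, dims c(dim(d), p)
  ## Both must return values whose leading dimensions equal dim(d).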
pt <- PairPotentialType(pairpot) # includes validation of pair potential ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) { POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) } else { POT <- do.call.matched(pairpot, list(d=M, tx=marks(X), tu=marks(U), par=potpars)) } # Determine whether each component of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) #' positive case if(splitInf) { IsNegInf <- (POT == -Inf) POT[IsNegInf] <- 0 } # handle corrections if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 0) { nplanes <- dim(POT)[3] for(k in 1:nplanes) { POT[cbind(EqualPairs, k)] <- 0 if(splitInf) IsNegInf[cbind(EqualPairs, k)] <- FALSE } } # reattach the negative infinity for re-use by special code if(splitInf) attr(POT, "IsNegInf") <- IsNegInf # Return just the pair potential? 
if(pot.only) return(POT) # Sum the pairwise potentials over data points for each quadrature point V <- apply(POT, c(2,3), sum) # Handle positive case if(splitInf) attr(V, "-Inf") <- apply(IsNegInf, 2, any) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairwise.family$suffstat") { # for pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"pairwise")) stop("Model is not a pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, improve.type="none") } # determine which data points contribute to pseudolikelihood contribute <- getppmdatasubset(modelX) Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[numeric(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] # parameters order2 <- names(coef(model)) %in% model$internal$Vnames order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions can be determined from INxIN o1terms <- momINxIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { #' Sufficient statistic for second order conditional intensity #' for pairwise interaction processes #' Equivalent to evaluating pair potential. if(is.ppp(X)) { seqX <- seq_len(npoints(X)) E <- cbind(seqX, seqX) R <- reach(inte) result <- pairwise.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) M <- attr(result, "IsNegInf") if(sparseOK) result <- as.sparse3Darray(result) if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") #' collapse vector-valued potential, yielding a matrix M <- apply(M, c(1,2), any) if(!is.matrix(M)) M <- matrix(M, nrow=nX) #' count conflicts hits <- colSums(M) #' hits[j] == 1 implies that X[j] violates hard core with only one X[i] #' and therefore changes status if X[i] is deleted. 
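        ## e.g. if M[, 3] is TRUE only in row 5, then hits[3] == 1, and
        ## deleting X[5] releases X[3] from its only hard-core conflict,
        ## changing its status; if hits[3] >= 2 no single deletion does.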
deltaInf <- M deltaInf[, hits != 1] <- FALSE if(sparseOK) deltaInf <- as(deltaInf, "sparseMatrix") #' attr(result, "deltaInf") <- deltaInf } } else if(is.quad(X)) { U <- union.quad(X) izdat <- is.data(X) nU <- npoints(U) nX <- npoints(X$data) seqU <- seq_len(nU) E <- cbind(seqU, seqU) R <- reach(inte) result <- pairwise.family$eval(U,U,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) M <- attr(result, "IsNegInf") if(sparseOK) result <- as.sparse3Darray(result) if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") #' consider conflicts with data points MXU <- M[izdat, , , drop=FALSE] #' collapse vector-valued potential, yielding a matrix MXU <- apply(MXU, c(1,2), any) if(!is.matrix(MXU)) MXU <- matrix(MXU, nrow=nX) #' count data points conflicting with each quadrature point nhitdata <- colSums(MXU) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], deltaInf <- apply(M, c(1,2), any) deltaInf[izdat, nhitdata != 1] <- FALSE #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. deltaInf[!izdat, nhitdata != 0] <- FALSE #' if(sparseOK) deltaInf <- as(deltaInf, "sparseMatrix") #' attr(result, "deltaInf") <- deltaInf } } return(result) } ######### end of function $delta2 ) ######### end of list class(pairwise.family) <- "isf" # externally visible PairPotentialType <- function(pairpot) { stopifnot(is.function(pairpot)) fop <- names(formals(pairpot)) v <- match(list(fop), list(c("d", "par"), c("d", "tx", "tu", "par"))) if(is.na(v)) stop("Formal arguments of pair potential function are not understood", call.=FALSE) marked <- (v == 2) return(list(marked=marked)) } evalPairPotential <- function(X, P, E, pairpot, potpars, R) { # Evaluate pair potential without edge correction weights nX <- npoints(X) nP <- npoints(P) pt <- PairPotentialType(pairpot) # includes validation # determine dimension of potential, etc fakePOT <- do.call.matched(pairpot, list(d=matrix(, 0, 0), tx=marks(X)[integer(0)], tu=marks(P)[integer(0)], par=potpars)) IsOffset <- attr(fakePOT, "IsOffset") fakePOT <- ensure3Darray(fakePOT) Vnames <- dimnames(fakePOT)[[3]] p <- dim(fakePOT)[3] # Identify close pairs X[i], P[j] cl <- crosspairs(X, P, R, what="ijd") I <- cl$i J <- cl$j D <- matrix(cl$d, ncol=1) # deal with empty cases if(nX == 0 || nP == 0 || length(I) == 0) { di <- c(nX, nP, p) dn <- list(NULL, NULL, Vnames) result <- array(0, dim=di, dimnames=dn) attr(result, "IsOffset") <- IsOffset return(result) } # evaluate potential for close pairs # POT is a 1-column matrix or array, with rows corresponding to close pairs if(!pt$marked) { # unmarked POT <- do.call.matched(pairpot, list(d=D, par=potpars)) IsOffset <- attr(POT, "IsOffset") } else { # marked marX <- marks(X) marP <- marks(P) if(!identical(levels(marX), levels(marP))) stop("Internal error: marks of X and P have different levels") types <- levels(marX) mI <- marX[I] mJ <- marP[J] POT <- NULL # split data by type of P[j] for(k in types) { relevant <- which(mJ == k) if(length(relevant) > 0) { fk <- factor(k, levels=types) POTk <- do.call.matched(pairpot, list(d=D[relevant, , drop=FALSE], tx=mI[relevant], tu=fk, par=potpars)) POTk <- ensure3Darray(POTk) if(is.null(POT)) { #' use first result of 'pairpot' to determine dimension POT <- array(, dim=c(length(I), 1, dim(POTk)[3])) #' capture information about offsets, and names of interaction terms IsOffset <- attr(POTk, 
"IsOffset") Vnames <- dimnames(POTk)[[3]] } # insert values just computed POT[relevant, , ] <- POTk } } } POT <- ensure3Darray(POT) p <- dim(POT)[3] # create result array result <- array(0, dim=c(npoints(X), npoints(P), p), dimnames=list(NULL, NULL, Vnames)) # insert results II <- rep(I, p) JJ <- rep(J, p) KK <- rep(1:p, each=length(I)) IJK <- cbind(II, JJ, KK) result[IJK] <- POT # finally identify identical pairs and set value to 0 if(length(E) > 0) { E.rep <- apply(E, 2, rep, times=p) p.rep <- rep(1:p, each=nrow(E)) result[cbind(E.rep, p.rep)] <- 0 } attr(result, "IsOffset") <- IsOffset return(result) } evalPairwiseTerm <- function(fint, d) { ## very similar to pairwise.family$plot verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || interactionorder(inter) != 2) stop("This operation is only defined for pairwise interactions") ## get fitted coefficients of interaction terms ## and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] ## pairpot <- inter$pot potpars <- inter$par types <- potpars$types if(is.null(types)) { ## values of interaction term at distances 'd' dd <- matrix(d, ncol=1) p <- pairpot(dd, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) z <- exp(apply(p, c(1,2), sum)) z <- as.numeric(z) } else { ## value of interaction terms between each pair of types if(!is.factor(types)) types <- factor(types, levels=types) m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) z <- matrix(1, nrow=nd, ncol=m * m) colnames(z) <- as.character(outer(types, types, paste, sep=":")) for(i in seq_len(m)) { for(j in seq_len(m)) { ## extract values of potential z[ , j + (i-1) * m] <- y[tx == types[i], j] } } } return(z) } spatstat.model/R/effectfun.R0000644000176200001440000001505014331173072015507 0ustar liggesusers# # effectfun.R # # $Revision: 1.25 $ $Date: 2020/01/23 05:36:51 $ # effectfun <- local({ okclasses <- c("ppm", "kppm", "lppm", "dppm", "rppm", "profilepl") effectfun <- function(model, covname, ..., se.fit=FALSE, nvalues=256) { if(!inherits(model, okclasses)) stop(paste("First argument 'model' should be a fitted model of class", commasep(sQuote(okclasses), " or ")), call.=FALSE) orig.model <- model model <- as.ppm(model) dotargs <- list(...) 
#' determine names of covariates involved intern.names <- if(is.marked.ppm(model)) c("x", "y", "marks") else c("x", "y") needed.names <- variablesinformula(rhs.of.formula(formula(model))) #' check for clashes/quirks if("lambda" %in% needed.names) { if(is.dppm(orig.model) && ( identical.formulae(formula(model), ~offset(log(lambda))-1) || identical.formulae(formula(model), ~log(lambda)-1) )) stop("effectfun is not defined for a DPP model with fixed intensity", call.=FALSE) intensityname <- setdiff(c("Lambda", "intensity"), needed.names)[1] } else intensityname <- "lambda" ## validate the relevant covariate if(missing(covname) || is.null(covname)) { mc <- model.covariates(model) if(length(mc) == 1) covname <- mc else stop("covname must be provided") } if(!is.character(covname)) stop("covname should be a character string", call.=FALSE) if(length(covname) != 1L) stop("covname should be a single character string", call.=FALSE) # check that fixed values for all other covariates are provided given.covs <- names(dotargs) if(any(uhoh <- !(needed.names %in% c(given.covs, covname)))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "A value for the covariate", "Values for the covariates"), commasep(dQuote(needed.names[uhoh])), "must be provided (as", ngettext(nuh, "an argument", "arguments"), "to effectfun)")) } #' establish type and range of covariate values check.1.integer(nvalues) stopifnot(nvalues >= 128) N0 <- nvalues if(covname == "x") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$xrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "y") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$yrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "marks") { covtype <- "factor" Zvals <- levels(marks(data.ppm(model))) } else { # covariate is external if(is.data.frame(covdf <- model$covariates) && (covname %in% names(covdf))) { Z <- covdf[,covname] covtype <- typeof(Z) if(covtype == "double") covtype <- "real" switch(covtype, real={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, integer={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], by=ceiling((diff(Zr)+1)/N0)) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } else { Z <- getdataobjects(covname, environment(formula(model)), model$covariates)[[1L]] if(is.null(Z)) stop(paste("Cannot find covariate", sQuote(covname)), call.=FALSE) # convert to image if(!is.im(Z)) Z <- as.im(Z, W=as.owin(model)) covtype <- Z$type switch(covtype, real={ Zr <- summary(Z)$range Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } } # set up data frames of fake data for predict method # First set up default, constant value for each covariate N <- length(Zvals) fakeloc <- resolve.defaults(dotargs, list(x=0, y=0))[c("x","y")] if(is.marked.ppm(model)) { if("marks" %in% names(dotargs)) { fakeloc$marks <- dotargs$marks dotargs <- dotargs[names(dotargs) != "marks"] } else { lev <- levels(marks(data.ppm(model))) fakeloc$marks <- lev[1L] } } fakeloc <- lapply(fakeloc, padout, N=N) fakecov <- lapply(dotargs, padout, N=N) # Overwrite value for covariate of interest if(covname %in% intern.names) fakeloc[[covname]] <- Zvals else fakecov[[covname]] <- Zvals # convert to data frame fakeloc <- do.call(data.frame, fakeloc) fakecov <- if(length(fakecov) > 0) do.call(data.frame, 
fakecov) else NULL # # Now predict pred <- predict(orig.model, locations=fakeloc, covariates=fakecov, se=se.fit) if(!se.fit) lambda <- pred else { lambda <- pred$estimate se <- pred$se sedf <- data.frame(se =se, hi = lambda + 2 * se, lo = lambda - 2 * se) } # dfin <- if(!is.null(fakecov)) cbind(fakeloc, fakecov) else fakeloc dfin <- dfin[covname] dflam <- data.frame(lambda=lambda) names(dflam) <- intensityname df <- cbind(dfin, dflam) # if(covtype != "real") { result <- df if(se.fit) result <- cbind(result, sedf) } else { bc <- paren(covname) result <- fv(df, argu=covname, ylab=substitute(lambda(X), list(X=as.name(covname), lambda=as.name(intensityname))), labl=c(covname, paste("hat(%s)", bc)), valu=intensityname, alim=Zr, desc=c(paste("value of covariate", covname), "fitted intensity"), fname=intensityname) if(se.fit) { result <- bind.fv(result, sedf, labl=c(paste("se[%s]", bc), paste("%s[hi]", bc), paste("%s[lo]", bc)), desc=c("standard error of fitted trend", "upper limit of pointwise 95%% CI for trend", "lower limit of pointwise 95%% CI for trend")) fvnames(result, ".") <- c(intensityname, "hi", "lo") fvnames(result, ".s") <- c("hi", "lo") formula(result) <- paste(". ~ ", covname) } } return(result) } padout <- function(x,N) { rep.int(x[1L],N) } effectfun }) spatstat.model/R/strauss.R0000644000176200001440000001302214331173073015244 0ustar liggesusers# # # strauss.R # # $Revision: 2.48 $ $Date: 2022/05/22 10:52:48 $ # # The Strauss process # # Strauss() create an instance of the Strauss process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Strauss <- local({ # create blank template object without family and pars BlankStrauss <- list( name = "Strauss process", creator = "Strauss", family = "pairwise.family", # evaluated later pot = function(d, par) { d <= par$r }, par = list(r = NULL), # to be filled in parnames = "interaction distance", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma) && (loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL, # to be filled in # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { #' fast evaluator for Strauss interaction dont.complain.about(splitInf) if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Strauss") r <- potpars$r answer <- strausscounts(U, X, r, EqualPairs) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r return((1-gamma) * pi * r^2) }, Percy=function(d, coeffs, par, ...) 
{ ## term used in Percus-Yevick type approximation gamma <- exp(as.numeric(coeffs[1])) R <- par$r t <- abs(d/(2*R)) t <- pmin.int(t, 1) y <- 2 * R^2 * (pi * (1-gamma) - (1-gamma)^2 * (acos(t) - t * sqrt(1 - t^2))) return(y) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { r <- inte$par$r X <- as.ppp(X) # algorithm is the same for data and dummy points nX <- npoints(X) cl <- weightedclosepairs(X, r, correction=correction, what="indices") if(is.null(cl)) return(NULL) v <- sparseMatrix(i=cl$i, j=cl$j, x=cl$weight, dims=c(nX, nX)) if(!sparseOK) v <- as.matrix(v) return(v) } ) class(BlankStrauss) <- "interact" # Finally define main function Strauss <- function(r) { instantiate.interact(BlankStrauss, list(r=r)) } Strauss <- intermaker(Strauss, BlankStrauss) Strauss }) # generally accessible functions strausscounts <- function(U, X, r, EqualPairs=NULL) { answer <- crosspaircounts(U,X,r) # subtract counts of identical pairs if(length(EqualPairs) > 0) { nU <- npoints(U) idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU))) answer <- answer - idcount } return(answer) } closepaircounts <- function(X, r) { stopifnot(is.ppp(X)) stopifnot(is.numeric(r) && length(r) == 1) stopifnot(is.finite(r)) stopifnot(r >= 0) # sort in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] nX <- npoints(X) # call C routine (defined in Estrauss.c) out <- .C(SM_Cclosepaircounts, nxy = as.integer(nX), x = as.double(Xsort$x), y = as.double(Xsort$y), rmaxi = as.double(r), counts = as.integer(integer(nX)), PACKAGE="spatstat.model") answer <- integer(nX) answer[oX] <- out$counts return(answer) } crosspaircounts <- function(X, Y, r) { stopifnot(is.ppp(X)) stopifnot(is.numeric(r) && length(r) == 1) stopifnot(is.finite(r)) stopifnot(r >= 0) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine (defined in Estrauss.c) out <- .C(SM_Ccrosspaircounts, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), counts = as.integer(integer(nX)), PACKAGE="spatstat.model") answer <- integer(nX) answer[oX] <- out$counts return(answer) } spatstat.model/R/relrisk.ppm.R0000644000176200001440000003307414331173073016017 0ustar liggesusers## ## relrisk.ppm.R ## ## $Revision: 1.9 $ $Date: 2019/01/08 07:44:07 $ ## relrisk.ppm <- local({ relrisk.ppm <- function(X, ..., at=c("pixels", "points"), relative=FALSE, se=FALSE, casecontrol=TRUE, control=1, case, ngrid=NULL, window=NULL) { stopifnot(is.ppm(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) if(!relative && (control.given || case.given)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE")) } model <- X Y <- data.ppm(model) types <- levels(marks(Y)) ntypes <- length(types) # np <- length(coef(model)) ## compute probabilities or risks if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } 
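## Usage sketch for Strauss() and the pair-counting helpers defined above.
## The dataset and radius are illustrative assumptions ('swedishpines' is
## supplied by spatstat.data; its coordinates are in decimetres):
##   fit <- ppm(swedishpines ~ 1, Strauss(r=7))
##   exp(coef(fit)[["Interaction"]])   # fitted interaction parameter gamma
##   counts <- closepaircounts(swedishpines, 7)
##   sum(counts)/2                     # each unordered close pair is counted
##                                     # once from each end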
else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } switch(at, pixels= { ## estimate is a single image ## compute images of intensities of each mark lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities.. ## total intensity (image) lambda.all <- im.apply(lambda.each, sum, check=FALSE) ## WAS: lambda.all <- Reduce("+", lambda.each) if(!se) { result <- lambda.each[[icase]]/lambda.all result <- killglitches(result) } else { probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) estimate <- probs[[icase]] SE <- SEprobPixels(model, probs)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } else { ## relative risks lambda.ctrl <- lambda.each[[icontrol]] if(!se) { result <- lambda.each[[icase]]/lambda.ctrl result <- killglitches(result) } else { risks <- lapply(lambda.each, "/", e2=lambda.ctrl) risks <- as.solist(lapply(risks, killglitches)) estimate <- risks[[icase]] SE <- SErelriskPixels(model, risks, icontrol)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } }, points={ ## compute intensities of each type Ycase <- unmark(Y) %mark% factor(types[icase], levels=types) Yctrl <- unmark(Y) %mark% factor(types[icontrol], levels=types) lambda.case <- predict(model, locations=Ycase) lambda.ctrl <- predict(model, locations=Yctrl) if(!relative) { ## compute probabilities ## total intensity lambda.all <- lambda.case + lambda.ctrl prob.case <- lambda.case/lambda.all if(!se) { result <- prob.case } else { probs <- matrix(, length(prob.case), 2) probs[,icase] <- prob.case probs[,icontrol] <- 1 - prob.case SE <- SEprobPoints(model, probs)[,icase] result <- list(estimate=prob.case, SE=SE) } } else { ## compute relative risks risk.case <- lambda.case/lambda.ctrl if(!se) { result <- risk.case } else { risks <- matrix(, length(risk.case), 2) risks[,icase] <- risk.case risks[,icontrol] <- 1 SE <- SErelriskPoints(model, risks, icontrol)[,icase] result <- list(estimate=risk.case, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ ## estimate is a list of images ## Compute images of intensities of each type lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities... 
## image of total intensity lambda.all <- im.apply(lambda.each, sum, check=FALSE) ## WAS: lambda.all <- Reduce("+", lambda.each) probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) if(!se) { result <- probs } else { SE <- SEprobPixels(model, probs) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lapply(lambda.each, "/", e2=lambda.each[[icontrol]]) risks <- as.solist(lapply(risks, killglitches)) if(!se) { result <- risks } else { SE <- SErelriskPixels(model, risks, icontrol) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=risks, SE=SE) } } }, points = { ## matrix of intensities of each type at each point ## rows=locations, cols=types lambda.each <- sapply(types, predictfortype, loc=unmark(Y), model=model, types=types) if(!relative) { ## compute probabilities lambda.all <- rowSums(lambda.each) probs <- lambda.each/lambda.all if(!se) { result <- probs } else { SE <- SEprobPoints(model, probs) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lambda.each/lambda.each[,icontrol] if(!se) { result <- risks } else { SE <- SErelriskPoints(model, risks, icontrol) result <- list(estimate=risks, SE=SE) } } }) } return(result) } modmats <- function(model) { # model matrices for data locations for each possible mark QM <- quad.ppm(model) Y <- QM$data suppressWarnings({ QR <- quadscheme.replicated(Y, unmark(Y[FALSE])) }) sourceid <- QR$param$sourceid ## canonical covariates mm <- model.matrix(model, Q=QR) ## mm is a matrix with one column for canonical covariate ## and one row for each marked point in QR. mm <- cbind(data.frame(".s"=sourceid, ".m"=marks(QR)), mm) ## Split by marks ss <- split(mm, mm$.m) ## Reorganise into compatible matrices zz <- lapply(ss, reorg) return(zz) } reorg <- function(x) { z <- x rownames(z) <- NULL z[x$.s, ] <- z return(z[,-(1:2), drop=FALSE]) } SErelriskPoints <- function(model, riskvalues, icontrol) { ## riskvalues is a matrix with rows=data locations, cols=types types <- colnames(riskvalues) ntypes <- length(types) ## S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having one row per data location dS.um <- lapply(S.um, "-", e2=S.um[[icontrol]]) R.um <- mapply("*", dS.um, as.list(as.data.frame(riskvalues)), SIMPLIFY=FALSE) ## likewise R.um is a list of matrices ## vc <- vcov(model) VAR <- lapply(R.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } msubtract <- function(z1, z2) mapply("-", e1=z1, e2=z2, SIMPLIFY=FALSE) mmultiply <- function(z1, z2) solapply(z1, "*", e2=z2) SErelriskPixels <- function(model, riskvalues, icontrol) { ## riskvalues is an imlist types <- names(riskvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate dS.um <- lapply(S.um, msubtract, z2=S.um[,icontrol,drop=TRUE]) R.um <- mapply(mmultiply, z1=dS.um, z2=riskvalues, SIMPLIFY=FALSE) VAR <- vector(mode="list", length=ntypes) ntypes <- length(types) vc <- vcov(model) ncoef <- nrow(vc) for(type in 1:ntypes) { v <- 0 Rum <- R.um[[type]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Rum[[i]] * vc[i,j] * Rum[[j]] } } VAR[[type]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) return(SE) } SEprobPixels <- function(model, probvalues) { ## 
probvalues is an imlist types <- names(probvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate ncoef <- length(coef(model)) Sbar.u <- vector(mode="list", length=ncoef) for(k in 1:ncoef) { A <- mapply("*", e1=S.um[k,,drop=TRUE], e2=probvalues, SIMPLIFY=FALSE) Sbar.u[[k]] <- im.apply(A, sum) } ## Sbar.u is a list of images, one for each canonical covariate Sdif.um <- lapply(as.list(S.um), msubtract, z2=Sbar.u) ## Sdif.um is a list of lists of images. ## List of length ntypes, ## each entry being an imlist of length ncoef P.um <- mapply(mmultiply, Sdif.um, probvalues, SIMPLIFY=FALSE) ## P.um is same format as Sdif.um vc <- vcov(model) ncoef <- nrow(vc) VAR <- vector(mode="list", length=ntypes) for(m in 1:ntypes) { v <- 0 Pum <- P.um[[m]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Pum[[i]] * vc[i,j] * Pum[[j]] } } VAR[[m]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) } SEprobPoints <- function(model, probvalues) { ## probvalues is a matrix with row=location and column=type types <- colnames(probvalues) ntypes <- length(types) ## canonical covariates S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having rows=locations and cols=covariates ## Weight each matrix by its mark probabilities SW <- mapply("*", e1=S.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## average them Sbar.u <- Reduce("+", SW) ## Sbar.u is a matrix with rows=locations and cols=covariates Sdif.um <- lapply(S.um, "-", e2=Sbar.u) ## Sdif.um is a list of matrices like S.um P.um <- mapply("*", e1=Sdif.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## P.um likewise vc <- vcov(model) VAR <- lapply(P.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } predictfortype <- function(type, model, types, loc) { predict(model, locations=loc %mark% factor(type, levels=types)) } killglitches <- function(z, eps=.Machine$double.eps) { ra <- range(z, finite=TRUE) if(max(abs(ra)) < eps) { z[] <- 0 return(z) } if(diff(ra) < eps) z[] <- mean(z, na.rm=TRUE) return(z) } relrisk.ppm }) spatstat.model/R/hierpair.family.R0000644000176200001440000003137314331173073016634 0ustar liggesusers# # # hierpair.family.R # # $Revision: 1.13 $ $Date: 2022/06/18 10:39:32 $ # # The family of hierarchical pairwise interactions # # # ------------------------------------------------------------------- # hierpair.family <- list( name = "hierpair", print = function(self) { splat("Hierarchical pairwise interaction family") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hierpair") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { 
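## Dormant usage sketch for relrisk.ppm() as defined above; the if(FALSE)
## wrapper keeps it from ever executing here. The model is an illustrative
## assumption: 'amacrine' (from spatstat.data) is a bivariate pattern, so
## the two-type case/control branch above is exercised.
if(FALSE) {
  fit <- ppm(amacrine ~ marks * x)  # intensity depends on type and on x
  p <- relrisk(fit, se=TRUE)        # estimated probability of the case type
  plot(p$estimate)                  # image of the probability estimate
  plot(p$SE)                        # pointwise standard error (SEprobPixels)
  rr <- relrisk(fit, relative=TRUE, control="off")  # relative risk instead
}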
stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) stop("Unable to determine types of points") if(!is.factor(types)) types <- factor(types, levels=types) ## compute each potential and store in `fasp' object m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3L] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3L])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { ## relevant position in matrix ijpos <- i + (j-1L) * m which[i,j] <- ijpos ## extract values of potential yy <- y[tx == types[i], j] ## make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", quote(h(r)), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1")) } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(quote(funz)), list(...), list(ylim=ylim, ylab="Pairwise interaction", xlab="Distance"))) return(invisible(funz)) }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { ## ## This is the eval function for the `hierpair' family. ## fop <- names(formals(pairpot)) if(isTRUE(all.equal(fop, c("d", "par")))) marx <- FALSE else if(isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) marx <- TRUE else stop("Formal arguments of pair potential function are not understood") ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- FALSE && (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) else if(!marx) POT <- pairpot(M, potpars) else POT <- pairpot(M, marks(X), marks(U), potpars) # Determine whether each column of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 0) { nplanes <- dim(POT)[3L] for(k in 1:nplanes) POT[cbind(EqualPairs, k)] <- 0 } # Return just the pair potential? 
if(pot.only) return(POT) # Sum the pairwise potentials V <- apply(POT, c(2,3), sum) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="hierpair.family$suffstat") { # for hierarchical pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"hierpair")) stop("Model is not a hierarchical pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, improve.type="none") } # determine which data points contribute to pseudolikelihood contribute <- getppmdatasubset(modelX) Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[integer(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix at data points which contribute to the pseudolikelihood momIN <- partialModelMatrix(X, Empty, model, "suffstat")[contribute, , drop=FALSE] # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] ## determine which canonical covariates are true second-order terms ## eg 'mark1x1' typ <- levels(marks(X)) vn <- paste0("mark", typ, "x", typ) order2 <- names(coef(model)) %in% vn order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions (including 'mark1x2' etc) o1terms <- momIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ...) { # Sufficient statistic for second order conditional intensity # for hierarchical pairwise interaction processes # Equivalent to evaluating pair potential. if(is.ppp(X)) { seqX <- seq_len(npoints(X)) E <- cbind(seqX, seqX) R <- reach(inte) POT <- hierpair.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) result <- aperm(POT, c(2,1,3)) M <- attr(POT, "IsNegInf") if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") M <- aperm(M, c(2,1,3)) #' collapse vector-valued potential, yielding a matrix M <- apply(M, c(1,2), any) if(!is.matrix(M)) M <- matrix(M, nrow=nX) #' count conflicts hits <- colSums(M) #' hits[j] == 1 implies that X[j] violates hard core with only one X[i] #' and therefore changes status if X[i] is deleted. 
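## Dormant sketch of fitting a hierarchical pairwise model whose canonical
## statistics are assembled by the eval/suffstat functions above. All
## specifics are illustrative assumptions: 'ants' (spatstat.data) is a
## bivariate pattern; the radii matrix is hypothetical, and HierStrauss()
## (exported by this package) is assumed here to take that matrix as its
## first argument, with an NA entry meaning no interaction for that pair.
if(FALSE) {
  radii <- matrix(c(45, NA, 35, 75), nrow=2, ncol=2)  # hypothetical radii
  fit <- ppm(ants ~ 1, HierStrauss(radii))
  plot(fitin(fit))  # dispatches to the 'plot' entry of this family
}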
deltaInf <- M deltaInf[, hits != 1] <- FALSE attr(result, "deltaInf") <- deltaInf } } else if(is.quad(X)) { U <- union.quad(X) izdat <- is.data(X) nU <- npoints(U) nX <- npoints(X$data) seqU <- seq_len(nU) E <- cbind(seqU, seqU) R <- reach(inte) POT <- hierpair.family$eval(U,U,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) result <- aperm(POT, c(2,1,3)) M <- attr(POT, "IsNegInf") if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") M <- aperm(M, c(2,1,3)) #' consider conflicts with data points MXU <- M[izdat, , , drop=FALSE] #' collapse vector-valued potential, yielding a matrix MXU <- apply(MXU, c(1,2), any) if(!is.matrix(MXU)) MXU <- matrix(MXU, nrow=nX) #' count data points conflicting with each quadrature point nhitdata <- colSums(MXU) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], deltaInf <- apply(M, c(1,2), any) deltaInf[izdat, nhitdata != 1] <- FALSE #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. deltaInf[!izdat, nhitdata != 0] <- FALSE attr(result, "deltaInf") <- deltaInf } } return(result) } ######### end of function $delta2 ) ######### end of list class(hierpair.family) <- "isf" spatstat.model/R/Kmodel.R0000644000176200001440000000032414331173072014753 0ustar liggesusers# # Kmodel.R # # Kmodel and pcfmodel # # $Revision: 1.1 $ $Date: 2011/05/30 14:02:21 $ # Kmodel <- function(model, ...) { UseMethod("Kmodel") } pcfmodel <- function(model, ...) { UseMethod("pcfmodel") } spatstat.model/R/hybrid.family.R0000644000176200001440000001565414331173073016316 0ustar liggesusers# # hybrid.family.R # # $Revision: 1.14 $ $Date: 2020/11/16 01:32:06 $ # # Hybrid interactions # # hybrid.family: object of class 'isf' defining pairwise interaction # # ------------------------------------------------------------------- # hybrid.family <- list( name = "hybrid", print = function(self) { cat("Hybrid interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE, separate=FALSE) { # plot hybrid interaction if possible verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hybrid") stop("Tried to plot the wrong kind of interaction") if(is.null(d)) { # compute reach and determine max distance for plots dmax <- 1.25 * reach(inter) if(!is.finite(dmax)) { # interaction has infinite reach # Are plot limits specified? xlim <- resolve.defaults(list(...), list(xlim=c(0, Inf))) if(all(is.finite(xlim))) dmax <- max(xlim) else stop("Interaction has infinite reach; need to specify xlim or d") } d <- seq(0, dmax, length=256) } # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # extract the component interactions interlist <- inter$par # check that they are all pairwise interactions families <- unlist(lapply(interlist, interactionfamilyname)) if(!separate && !all(families == "pairwise")) { warning(paste("Cannot compute the resultant function;", "not all components are pairwise interactions;", "plotting each component separately")) separate <- TRUE } # deal with each interaction ninter <- length(interlist) results <- list() for(i in 1:ninter) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. 
<- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. prefixlength <- nchar(nameI.) Vprefix <- substr(Vnames, 1, prefixlength) relevant <- (Vprefix == nameI.) # construct fii object for this component fitinI <- fii(interI, coeff[relevant], Vnames[relevant], IsOffset[relevant]) # convert to fv object a <- plot(fitinI, ..., d=d, plotit=FALSE) aa <- list(a) names(aa) <- nameI results <- append(results, aa) } # computation of resultant is only implemented for fv objects if(!separate && !all(unlist(lapply(results, is.fv)))) { warning(paste("Cannot compute the resultant function;", "not all interaction components yielded an fv object;", "plotting separate results for each component")) separate <- TRUE } # return separate 'fv' or 'fasp' objects if required results <- as.anylist(results) if(separate) { if(plotit) { main0 <- "Pairwise interaction components" do.call(plot, resolve.defaults(list(quote(results)), list(...), list(main=main0))) } return(invisible(results)) } # multiply together to obtain resultant pairwise interaction ans <- results[[1L]] if(ninter >= 2) { for(i in 2:ninter) { Fi <- results[[i]] ans <- eval.fv(ans * Fi) } copyover <- c("ylab", "yexp", "labl", "desc", "fname") attributes(ans)[copyover] <- attributes(results[[1L]])[copyover] } main0 <- "Resultant pairwise interaction" if(plotit) do.call(plot, resolve.defaults(list(quote(ans)), list(...), list(main=main0))) return(invisible(ans)) }, eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # `pot' is ignored; `pars' is a list of interactions nU <- length(U$x) V <- matrix(, nU, 0) IsOffset <- logical(0) for(i in 1:length(pars)) { # extract i-th component interaction interI <- pars[[i]] nameI <- names(pars)[[i]] # compute potential for i-th component VI <- evalInteraction(X, U, EqualPairs, interI, correction, ...) 
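## Dormant sketch for the Kmodel() and pcfmodel() generics defined in
## Kmodel.R above. The cluster model is an illustrative assumption
## ('redwood' is in spatstat.data); the kppm methods return the K function
## and pair correlation function of the fitted model as ordinary R
## functions of distance.
if(FALSE) {
  fit <- kppm(redwood ~ 1, "Thomas")
  K <- Kmodel(fit)
  g <- pcfmodel(fit)
  K(0.1)  # fitted K function at distance r = 0.1
  g(0.1)  # fitted pair correlation at r = 0.1
}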
if(ncol(VI) > 0) { if(ncol(VI) > 1 && is.null(colnames(VI))) # make up names colnames(VI) <- paste("Interaction", seq(ncol(VI)), sep=".") # prefix label with name of i-th component colnames(VI) <- paste(nameI, dimnames(VI)[[2L]], sep=".") # handle IsOffset offI <- attr(VI, "IsOffset") if(is.null(offI)) offI <- rep.int(FALSE, ncol(VI)) # tack on IsOffset <- c(IsOffset, offI) # append to matrix V V <- cbind(V, VI) } } if(any(IsOffset)) attr(V, "IsOffset") <- IsOffset return(V) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { ## Sufficient statistic for second order conditional intensity result <- NULL deltaInf <- FALSE interlist <- inte$par for(ii in interlist) { v <- NULL ## look for 'delta2' in component interaction 'ii' if(!is.null(delta2 <- ii$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) ## look for 'delta2' in family of component 'ii' if(is.null(v) && !is.null(delta2 <- ii$family$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) if(is.null(v)) { ## no special algorithm available: generic algorithm needed return(NULL) } if(is.null(result)) { result <- v } else if(inherits(v, c("sparse3Darray", "sparseMatrix"))) { result <- bind.sparse3Darray(result, v, along=3) } else { result <- abind::abind(as.array(result), v, along=3) } deltaInf <- deltaInf | (attr(v, "deltaInf") %orifnull% FALSE) } if(length(dim(deltaInf))) attr(result, "deltaInf") <- deltaInf return(result) }, suffstat = NULL ) class(hybrid.family) <- "isf" spatstat.model/R/cdftest.R0000644000176200001440000000655314424137177015217 0ustar liggesusers# # cdftest.R # # $Revision: 2.33 $ $Date: 2023/01/15 03:23:28 $ # # ## generic cdf.test() and cdf.test.ppp() are moved to spatstat.explore cdf.test.ppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) { modelname <- short.deparse(substitute(model)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) verifyclass(model, "ppm") if(is.character(covariate)) covname <- covariate if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(spatialCDFtest, resolve.defaults(list(model=quote(model), covariate=quote(covariate), test=test), list(interpolate=interpolate, jitter=jitter, nsim=nsim, verbose=verbose), list(...), list(modelname=modelname, covname=covname))) } cdf.test.slrm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., modelname=NULL, covname=NULL) { # get names if(is.null(modelname)) modelname <- short.deparse(substitute(model)) if(is.null(covname)) covname <- short.deparse(substitute(covariate)) dataname <- model$CallInfo$responsename test <- match.arg(test) # stopifnot(is.slrm(model)) stopifnot(is.im(covariate)) # extract data prob <- fitted(model) covim <- as.im(covariate, W=as.owin(prob)) probvalu <- as.matrix(prob) covvalu <- as.matrix(covim) ok <- !is.na(probvalu) & !is.na(covvalu) probvalu <- as.vector(probvalu[ok]) covvalu <- as.vector(covvalu[ok]) # compile weighted cdf's FZ <- ewcdf(covvalu, probvalu/sum(probvalu)) X <- model$Data$response ZX <- safelookup(covim, X) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } if(length(xxx) > 1) { #' non-degenerate cdf ## replace by piecewise linear approximation FZ <- approxfun(xxx, yyy, rule=2) } # now apply cdf U <- FZ(ZX) # Test uniformity of transformed 
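## Usage sketch for models handled by hybrid.family above. The components
## and radii are illustrative assumptions; Hybrid(), Strauss() and Geyer()
## are exported constructors:
##   fit <- ppm(redwood ~ 1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2)))
##   plot(fitin(fit))  # fitted interaction; components are plotted
##                     # separately when not all of them are pairwise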
values result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") # modify the 'htest' entries result$method <- paste("Spatial", testname, "test of", "inhomogeneous Poisson process", "in two dimensions") result$data.name <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n and transformed to uniform distribution under", sQuote(modelname)) # additional class 'cdftest' class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues=covvalu, ZX=ZX, FZ=FZ, FZX=ecdf(ZX), U=U) attr(result, "info") <- list(modelname=modelname, covname=covname, dataname=dataname, csr=FALSE) return(result) } ## Internal code is moved to spatstat.explore spatstat.model/R/hierarchy.R0000644000176200001440000000257414331173073015530 0ustar liggesusers## hierarchy.R ## ## Support functions for hierarchical interactions ## ## $Revision: 1.1 $ $Date: 2015/05/26 08:39:56 $ hierarchicalordering <- function(i, s) { s <- as.character(s) if(inherits(i, "hierarchicalordering")) { ## already a hierarchical ordering if(length(s) != length(i$labels)) stop("Tried to change the number of types in the hierarchical order") i$labels <- s return(i) } n <- length(s) possible <- if(is.character(i)) s else seq_len(n) j <- match(i, possible) if(any(uhoh <- is.na(j))) stop(paste("Unrecognised", ngettext(sum(uhoh), "level", "levels"), commasep(sQuote(i[uhoh])), "amongst possible levels", commasep(sQuote(s)))) if(length(j) < n) stop("Ordering is incomplete") ord <- order(j) m <- matrix(, n, n) rel <- matrix(ord[row(m)] <= ord[col(m)], n, n) dimnames(rel) <- list(s, s) x <- list(indices=j, ordering=ord, labels=s, relation=rel) class(x) <- "hierarchicalordering" x } print.hierarchicalordering <- function(x, ...) { splat(x$labels[x$indices], collapse=" ~> ") invisible(NULL) } hiermat <- function (x, h) { stopifnot(is.matrix(x)) isna <- is.na(x) x[] <- as.character(x) x[isna] <- "" if(inherits(h, "hierarchicalordering")) ## allows h to be NULL, etc x[!(h$relation)] <- "" return(noquote(x)) } spatstat.model/R/pairpiece.R0000644000176200001440000001120414331173073015501 0ustar liggesusers# # # pairpiece.S # # $Revision: 1.24 $ $Date: 2022/03/07 02:24:21 $ # # A pairwise interaction process with piecewise constant potential # # PairPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # PairPiece <- local({ # .... auxiliary functions ........ delP <- function(i, r) { r <- r[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Strauss(r)) return(PairPiece(r)) } # ..... template .......... 
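## Dormant sketch for cdf.test.ppm() defined in cdftest.R above. The model
## and covariate are illustrative assumptions ('cells' is in spatstat.data);
## the generic cdf.test() lives in spatstat.explore, as noted above.
if(FALSE) {
  fit <- ppm(cells ~ x)
  cdf.test(fit, "y")              # Kolmogorov-Smirnov, the default
  cdf.test(fit, "y", test="ad")   # Anderson-Darling variant
}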
BlankPairPiece <- list( name = "Piecewise constant pairwise interaction process", creator = "PairPiece", family = "pairwise.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL), # filled in later parnames = "interaction thresholds", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas if(!all(is.finite(gamma))) return(FALSE) return(all(gamma <= 1) || gamma[1] == 0) }, project = function(coeffs, self){ # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction thresholds r[i] r <- self$par$r # check for NA or Inf bad <- !is.finite(gamma) # gamma > 1 forbidden unless hard core ishard <- is.finite(gamma[1]) && (gamma[1] == 0) if(!ishard) bad <- bad | (gamma > 1) if(!any(bad)) return(NULL) if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delP(firstbad, r)) } else { # consider all candidate submodels subs <- lapply(which(bad), delP, r=r) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(all(is.na(coeffs))) return(max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) if(!any(active)) return(0) else return(max(r[active])) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(all(is.na(coeffs))) return(0) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 prohibited <- (gamma <= epsilon) return(max(0, r[prohibited])) }, Mayer=function(coeffs, self) { # second Mayer cluster integral r <- self$par$r gamma <- (self$interpret)(coeffs, self)$param$gammas # areas of annuli between r[i-1], r[i] areas <- pi * diff(c(0,r)^2) return(sum(areas * (1-gamma))) }, version=NULL # filled in later ) class(BlankPairPiece) <- "interact" PairPiece <- function(r) { instantiate.interact(BlankPairPiece, list(r=r)) } PairPiece <- intermaker(PairPiece, BlankPairPiece) PairPiece }) spatstat.model/R/rmhmodel.ppm.R0000644000176200001440000003061114331173073016145 0ustar liggesusers# # rmhmodel.ppm.R # # convert ppm object into format palatable to rmh.default # # $Revision: 2.65 $ $Date: 2022/01/03 05:37:32 $ # # .Spatstat.rmhinfo # rmhmodel.ppm() # .Spatstat.Rmhinfo <- list( "Multitype Hardcore process" = function(coeffs, inte) { # hard core radii r[i,j] hradii <- inte$par[["hradii"]] return(list(cif='multihard', par=list(hradii=hradii), ntypes=ncol(hradii))) }, "Lennard-Jones process" = function(coeffs, inte) { pa <- inte$interpret(coeffs,inte)$param sigma <- pa[["sigma"]] epsilon <- pa[["epsilon"]] return(list(cif='lennard', par=list(sigma=sigma, epsilon=epsilon), ntypes=1)) }, "Fiksel process" = function(coeffs, inte) { hc <- inte$par[["hc"]] r <- inte$par[["r"]] kappa <- inte$par[["kappa"]] a <- inte$interpret(coeffs,inte)$param$a return(list(cif='fiksel', par=list(r=r,hc=hc,kappa=kappa,a=a), ntypes=1)) }, "Diggle-Gates-Stibbard process" = function(coeffs, inte) { rho <- inte$par[["rho"]] return(list(cif='dgs', par=list(rho=rho), ntypes=1)) }, "Diggle-Gratton process" = function(coeffs, inte) { kappa <- inte$interpret(coeffs,inte)$param$kappa delta <- inte$par[["delta"]] rho <- inte$par[["rho"]] return(list(cif='diggra', par=list(kappa=kappa,delta=delta,rho=rho), ntypes=1)) }, "Hard core process" = function(coeffs, inte) { hc <- inte$par[["hc"]] return(list(cif='hardcore', par=list(hc=hc), ntypes=1)) }, "Geyer saturation process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] sat <- inte$par[["sat"]] return(list(cif='geyer', par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Soft core process" = function(coeffs, inte) { kappa <- inte$par[["kappa"]] sigma <- inte$interpret(coeffs,inte)$param$sigma return(list(cif="sftcr", par=list(sigma=sigma,kappa=kappa), ntypes=1)) }, "Strauss process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "strauss", par = list(gamma = gamma, r = r), ntypes=1)) }, "Strauss - hard core process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] hc <- inte$par[["hc"]] return(list(cif='straush', par=list(gamma=gamma,r=r,hc=hc), ntypes=1)) }, "Triplets process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "triplets", par = list(gamma = gamma, r = r), ntypes=1)) }, "Penttinen process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif='penttinen', par=list(gamma=gamma, r=r), ntypes=1)) }, "Multitype Strauss process" = function(coeffs, inte) { # interaction radii r[i,j] radii <- inte$par[["radii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straussm', 
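## Usage sketch for PairPiece() as defined above; the distance bands are
## illustrative assumptions ('cells' is in spatstat.data):
##   fit <- ppm(cells ~ 1, PairPiece(r = c(0.05, 0.1, 0.2)))
##   fit   # prints one fitted gamma per distance band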
par=list(gamma=gamma,radii=radii), ntypes=ncol(radii))) }, "Multitype Strauss Hardcore process" = function(coeffs, inte) { # interaction radii r[i,j] iradii <- inte$par[["iradii"]] # hard core radii r[i,j] hradii <- inte$par[["hradii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straushm', par=list(gamma=gamma,iradii=iradii,hradii=hradii), ntypes=ncol(iradii))) }, "Piecewise constant pairwise interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] gamma <- (inte$interpret)(coeffs, inte)$param$gammas h <- stepfun(r, c(gamma, 1)) return(list(cif='lookup', par=list(h=h), ntypes=1)) }, "Area-interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] eta <- (inte$interpret)(coeffs, inte)$param$eta return(list(cif='areaint', par=list(eta=eta,r=r), ntypes=1)) }, "hybrid Geyer process" = function(coeffs, inte) { r <- inte$par[["r"]] sat <- inte$par[["sat"]] gamma <- (inte$interpret)(coeffs,inte)$param$gammas return(list(cif='badgey',par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Hybrid interaction"= function(coeffs, inte){ # for hybrids, $par is a list of the component interactions interlist <- inte$par # check for Poisson components ispois <- unlist(lapply(interlist, is.poisson)) if(all(ispois)) { # reduces to Poisson Z <- list(cif='poisson', par=list()) return(Z) } else if(any(ispois)) { # remove Poisson components interlist <- interlist[!ispois] } # N <- length(interlist) cifs <- character(N) pars <- vector(mode="list", length=N) ntyp <- integer(N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("Simulation of a hybrid-of-hybrid interaction is not implemented") # get RMH mapping for I-th component siminfoI <- .Spatstat.Rmhinfo[[interI$name]] if(is.null(siminfoI)) stop(paste("Simulation of a fitted", sQuote(interI$name), "has not yet been implemented"), call.=FALSE) # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) # extract coefficients # (there may be none, if this interaction is an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to 'siminfoI' if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # compute RMH info ZI <- siminfoI(coeffsI, interI) cifs[i] <- ZI$cif pars[[i]] <- ZI$par ntyp[i] <- ZI$ntypes } nt <- unique(ntyp[ntyp != 1]) if(length(nt) > 1) stop(paste("Hybrid components have different numbers of types:", commasep(nt))) if(N == 1) { # single cif: revert to original format: par is a list of parameters Z <- list(cif=cifs[1], par=pars[[1]], ntypes=ntyp) } else { # hybrid cif: par is a list of lists of parameters Z <- list(cif=cifs, par=pars, ntypes=ntyp) } return(Z) } ) # OTHER S not yet implemented: # # # interaction object rmh.default # ------------------ ----------- # # OrdThresh # rmhmodel.ppm <- function(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) { ## converts ppm object `model' into format palatable to rmh.default verifyclass(model, "ppm") argh <- list(...) if(!is.null(new.coef)) model <- tweak.coefs(model, new.coef) ## Ensure the fitted model is valid ## (i.e. 
exists mathematically as a point process) if(!valid.ppm(model)) { if(project) { if(verbose) cat("Model is invalid - projecting it\n") model <- project.ppm(model, fatal=TRUE) } else stop("The fitted model is not a valid point process") } if(verbose) cat("Extracting model information...") ## Extract essential information Y <- summary(model, quick="no variances") if(Y$marked && !Y$multitype) stop("Not implemented for marked point processes other than multitype") if(Y$uses.covars && is.data.frame(model$covariates)) stop(paste("This model cannot be simulated, because the", "covariate values were given as a data frame.")) ## enforce defaults for `control' control <- rmhcontrol(control) ## adjust to peculiarities of model control <- rmhResolveControl(control, model) ######## Interpoint interaction if(Y$poisson) { Z <- list(cif="poisson", par=list()) # par is filled in later } else { ## First check version number of ppm object if(Y$antiquated) stop(paste("This model was fitted by a very old version", "of the package: spatstat", Y$version, "; simulation is not possible.", "Re-fit the model using your original code")) else if(Y$old) warning(paste("This model was fitted by an old version", "of the package: spatstat", Y$version, ". Re-fit the model using update.ppm", "or your original code")) ## Extract the interpoint interaction object inte <- Y$entries$interaction ## Determine whether the model can be simulated using rmh siminfo <- .Spatstat.Rmhinfo[[inte$name]] if(is.null(siminfo)) stop(paste("Simulation of a fitted", sQuote(inte$name), "has not yet been implemented")) ## Get fitted model's canonical coefficients coeffs <- Y$entries$coef if(newstyle.coeff.handling(inte)) { ## extract only the interaction coefficients Vnames <- Y$entries$Vnames IsOffset <- Y$entries$IsOffset coeffs <- coeffs[Vnames[!IsOffset]] } ## Translate the model to the format required by rmh.default Z <- siminfo(coeffs, inte) if(is.null(Z)) stop("The model cannot be simulated") else if(is.null(Z$cif)) stop(paste("Internal error: no cif returned from .Spatstat.Rmhinfo")) } ## Don't forget the types if(Y$multitype && is.null(Z$types)) Z$types <- levels(Y$entries$marks) ######## Window for result if(missing(w) || is.null(w)) { ## check for outdated argument name 'win' if(!is.na(m <- match("win", names(argh)))) { warning("Argument 'win' to rmhmodel.ppm is deprecated; use 'w'") w <- argh[[m]] argh <- argh[-m] } else w <- Y$entries$data$window } Z$w <- w ######## Expanded window for simulation? 
covims <- if(Y$uses.covars) model$covariates[Y$covars.used] else NULL wsim <- rmhResolveExpansion(w, control, covims, "covariate")$wsim ###### Trend or Intensity ############ if(verbose) cat("Evaluating trend...") if(Y$stationary) { ## first order terms (beta or beta[i]) are carried in Z$par beta <- as.numeric(Y$trend$value) Z$trend <- NULL } else { ## trend terms present ## all first order effects are subsumed in Z$trend beta <- if(!Y$marked) 1 else rep.int(1, length(Z$types)) ## predict on window possibly larger than original data window Z$trend <- if(wsim$type == "mask") predict(model, window=wsim, type="trend", locations=wsim) else predict(model, window=wsim, type="trend") } Ncif <- length(Z$cif) if(Ncif == 1) { ## single interaction Z$par[["beta"]] <- beta } else { ## hybrid interaction if(all(Z$ntypes == 1)) { ## unmarked model: scalar 'beta' is absorbed in first cif absorb <- 1 } else { ## multitype model: vector 'beta' is absorbed in a multitype cif absorb <- min(which(Z$ntypes > 1)) } Z$par[[absorb]]$beta <- beta ## other cifs have par$beta = 1 for(i in (1:Ncif)[-absorb]) Z$par[[i]]$beta <- rep.int(1, Z$ntypes[i]) } if(verbose) cat("done.\n") Z <- do.call(rmhmodel, append(list(Z), argh)) return(Z) } spatstat.model/R/compareFit.R0000644000176200001440000000503114331173072015631 0ustar liggesusers# # compareFit.R # # $Revision: 1.4 $ $Date: 2022/01/04 05:30:06 $ compareFit <- function(object, Fun, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=NULL, modelnames=NULL, same=NULL, different=NULL) { dotargs <- list(...) h <- hyperframe(obj=object, tren=trend, inte=interaction) N <- nrow(h) if(N == 0) stop("No objects specified") # determine rbord for summary statistics if(is.null(rbord) && !is.null(interaction)) rbord <- max(with(h, reach(inte))) h$rbord <- rbord # try to get nice model names if(is.null(modelnames)) { if(inherits(trend, "formula") && is.interact(interaction) && inherits(object, c("anylist", "listof")) && all(nzchar(names(object))) && length(names(object)) == nrow(h)) modelnames <- names(object) else if(inherits(trend, c("anylist", "listof")) && all(nzchar(names(trend))) && length(names(trend)) == nrow(h)) modelnames <- names(trend) else if(inherits(interaction, c("anylist", "listof")) && all(nzchar(names(interaction))) && length(names(interaction)) == nrow(h)) modelnames <- names(interaction) else modelnames <- row.names(h) } row.names(h) <- make.names(modelnames) # fix a common vector of r values if(is.null(r)) { # compute first function fun1 <- with(h[1L,,drop=TRUE,strip=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=NULL, breaks=breaks), dotargs))) # extract r values r <- with(fun1, .x) } # compute the subsequent functions if(N == 1L) funs2toN <- NULL else funs2toN <- with(h[-1L, , drop=TRUE, strip=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=r), dotargs))) if(N == 2) funs2toN <- list(funs2toN) # collect all functions in a list funs <- as.anylist(append(list(fun1), funs2toN)) names(funs) <- row.names(h) # collapse together out <- collapse.fv(funs, same=same, different=different) return(out) } spatstat.model/R/dg.R0000644000176200001440000001577214331173072014147 0ustar liggesusers# # dg.S # # $Revision: 1.25 $ $Date: 2022/05/21 08:53:38 $ # # Diggle-Gratton pair potential # # DiggleGratton <- local({ # .... auxiliary functions ...... 
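## Dormant sketch for rmhmodel.ppm() defined in rmhmodel.ppm.R above. The
## fitted model and the simulation settings are illustrative assumptions;
## rmh() and the start/control conventions come from spatstat.random.
if(FALSE) {
  fit <- ppm(swedishpines ~ 1, Strauss(r=7))
  mod <- rmhmodel(fit)   # dispatches to rmhmodel.ppm
  X <- rmh(mod, start=list(n.start=npoints(swedishpines)),
           control=list(nrep=1e5))
  plot(X)
}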
diggraterms <- function(X, Y, idX, idY, delta, rho, splitInf=FALSE) { stopifnot(is.numeric(delta)) stopifnot(is.numeric(rho)) stopifnot(delta < rho) ## sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) ## call C routine if(!splitInf) { ## usual case: allow cif to be zero because of hard core out <- .C(SM_Ediggra, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE="spatstat.model") answer <- integer(nX) answer[oX] <- out$values } else { ## split off the hard core terms and return them separately out <- .C(SM_ESdiggra, nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), positive = as.double(double(nX)), hardcore = as.integer(integer(nX)), PACKAGE="spatstat.model") answer <- integer(nX) hardcore <- logical(nX) answer[oX] <- out$positive hardcore[oX] <- as.logical(out$hardcore) attr(answer, "hardcore") <- hardcore } return(answer) } # .......... template object .......... BlankDG <- list( name = "Diggle-Gratton process", creator = "DiggleGratton", family = "pairwise.family", #evaluated later pot = function(d, par) { delta <- par$delta rho <- par$rho above <- (d > rho) inrange <- (!above) & (d > delta) h <- above + inrange * (d - delta)/(rho - delta) return(log(h)) }, par = list(delta=NULL, rho=NULL), # to be filled in later parnames = list("lower limit delta", "upper limit rho"), hasInf = TRUE, selfstart = function(X, self) { # self starter for DiggleGratton nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(!is.na(delta <- self$par$delta)) { # value fixed by user or previous invocation # check it if(md < delta) warning(paste("Hard core distance delta is too large;", "some data points will have zero probability")) return(self) } if(md == 0) warning(paste("Pattern contains duplicated points:", "hard core distance delta must be zero")) # take hard core = minimum interpoint distance * n/(n+1) deltaX <- md * nX/(nX+1) DiggleGratton(delta=deltaX, rho=self$par$rho) }, init = function(self) { delta <- self$par$delta rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L) stop("upper limit rho must be a single number") stopifnot(is.finite(rho)) if(!is.na(delta)) { if(!is.numeric(delta) || length(delta) != 1L) stop("lower limit delta must be a single number") stopifnot(delta >= 0) stopifnot(rho > delta) } else stopifnot(rho >= 0) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) return(list(param=list(kappa=kappa), inames="exponent kappa", printable=dround(kappa))) }, valid = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) return(is.finite(kappa) && (kappa >= 0)) }, project = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) if(is.finite(kappa) && (kappa >= 0)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ rho <- self$par$rho if(all(is.na(coeffs))) return(rho) kappa <- coeffs[1L] delta <- self$par$delta if(abs(kappa) <= epsilon) return(delta) else return(rho) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) { return(self$par$delta) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { ## fast evaluator for DiggleGratton interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGratton") delta <- potpars$delta rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] values <- diggraterms(U, X, idU, idX, delta, rho, splitInf) result <- log(pmax.int(0, values)) result <- matrix(result, ncol=1L) if(!splitInf) attr(result, "-Inf") <- attr(values, "hardcore") return(result) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho delta <- self$par$delta width <- rho - delta kappa <- coeffs[1L] ans <- pi * (rho^2 - 2 * rho* width/(kappa + 1) + 2 * width^2/((kappa + 1) * (kappa + 2))) return(ans) } ) class(BlankDG) <- "interact" DiggleGratton <- function(delta=NA, rho) { instantiate.interact(BlankDG, list(delta=delta, rho=rho)) } DiggleGratton <- intermaker(DiggleGratton, BlankDG) DiggleGratton }) spatstat.model/R/plot.ppm.R0000644000176200001440000000542414331173073015320 0ustar liggesusers# # plot.ppm.S # # $Revision: 2.12 $ $Date: 2016/06/11 08:02:17 $ # # plot.ppm() # Plot a point process model fitted by ppm(). # # # plot.ppm <- function(x, ngrid = c(40,40), superimpose = TRUE, trend=TRUE, cif=TRUE, se=TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it=TRUE, locations=NULL, covariates=NULL, ...) { model <- x # Plot a point process model fitted by ppm(). 
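## Dormant sketch for the DiggleGratton() interaction defined in dg.R above.
## Parameter values are illustrative assumptions ('cells' is in
## spatstat.data); the exponent kappa is estimated from the data.
if(FALSE) {
  fit <- ppm(cells ~ 1, DiggleGratton(delta=0.05, rho=0.1))
  coef(summary(fit))  # includes the fitted exponent kappa
}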
# verifyclass(model, "ppm") # # find out what kind of model it is # mod <- summary(model, quick="entries") stationary <- mod$stationary poisson <- mod$poisson marked <- mod$marked multitype <- mod$multitype data <- mod$entries$data if(marked) { if(!multitype) stop("Not implemented for general marked point processes") else mrkvals <- levels(marks(data)) } else mrkvals <- 1 # ntypes <- length(mrkvals) # # Interpret options # ----------------- # # Whether to plot trend, cif, se if(!trend && !cif && !se) { cat(paste("Nothing plotted;", sQuote("trend"), ",", sQuote("cif"), "and", sQuote("se"), "are all FALSE\n")) return(invisible(NULL)) } # Suppress uninteresting plots # unless explicitly instructed otherwise if(missing(trend)) trend <- !stationary if(missing(cif)) cif <- !poisson if(missing(se)) se <- poisson && !stationary else if(se && !poisson) { warning(paste("standard error calculation", "is only implemented for Poisson models")) se <- FALSE } if(!trend && !cif && !se) { cat("Nothing plotted -- all plots selected are flat surfaces.\n") return(invisible(NULL)) } # # style of plot: suppress pseudo-default # if(missing(how)) how <- "image" # # # Do the prediction # ------------------ out <- list() surftypes <- c("trend","cif","se")[c(trend,cif,se)] ng <- if(missing(ngrid) && !missing(locations)) NULL else ngrid for (ttt in surftypes) { p <- predict(model, ngrid=ng, locations=locations, covariates=covariates, type = ttt, getoutofjail=TRUE) # permit outdated usage type="se" if(is.im(p)) p <- list(p) out[[ttt]] <- p } # Make it a plotppm object # ------------------------ class(out) <- "plotppm" attr(out, "mrkvals") <- mrkvals # Actually plot it if required # ---------------------------- if(plot.it) { if(!superimpose) data <- NULL if(missing(pause)) pause <- NULL plot(out,data=data,trend=trend,cif=cif,se=se,how=how,pause=pause, ...) 
} return(invisible(out)) } spatstat.model/R/lennard.R0000644000176200001440000000764714331173073015203 0ustar liggesusers# # # lennard.R # # $Revision: 1.24 $ $Date: 2022/05/23 02:33:06 $ # # Lennard-Jones potential # # # ------------------------------------------------------------------- # LennardJones <- local({ BlankLJ <- list( name = "Lennard-Jones process", creator = "LennardJones", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { d6 <- d^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) } else { # expand around sig0 and set large numbers to Inf drat <- d/sig0 d6 <- drat^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) small <- (drat < 1/4) small <- array(c(small, small), dim=c(dim(d), 2)) p[small] <- -Inf big <- (drat > 4) big <- array(c(big, big), dim=c(dim(d), 2)) p[big] <- 0 } return(p) }, par = list(sigma0=NULL), # filled in later parnames = "Initial approximation to sigma", hasInf = TRUE, selfstart = function(X, self) { # self starter for Lennard Jones # attempt to set value of 'sigma0' if(!is.na(self$par$sigma0)) { # value fixed by user or previous invocation return(self) } if(npoints(X) < 2) { # not enough points return(self) } s0 <- minnndist(X) if(s0 == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Lennard-Jones model")) s0 <- mean(nndist(X)) if(s0 == 0) return(self) } LennardJones(s0) }, init = function(...){}, # do nothing update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta1 <- as.numeric(coeffs[1L]) theta2 <- as.numeric(coeffs[2L]) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 if(sign(theta1) * sign(theta2) == 1) { sigma <- sig0 * (theta1/theta2)^(1/6) epsilon <- (theta2^2)/(4 * theta1) } else { sigma <- NA epsilon <- NA } return(list(param=list(sigma=sigma, epsilon=epsilon), inames="interaction parameters", printable=signif(c(sigma=sigma,epsilon=epsilon)))) }, valid = function(coeffs, self) { p <- unlist(self$interpret(coeffs, self)$param) return(all(is.finite(p) & (p > 0))) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { if(anyNA(coeffs) || epsilon == 0) return(Inf) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 theta1 <- abs(coeffs[1L]) theta2 <- abs(coeffs[2L]) return(sig0 * max((theta1/epsilon)^(1/12), (theta2/epsilon)^(1/6))) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) 
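## Usage sketch for plot.ppm() as defined above; the model is an
## illustrative assumption. The trend, cif and se surfaces are selected
## individually, and 'how' chooses the rendering style:
##   fit <- ppm(cells ~ x)
##   plot(fit, how="image", se=FALSE, pause=FALSE)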
    {
      if(epsilon == 0) return(0)
      if(anyNA(coeffs)) return(NA)
      sig0 <- self$par$sigma0
      if(is.na(sig0) || is.null(sig0)) sig0 <- 1
      theta1 <- abs(as.numeric(coeffs[1L]))
      ## hard core h satisfies exp(-theta1 * (h/sig0)^(-12)) = epsilon
      h <- sig0 * (theta1/(-log(epsilon)))^(1/12)
      return(h)
    },
    version=NULL # filled in later
  )
  class(BlankLJ) <- "interact"

  LennardJones <- function(sigma0=NA) {
    if(is.null(sigma0) || !is.finite(sigma0))
      sigma0 <- NA
    instantiate.interact(BlankLJ, list(sigma0=sigma0))
  }

  LennardJones <- intermaker(LennardJones, BlankLJ)

  LennardJones
})
spatstat.model/R/saturated.R0000644000176200001440000000333114331173073015536 0ustar liggesusers#
#
#    saturated.S
#
#    $Revision: 1.10 $    $Date: 2019/02/20 03:34:50 $
#
#    Saturated pairwise process with user-supplied potential
#
#    Saturated()    create a saturated pairwise process
#                   [an object of class 'interact']
#                   with user-supplied potential
#
#
# -------------------------------------------------------------------
#

Saturated <- function(pot, name) {
  if(missing(name))
    name <- "Saturated process with user-defined potential"
  fop <- names(formals(pot))
  if(!isTRUE(all.equal(fop, c("d", "par"))) &&
     !isTRUE(all.equal(fop, c("d", "tx", "tu", "par"))))
    stop(paste("Formal arguments of pair potential function",
               sQuote("pot"),
               "must be either (d, par) or (d, tx, tu, par)"))
  out <- list(
    name     = name,
    creator  = "Saturated",
    family   = pairsat.family,
    pot      = pot,
    par      = NULL,
    parnames = NULL,
    hasInf   = NA,
    init     = NULL,
    update   = function(self, ...){
      do.call(Saturated,
              resolve.defaults(list(...),
                               list(pot=self$pot,
                                    name=self$name)))
    },
    print = function(self) {
      cat("Potential function:\n")
      print(self$pot)
      invisible()
    },
    version=versionstring.spatstat()
  )
  class(out) <- "interact"
  return(out)
}

Saturated <- intermaker(Saturated,
                        list(creator="Saturated",
                             name="saturated process with user-defined potential",
                             par=formals(Saturated),
                             parnames=list("the potential",
                                           "the name of the interaction")))
spatstat.model/R/fiksel.R0000644000176200001440000001425114331173073015022 0ustar liggesusers#
#
#    fiksel.R
#
#    $Revision: 1.21 $    $Date: 2022/05/21 08:53:38 $
#
#    Fiksel interaction
#
#    See Stoyan, Kendall & Mecke 1987, p 161
#
# -------------------------------------------------------------------
#

Fiksel <- local({

  # ......... auxiliary functions ...........

  fikselterms <- function(U, X, r, kappa, EqualPairs=NULL) {
    answer <- crossfikselterms(U, X, r, kappa)
    nU <- npoints(U)
    # subtract contributions from identical pairs (exp(-0) = 1 for each)
    if(length(EqualPairs) > 0) {
      idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU)))
      answer <- answer - idcount
    }
    return(answer)
  }

  crossfikselterms <- function(X, Y, r, kappa) {
    stopifnot(is.numeric(r))
    # sort in increasing order of x coordinate
    oX <- fave.order(X$x)
    oY <- fave.order(Y$x)
    Xsort <- X[oX]
    Ysort <- Y[oY]
    nX <- npoints(X)
    nY <- npoints(Y)
    # call C routine
    out <- .C(SM_Efiksel,
              nnsource = as.integer(nX),
              xsource  = as.double(Xsort$x),
              ysource  = as.double(Xsort$y),
              nntarget = as.integer(nY),
              xtarget  = as.double(Ysort$x),
              ytarget  = as.double(Ysort$y),
              rrmax    = as.double(r),
              kkappa   = as.double(kappa),
              values   = as.double(double(nX)),
              PACKAGE="spatstat.model")
    answer <- integer(nX)
    answer[oX] <- out$values
    return(answer)
  }

  # ........ template object ..............
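  # The pair potential implemented by 'pot' below is
  #      V(d) = -Inf               if d <= hc
  #      V(d) = exp(-kappa * d)    if hc < d <= r
  #      V(d) = 0                  if d > r
  # and the single canonical coefficient fitted by ppm() scales the middle
  # branch (reported by 'interpret' as the interaction strength 'a').
  # Illustrative use (a sketch only; 'X' stands for some point pattern,
  # and 'hc' is self-started from the data when omitted):
  #     fit <- ppm(X ~ 1, Fiksel(r=0.1, kappa=0.5))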
BlankFiksel <- list( name = "Fiksel process", creator = "Fiksel", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- (d <= par$r) * exp( - d * par$kappa) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL, kappa=NULL), # filled in later parnames = c("interaction distance", "hard core distance", "rate parameter"), hasInf = TRUE, selfstart = function(X, self) { # self starter for Fiksel nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } if(md == 0) warning(paste("Pattern contains duplicated points:", "hard core must be zero")) # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Fiksel(r=self$par$r, hc = hcX, kappa=self$par$kappa) }, init = function(self) { r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa check.1.real(r) check.1.real(kappa) if(!is.na(hc)) { check.1.real(hc) stopifnot(hc > 0) stopifnot(r > hc) } else stopifnot(r > 0) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { a <- as.numeric(coeffs[1L]) return(list(param=list(a=a), inames="interaction strength a", printable=signif(a))) }, valid = function(coeffs, self) { a <- (self$interpret)(coeffs, self)$param$a return(is.finite(a)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r hc <- self$par$hc if(anyNA(coeffs)) return(r) a <- coeffs[1L] if(abs(a) <= epsilon) return(hc) else return(r) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) { return(self$par$hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) 
{ ## fast evaluator for Fiksel interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Fiksel") r <- potpars$r hc <- potpars$hc kappa <- potpars$kappa hclose <- (strausscounts(U, X, hc, EqualPairs) != 0) fikselbit <- fikselterms(U, X, r, kappa, EqualPairs) if(!splitInf) { answer <- ifelseAX(hclose, -Inf, fikselbit) answer <- matrix(answer, ncol=1L) } else { answer <- fikselbit answer <- matrix(answer, ncol=1L) attr(answer, "-Inf") <- hclose } return(answer) }, Mayer=function(coeffs, self) { # second Mayer cluster integral a <- as.numeric(coeffs[1L]) r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa f <- function(x, kappa, a){ 2 * pi * x * (1 - exp(a * exp(-x * kappa))) } hardbit <- integrate(f=f, lower=hc, upper=r, a=a, kappa=kappa) mess <- hardbit[["message"]] if(!identical(mess, "OK")) { warning(mess) return(NA) } return(pi * hc^2 + hardbit$value) } ) class(BlankFiksel) <- "interact" Fiksel <- function(r, hc=NA, kappa) { instantiate.interact(BlankFiksel, list(r = r, hc = hc, kappa=kappa)) } Fiksel <- intermaker(Fiksel, BlankFiksel) Fiksel }) spatstat.model/R/reach.R0000644000176200001440000000162514331173073014630 0ustar liggesusers# # reach.R # # $Revision: 1.10 $ $Date: 2022/11/03 11:08:33 $ # ## The generic 'reach' is now in spatstat.random reach.interact <- function(x, ...) { verifyclass(x, "interact") irange <- x$irange if(is.null(irange)) return(Inf) if(!is.function(irange)) stop("Internal error - x$irange is not a function") ir <- irange(x) if(is.na(ir)) ir <- Inf return(ir) } reach.ppm <- function(x, ..., epsilon=0) { verifyclass(x, "ppm") # Poisson case if(is.poisson.ppm(x)) return(0) # extract info inte <- x$interaction coeffs <- coef(x) if(newstyle.coeff.handling(inte)) { # extract only interaction coefficients Vnames <- x$internal$Vnames coeffs <- coeffs[Vnames] } # apply 'irange' function irange <- inte$irange if(is.null(irange)) return(Inf) ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } spatstat.model/R/interact.R0000644000176200001440000002724714331173073015367 0ustar liggesusers# # interact.S # # # $Revision: 1.32 $ $Date: 2022/03/07 04:00:24 $ # # Class 'interact' representing the interpoint interaction # of a point process model # (e.g. Strauss process with a given threshold r) # # Class 'isf' representing a generic interaction structure # (e.g. pairwise interactions) # # These do NOT specify the "trend" part of the model, # only the "interaction" component. # # The analogy is: # # glm() ppm() # # model formula trend formula # # family interaction # # That is, the 'systematic' trend part of a point process # model is specified by a 'trend' formula argument to ppm(), # and the interpoint interaction is specified as an 'interact' # object. # # You only need to know about these classes if you want to # implement a new point process model. # # THE DISTINCTION: # An object of class 'isf' describes an interaction structure # e.g. pairwise interaction, triple interaction, # pairwise-with-saturation, Dirichlet interaction. # Think of it as determining the "order" of interaction # but not the specific interaction potential function. # # An object of class 'interact' completely defines the interpoint # interactions in a specific point process model, except for the # regular parameters of the interaction, which are to be estimated # by ppm() or otherwise. An 'interact' object specifies the values # of all the 'nuisance' or 'irregular' parameters. 
An example # is the Strauss process with a given, fixed threshold r # but with the parameters beta and gamma undetermined. # # DETAILS: # # An object of class 'isf' contains the following: # # $name Name of the interaction structure # e.g. "pairwise" # # $print How to 'print()' this object # [A function; invoked by the 'print' method # 'print.isf()'] # # $eval A function which evaluates the canonical # sufficient statistic for an interaction # of this general class (e.g. any pairwise # interaction.) # # If lambda(u,X) denotes the conditional intensity at a point u # for the point pattern X, then we assume # log lambda(u, X) = theta . S(u,X) # where theta is the vector of regular parameters, # and we call S(u,X) the sufficient statistic. # # A typical calling sequence for the $eval function is # # (f$eval)(X, U, E, potentials, potargs, correction) # # where X is the data point pattern, U is the list of points u # at which the sufficient statistic S(u,X) is to be evaluated, # E is a logical matrix equivalent to (X[i] == U[j]), # $potentials defines the specific potential function(s) and # $potargs contains any nuisance/irregular parameters of these # potentials [the $potargs are passed to the $potentials without # needing to be understood by $eval.] # $correction is the name of the edge correction method. # # # An object of class 'interact' contains the following: # # # $name Name of the specific potential # e.g. "Strauss" # # $family Object of class "isf" describing # the interaction structure # # $pot The interaction potential function(s) # -- usually a function or list of functions. # (passed as an argument to $family$eval) # # $par list of any nuisance/irregular parameters # (passed as an argument to $family$eval) # # $parnames vector of long names/descriptions # of the parameters in 'par' # # $init() initialisation action # or NULL indicating none required # # $update() A function to modify $par # [Invoked by 'update.interact()'] # or NULL indicating a default action # # $print How to 'print()' this object # [Invoked by 'print' method 'print.interact()'] # or NULL indicating a default action # # -------------------------------------------------------------------------- print.isf <- function(x, ...) { if(is.null(x)) return(invisible(NULL)) verifyclass(x, "isf") if(!is.null(x$print)) (x$print)(x) invisible(NULL) } print.interact <- function(x, ..., family, brief=FALSE, banner=TRUE) { verifyclass(x, "interact") if(missing(family)) family <- waxlyrical('extras') #' Print name of model if(banner) { if(family && !brief && !is.null(xf <- x$family)) print.isf(xf) splat(if(!brief) "Interaction:" else NULL, x$name, sep="") } # Now print the parameters if(!is.null(x$print)) { (x$print)(x) } else { # default # just print the parameter names and their values pwords <- x$parnames parval <- x$par pwords <- paste(toupper(substring(pwords, 1, 1)), substring(pwords, 2), sep="") isnum <- sapply(parval, is.numeric) parval[isnum] <- lapply(parval[isnum], signif, digits=getOption("digits")) splat(paste(paste0(pwords, ":\t", parval), collapse="\n")) } invisible(NULL) } is.interact <- function(x) { inherits(x, "interact") } update.interact <- function(object, ...) { verifyclass(object, "interact") if(!is.null(object$update)) (object$update)(object, ...) else { # Default # First update the version if(outdated.interact(object)) object <- reincarnate.interact(object) # just match the arguments in "..." # with those in object$par and update them want <- list(...) 
if(length(want) > 0) { m <- match(names(want),names(object$par)) nbg <- is.na(m) if(any(nbg)) { which <- paste((names(want))[nbg]) warning(paste("Arguments not matched: ", which)) } m <- m[!nbg] object$par[m] <- want } # call object's own initialisation routine if(!is.null(object$init)) (object$init)(object) object } } is.poisson.interact <- function(x) { verifyclass(x, "interact") is.null(x$family) } parameters.interact <- function(model, ...) { model$par } # Test whether interact object was made by an older version of spatstat outdated.interact <- function(object) { ver <- object$version older <- is.null(ver) || (package_version(ver) < versionstring.spatstat()) return(older) } # Test whether the functions in the interaction object # expect the coefficient vector to contain ALL coefficients, # or only the interaction coefficients. # This change was introduced in 1.11-0, at the same time # as interact objects were given version numbers. newstyle.coeff.handling <- function(object) { stopifnot(inherits(object, "interact")) ver <- object$version old <- is.null(ver) || (package_version(ver) < "1.11") return(!old) } # ###### # # Re-create an interact object in the current version of spatstat # # reincarnate.interact <- function(object) { # re-creates an interact object in the current version of spatstat if(!is.null(object$update)) { newobject <- (object$update)(object) return(newobject) } par <- object$par # pot <- object$pot name <- object$name # get creator function creator <- object$creator if(is.null(creator)) { # old version: look up list creator <- .Spatstat.Old.InteractionList[[name]] if(is.null(creator)) stop(paste("Don't know how to update", sQuote(name), "to current version of spatstat")) } if(is.character(creator)) creator <- get(creator) if(!is.function(creator) && !is.expression(creator)) stop("Internal error: creator is not a function or expression") # call creator if(is.expression(creator)) newobject <- eval(creator) else { # creator is a function # It's assumed that the creator function's arguments are # either identical to components of 'par' (the usual case) # or to one of the components of the object itself (Ord, Saturated) # or to printfun=object$print (Pairwise). 
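  # For example, the creator 'Strauss' has the single formal argument 'r',
  # which matches the component par$r of an old Strauss interaction object.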
argnames <- names(formals(creator)) available <- append(par, object) available <- append(available, list(printfun=object$print)) ok <- argnames %in% names(available) if(!all(ok)) stop(paste("Internal error:", ngettext(sum(!ok), "argument", "arguments"), paste(sQuote(argnames[!ok]), collapse=", "), "in creator function were not understood")) newobject <- do.call(creator, available[argnames]) } if(!inherits(newobject, "interact")) stop("Internal error: creator did not return an object of class interact") return(newobject) } # This list is necessary to deal with older formats of 'interact' objects # which did not include the creator name .Spatstat.Old.InteractionList <- list("Diggle-Gratton process" = "DiggleGratton", "Geyer saturation process" = "Geyer", "Lennard-Jones potential" = "LennardJones", "Multitype Strauss process" = "MultiStrauss", "Multitype Strauss Hardcore process" = "MultiStraussHard", "Ord process with threshold potential"="OrdThresh", "Piecewise constant pairwise interaction process"="PairPiece", "Poisson process" = "Poisson", "Strauss process" = "Strauss", "Strauss - hard core process" = "StraussHard", "Soft core process" = "Softcore", # weird ones: "Ord process with user-defined potential" = expression(Ord(object$pot)), "Saturated process with user-defined potential" =expression(Saturated(object$pot)), "user-defined pairwise interaction process"= expression( Pairwise(object$pot, par=object$par, parnames=object$parnames, printfun=object$print)) ) as.interact <- function(object) { UseMethod("as.interact") } as.interact.interact <- function(object) { verifyclass(object, "interact") return(object) } as.isf <- function(object) { if(inherits(object, "isf")) return(object) object <- as.interact(object) return(object$family) } interactionfamilyname <- function(object) { as.isf(object)$name } interactionorder <- function(object) { UseMethod("interactionorder") } interactionorder.isf <- function(object) { return(object$order %orifnull% Inf) } interactionorder.interact <- function(object) { ## order may be specified in the interaction object itself (e.g. in a hybrid, or Poisson) ## but is usually determined by the interaction family object$order %orifnull% interactionorder(object$family) } interactionorder.fii <- interactionorder.ppm <- function(object) { interactionorder(as.interact(object)) } # Extract version string from interact object versionstring.interact <- function(object) { verifyclass(object, "interact") v <- object$version return(v) # NULL before 1.11-0 } #### internal code for streamlining initialisation of interactions # # x should be a partially-completed 'interact' object # instantiate.interact <- function(x, par=NULL) { if(is.character(x$family)) x$family <- get(x$family) # set parameter values x$par <- par # validate parameters if(!is.null(x$init)) x$init(x) x$version <- versionstring.spatstat() return(x) } spatstat.model/R/bermantest.R0000644000176200001440000000220714460146550015712 0ustar liggesusers# # bermantest.R # # Test statistics from Berman (1986) # # $Revision: 1.26 $ $Date: 2023/06/20 02:43:35 $ # # ## Code for generic berman.test and berman.test.ppp ## is moved to spatstat.explore berman.test.ppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) force(model) force(covariate) if(is.character(covariate)) covname <- covariate verifyclass(model, "ppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(quote(model), quote(covariate), which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Qname))) } ## Code for generic berman.test and berman.test.ppp ## is moved to spatstat.explore spatstat.model/R/response.R0000644000176200001440000000114714243551505015405 0ustar liggesusers#' #' response.R #' #' Extract the values of the response, from a fitted model #' #' response <- function(object) { UseMethod("response") } response.glm <- response.lm <- function(object) { mo <- object$model if(is.null(mo)) return(NULL) te <- terms(object) rn <- attr(te, "response") if(is.null(rn)) return(NULL) y <- mo[,rn] return(y) } response.ppm <- function(object) { data.ppm(object) } response.dppm <- response.kppm <- function(object) { data.ppm(as.ppm(object)) } response.slrm <- function(object) { object$Data$response } response.mppm <- function(object) { data.mppm(object) } spatstat.model/R/psstG.R0000644000176200001440000001364714366621174014665 0ustar liggesusers# # psstG.R # # Pseudoscore residual for unnormalised G (saturation process) # # $Revision: 1.12 $ $Date: 2023/02/02 02:39:29 $ # ################################################################################ # psstG <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) fit <- object else if(is.ppp(object) || is.quad(object)) { # convert to quadscheme if(is.ppp(object)) object <- quadscheme(object, ...) 
# fit model if(!is.null(model)) { stopifnot(is.ppm(model)) ## update 'model' using new point pattern 'object' e <- list2env(list(object=object), parent=model$callframe) fit <- update(model, Q=object, forcefit=TRUE, envir=e) } else fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED)) { # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) # resval <- with(res, "increment") rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[S](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[S]") # First phase: ................................................. # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) # excludes identical pairs dIJ <- nn$dist I <- seq(U$n) J <- nn$which DD <- (I <= X$n) # TRUE for data points wcIJ <- wc okI <- USED[I] # histogram of nndist for data points only (without edge correction) Bsum <- cumsum(whist(dIJ[DD & okI], breaks$val)) # weighted histogram of nncross (without edge correction) Bint <- cumsum(whist(dIJ[okI], breaks$val, wcIJ[okI])) # residual Bres <- Bsum - Bint # tack on ans <- bind.fv(ans, data.frame(dat1=Bsum, com1=Bint, res1=Bres), c("%s[dat1](r)", "%s[com1](r)", "%s[res1](r)"), c("phase 1 pseudosum (contribution to %s)", "phase 1 pseudocompensator (contribution to %s)", "phase 1 pseudoresidual (contribution to %s)")) # Second phase: ................................................ 
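# (contributions from close pairs of distinct points, accumulated via
#  a reduced-sample style estimator; the first phase above used
#  nearest-neighbour distances only)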
# close pairs (quadrature point to data point) close <- crosspairs(U, X, rmax, what="ijd") dIJ <- close$d I <- close$i J <- close$j # UI <- U[I] # XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) okI <- USED[I] # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] # nearest and second-nearest neighbour distances in X nn1 <- nndist(X) nn2 <- nndist(X, k=2) nn1J <- nn1[J] nn2J <- nn2[J] # weird use of the reduced sample estimator # data sum: RSX <- Kount(dIJ[DD & okI], nn2J[DD & okI], nn2J[ZI & okI], breaks) Csum <- RSX$numerator # integral: if(spatstat.options("psstG.remove.zeroes")) okE <- okI & !EIJ else okE <- okI RSD <- Kwtsum(dIJ[okE], nn1J[okE], wcIJ[okE], nn1, rep.int(1, length(nn1)), breaks, fatal=FALSE) Cint <- RSD$numerator # Cres <- Bres + Csum - Cint # tack on ans <- bind.fv(ans, data.frame(dat2=Csum, com2=Cint, res2=Cres, dat=Bsum+Csum, com=Bint+Cint, res=Bres+Cres), c("%s[dat2](r)", "%s[com2](r)", "%s[res2](r)", "Sigma~Delta~V[S](r)", "bold(C)~Delta~V[S](r)", "bold(R)~Delta~V[S](r)"), c("phase 2 pseudosum (contribution to %s)", "phase 2 pseudocompensator (contribution to %s)", "phase 2 pseudoresidual (contribution to %s)", "pseudosum (contribution to %s)", "pseudocompensator (contribution to %s)", "pseudoresidual function %s"), "res") # restrict choice of curves in default plot fvnames(ans, ".") <- c("dat", "com", "res", "theo") # return(ans) } spatstat.model/R/First.R0000644000176200001440000000061214243551505014632 0ustar liggesusers## spatstat.model/R/First.R .onLoad <- function(...) reset.spatstat.options() .onAttach <- function(libname, pkgname) { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat.model"), fields="Version") vs <- as.character(vs) putSpatstatVariable("SpatstatModelVersion", vs) packageStartupMessage(paste("spatstat.model", vs)) return(invisible(NULL)) } spatstat.model/R/makepspace.R0000644000176200001440000000603114336562101015652 0ustar liggesusers#' makepspace.R #' #' Create 'pspace' argument for kppm #' #' Including default penalty for cluster scale #' #' $Revision: 1.8 $ $Date: 2022/11/21 02:36:46 $ #' #' Copyright (c) Tilman Davies, Martin Hazelton and Adrian Baddeley 2022 #' GNU Public Licence >= 2.0 make.pspace <- function(..., canonical=FALSE, adjusted=FALSE, trace=FALSE, save=trajectory, trajectory=FALSE, nhalfgrid=NULL, strict=TRUE, penalised=NULL, penalty=NULL, penal.args=NULL, tau=NULL, clusters="Thomas", fitmethod=c("mincon", "clik2", "palm"), flatness=2, C0factor=0.05, xval=FALSE, xval.args=list(), debug=FALSE, transfo=NULL) { ## assemble all recognised arguments p <- list(canonical = isTRUE(canonical), adjusted = isTRUE(adjusted), trace = isTRUE(trace), save = isTRUE(save), nhalfgrid = nhalfgrid, strict = !isFALSE(strict), xval = isTRUE(xval), xval.args = as.list(xval.args), debug = debug, transfo = transfo) ## penalise cluster scale? 
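  ## If so, the default penalty (HazeltonPenalty, defined below) is applied
  ## to the generic cluster scale 's' and has the form
  ##      P(s) = ( (sqrt(s/A) - sqrt(B/s)) / (1 - sqrt(B/A)) )^flatness
  ## where, unless overridden via 'penal.args', A is the median
  ## nearest-neighbour distance of the data and B is half the diameter
  ## of the observation window.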
penalised <- isTRUE(penalised) if(is.function(penalty)) { ## user-specified penalty penalised <- TRUE } else if(penalised && is.null(penalty)) { ## default penalty function if(flatness <= 0 || flatness %% 2 != 0) stop("'flatness' of penalty must be even and positive", call.=FALSE) ## penalty is applied to generic 'scale' parameter native2generic <- spatstatClusterModelInfo(clusters)[["native2generic"]] if(!is.function(native2generic)) stop(paste("Unable to determine generic scale parameter, for clusters=", sQuote(clusters)), call.=FALSE) HazeltonPenalty <- function(par, A, B, flatness) { s <- native2generic(par)[["scale"]] u <- sqrt(s/A) - sqrt(B/s) v <- 1 - sqrt(B/A) (u/v)^flatness } penalty <- HazeltonPenalty } if(penalised) { ## compute arguments of penalty if(is.null(penal.args)) { penal.args <- function(X, rho=flatness) { nnd <- nndist(X) p <- list(A = median(nnd), B = diameter(Window(X))/2, flatness=rho) return(p) } } if(is.null(tau)) { fitmethod <- match.arg(fitmethod) tau <- switch(fitmethod, mincon = function(X, poisval, f=C0factor) { f * poisval }, palm = 1, clik2 = 1) } ## add arguments of penalty to pspace p <- append(p, list(penalty = penalty, penal.args = penal.args, tau = tau)) } return(p) } spatstat.model/R/simulatekppm.R0000644000176200001440000001714614514520742016270 0ustar liggesusers#' #' simulatekppm.R #' #' simulate.kppm #' #' $Revision: 1.12 $ $Date: 2023/10/20 11:04:52 $ simulate.kppm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, n.cond=NULL, w.cond=NULL, verbose=TRUE, retry=10, drop=FALSE) { starttime <- proc.time() check.1.integer(nsim) stopifnot(nsim >= 0) if(nsim == 0) return(simulationresult(list())) verbose <- verbose && (nsim > 1) check.1.real(retry) # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } ## .................................. ## determine window for simulation results if(!is.null(window)) { stopifnot(is.owin(window)) win <- window } else { win <- as.owin(object) } ## .................................. ## conditional simulation if(!is.null(n.cond)) { ## fixed number of points out <- condSimCox(object, nsim=nsim, seed=NULL, ..., window=win, covariates=covariates, n.cond=n.cond, w.cond=w.cond, verbose=verbose, retry=retry, drop=drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } ## .................................. 
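  ## Illustrative usage (a sketch, assuming a point pattern 'X'):
  ##     fit   <- kppm(X ~ 1, "Thomas")
  ##     sims  <- simulate(fit, nsim=5)              # unconditional
  ##     csims <- simulate(fit, nsim=5, n.cond=50)   # given exactly 50 points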
# determine parameters mp <- as.list(object$modelpar) # parameter 'mu' # = parent intensity of cluster process # = mean log intensity of log-Gaussian Cox process if(is.null(covariates) && (object$stationary || is.null(window))) { # use existing 'mu' (scalar or image) mu <- object$mu } else { # recompute 'mu' using new data switch(object$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process kappa <- mp$kappa lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(lambda/kappa) }, LGCP={ # log-Gaussian Cox process sigma2 <- mp$sigma2 lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(log(lambda) - sigma2/2) }, stop(paste("Simulation of", sQuote(object$clusters), "processes is not yet implemented")) ) } ## simulate switch(object$clusters, Thomas={ kappa <- mp$kappa sigma <- mp$sigma out <- rThomas(kappa=kappa, scale=sigma, mu=mu, win=win, ..., nsim=nsim, drop=FALSE) }, MatClust={ kappa <- mp$kappa r <- mp$R out <- rMatClust(kappa=kappa, scale=r, mu=mu, win=win, ..., nsim=nsim, drop=FALSE) }, Cauchy = { kappa <- mp$kappa omega <- mp$omega out <- rCauchy(kappa = kappa, scale=omega, mu=mu, win=win, ..., nsim=nsim, drop=FALSE) }, VarGamma = { kappa <- mp$kappa omega <- mp$omega nu.ker <- object$covmodel$margs$nu.ker out <- rVarGamma(kappa=kappa, scale=omega, mu=mu, nu=nu.ker, win=win, ..., nsim=nsim, drop=FALSE) }, LGCP={ sigma2 <- mp$sigma2 alpha <- mp$alpha cm <- object$covmodel model <- cm$model margs <- cm$margs param <- append(list(var=sigma2, scale=alpha), margs) #' if(!is.im(mu)) { # model will be simulated in 'win' out <- rLGCP(model=model, mu=mu, param=param, win=win, ..., nsim=nsim, drop=FALSE) } else { # model will be simulated in as.owin(mu), then change window out <- rLGCP(model=model, mu=mu, param=param, ..., nsim=nsim, drop=FALSE) out <- solapply(out, "[", i=win) } }) #' pack up out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } condSimCox <- function(object, nsim=1, ..., window=NULL, n.cond=NULL, w.cond=NULL, giveup=1000, maxchunk=100, saveLambda=FALSE, verbose=TRUE, drop=FALSE) { stopifnot(is.kppm(object)) shortcut <- isFALSE(object$isPCP) w.sim <- as.owin(window) fullwindow <- is.null(w.cond) if(fullwindow) { w.cond <- w.sim w.free <- NULL } else { stopifnot(is.owin(w.cond)) w.free <- setminus.owin(w.sim, w.cond) } nremaining <- nsim ntried <- 0 accept <- FALSE nchunk <- 1 phistory <- mhistory <- numeric(0) results <- list() while(nremaining > 0) { ## increase chunk length nchunk <- min(maxchunk, giveup - ntried, 2 * nchunk) ## bite off next chunk of simulations if(shortcut) { lamlist <- simulate(object, nsim=nchunk, Lambdaonly=TRUE, ..., drop=FALSE, verbose=FALSE) } else { Xlist <- simulate(object, nsim=nchunk, saveLambda=TRUE, ..., drop=FALSE, verbose=FALSE) lamlist <- lapply(unname(Xlist), attr, which="Lambda", exact=TRUE) } ## compute acceptance probabilities lamlist <- lapply(lamlist, "[", i=w.sim, drop=FALSE, tight=TRUE) if(fullwindow) { mu <- sapply(lamlist, integral) } else { mu <- sapply(lamlist, integral, domain=w.cond) } p <- exp(n.cond * log(mu/n.cond) + n.cond - mu) phistory <- c(phistory, p) mhistory <- c(mhistory, mu) ## accept/reject accept <- (runif(length(p)) < p) if(any(accept)) { jaccept <- which(accept) if(length(jaccept) > nremaining) jaccept <- jaccept[seq_len(nremaining)] naccepted <- length(jaccept) if(verbose) splat("Accepted the", commasep(ordinal(ntried + jaccept)), ngettext(naccepted, "proposal", "proposals")) 
nremaining <- nremaining - naccepted for(j in jaccept) { lamj <- lamlist[[j]] if(min(lamj) < 0) lamj <- eval.im(pmax(lamj, 0)) if(fullwindow) { Y <- rpoint(n.cond, lamj, win=w.sim, forcewin=TRUE) } else { lamj.cond <- lamj[w.cond, drop=FALSE, tight=TRUE] lamj.free <- lamj[w.free, drop=FALSE, tight=TRUE] Ycond <- rpoint(n.cond, lamj.cond, win=w.cond) Yfree <- rpoispp(lamj.free) Y <- superimpose(Ycond, Yfree, W=w.sim) } if(saveLambda) attr(Y, "Lambda") <- lamj results <- append(results, list(Y)) } } ntried <- ntried + nchunk if(ntried >= giveup && nremaining > 0) { message(paste("Gave up after", ntried, "proposals with", nsim - nremaining, "accepted")) message(paste("Mean acceptance probability =", signif(mean(phistory), 3))) break } } nresults <- length(results) results <- simulationresult(results, nresults, drop) attr(results, "history") <- data.frame(mu=mhistory, p=phistory) if(verbose && nresults == nsim) splat("Mean acceptance probability", signif(mean(phistory), 3)) return(results) } spatstat.model/R/quadrattest.R0000644000176200001440000000311214460146550016103 0ustar liggesusers# # quadrattest.R # # $Revision: 1.70 $ $Date: 2023/07/17 07:38:30 $ # ## Code for generic quadrat.test() and quadrat.test.ppp() ## is moved to spatstat.explore quadrat.test.slrm <- quadrat.test.ppm <- function(X, nx=5, ny=nx, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) { fitname <- short.deparse(substitute(X)) dataname <- paste("data from", fitname) method <- match.arg(method) alternative <- match.arg(alternative) if(!is.poisson(X)) stop("Test is only defined for Poisson point process models") if(is.marked(X)) stop("Sorry, not yet implemented for marked point process models") Xdata <- response(X) dont.complain.about(Xdata) do.call(quadrat.testEngine, resolve.defaults(list(quote(Xdata), nx=nx, ny=ny, alternative=alternative, method=method, conditional=conditional, CR=CR, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess, nsim=nsim, fit=X, df.est=df.est), list(...), list(Xname=dataname, fitname=fitname))) } ## code for quadrat.test.quadratcount is moved to spatstat.explore ## Infrastructure for quadrat.test is moved to spatstat.explore spatstat.model/R/newformula.R0000644000176200001440000000273314331173073015726 0ustar liggesusers#' #' newformula.R #' #' $Revision: 1.4 $ $Date: 2022/01/19 08:50:37 $ #' #' Update formula and expand polynomial newformula <- function(old, change, eold, enew, expandpoly=spatstat.options("expand.polynom"), dotvars=character(0)) { old <- if(is.null(old)) ~1 else eval(old, eold) change <- if(is.null(change)) ~1 else eval(change, enew) old <- as.formula(old, env=eold) change <- as.formula(change, env=enew) if(expandpoly) { old <- expand.polynom(old) change <- expand.polynom(change) } old <- expandDot(old, dotvars) answer <- update.formula(old, change) return(answer) } expandDot <- local({ hasDot <- function(x) { "." 
%in% all.names(x) }

  expandDot <- function(f, dotvars) {
    if(length(dotvars) == 0) return(f)
    dotsum <- paren(paste(dotvars, collapse=" + "))
    dotexpr <- rhs.of.formula(as.formula(paste("~", dotsum)), tilde=FALSE)
    g <- fuddle(f, dotexpr)
    environment(g) <- environment(f)
    return(g)
  }

  fuddle <- function(f, dotexpr) {
    if(!hasDot(f)) return(f)
    if(identical(f, as.name('.'))) return(dotexpr)
    if(length(f) == 1) return(f)
    if(identical(f[[1]], as.name('I'))) {
      ## expressions enclosed in I() are protected
      return(f)
    }
    tbd <- unlist(lapply(f, hasDot))
    if(any(tbd)) {
      ## descend recursively
      for(i in which(tbd))
        f[[i]] <- fuddle(f[[i]], dotexpr)
    }
    return(f)
  }

  expandDot
})
spatstat.model/R/ppmclass.R0000644000176200001440000010065514366605615015373 0ustar liggesusers#
#	ppmclass.R
#
#	Class 'ppm' representing fitted point process models.
#
#
#	$Revision: 2.153 $	$Date: 2023/02/02 00:16:02 $
#
#       An object of class 'ppm' contains the following:
#
#            $method           model-fitting method (currently "mpl")
#
#            $coef             vector of fitted regular parameters
#                              as given by coef(glm(....))
#
#            $trend            the trend formula
#                              or NULL
#
#            $interaction      the interaction family
#                              (an object of class 'interact') or NULL
#
#            $Q                the quadrature scheme used
#
#            $maxlogpl         the maximised value of log pseudolikelihood
#
#            $internal         list of internal calculation results
#
#            $correction       name of edge correction method used
#            $rbord            erosion distance for border correction (or NULL)
#
#            $the.call         the originating call to ppm()
#
#            $the.version      version of mpl() which yielded the fit
#
#
#------------------------------------------------------------------------

is.ppm <- function(x) { inherits(x, "ppm") }

print.ppm <-
function(x, ...,
         what=c("all", "model", "trend", "interaction", "se", "errors")) {

  verifyclass(x, "ppm")

  misswhat <- missing(what)

  opts <- c("model", "trend", "interaction", "se", "errors")
  what <- match.arg(what, c("all", opts), several.ok=TRUE)
  if("all" %in% what) what <- opts

  np <- length(coef(x))
  terselevel <- spatstat.options("terse")
  digits <- getOption('digits')

  showextras <- waxlyrical('extras', terselevel)
  ## secret option
  showname <- !isFALSE(list(...)$showname)

  ## Determine whether SE is required
  want.SE <- force.SE <- force.no.SE <- FALSE
  if(!misswhat && ("se" %in% what)) {
    ## SE was explicitly requested
    force.SE <- TRUE
  } else {
    ## Default rule: compute SE only if the model is Poisson
    switch(spatstat.options("print.ppm.SE"),
           always = { force.SE <- TRUE },
           never  = { force.no.SE <- TRUE },
           poisson = {
             want.SE <- is.poisson(x) && showextras
           })
  }
  do.SE <- (want.SE || force.SE) && !force.no.SE
  if(do.SE) {
    ## Check whether able to compute SE
    unable.SE <- (np == 0) || any(x$fitter %in% "gam") ||
                 !is.null(x$internal$VB) ||
                 (any(x$method %in% "mppm") && is.null(x$varcov))
    ## resolve
    if(force.SE && unable.SE)
      warning("Unable to compute variances for this model", call.=FALSE)
    do.SE <- do.SE && !unable.SE
  }

  s <- summary.ppm(x, quick=if(do.SE) FALSE else "no variances")

  notrend <-    s$no.trend
#  stationary <- s$stationary
  poisson <-    s$poisson
  markeddata <- s$marked
  multitype  <- s$multitype
  dataname   <- s$dataname

#  markedpoisson <- poisson && markeddata
  csr <- poisson && notrend && !markeddata

  special <- csr && all(c("model", "trend") %in% what)
  if(special) {
    ## ---------- Trivial/special cases -----------------------
    splat("Stationary Poisson process")
    if(showname && showextras)
      splat("Fitted to point pattern dataset", sQuote(dataname))
    cat("Intensity:", signif(s$trend$value, digits), fill=TRUE)
  } else {
    ## ----------- Print model type
------------------- if("model" %in% what) { splat(s$name) if(showname && showextras) splat("Fitted to point pattern dataset", sQuote(dataname)) parbreak(terselevel) if(markeddata) mrk <- s$entries$marks if(multitype) { splat(paste("Possible marks:", commasep(sQuote(levels(mrk))))) parbreak(terselevel) } } ## ----- trend -------------------------- if("trend" %in% what) { if(!notrend) { splat("Log", if(poisson) "intensity: " else "trend: ", pasteFormula(s$trend$formula)) parbreak(terselevel) } if(waxlyrical('space', terselevel) || !do.SE) { ## print trend coefficients, unless redundant and space is tight tv <- s$trend$value if(length(tv) == 0) splat("[No trend coefficients]") else { thead <- paste0(s$trend$label, ":") if(is.list(tv)) { splat(thead) for(i in seq_along(tv)) print(tv[[i]]) } else if(is.numeric(tv) && length(tv) == 1) { ## single number: append to end of current line tvn <- names(tv) tveq <- if(is.null(tvn)) "\t" else paste(" ", tvn, "= ") splat(paste0(thead, tveq, signif(tv, digits))) } else { ## some other format splat(thead) print(tv) } } parbreak(terselevel) } } if(waxlyrical("space", terselevel) && !is.null(cfa <- s$covfunargs) && length(cfa) > 0) { cfafitter <- s$cfafitter if(is.null(cfafitter)) { cat("Covariate", "function", "arguments", "(covfunargs)", "provided:", fill=TRUE) } else { cat("Irregular", "parameters", "(covfunargs)", "fitted", "by", paste0(sQuote(cfafitter), ":"), fill=TRUE) } for(i in seq_along(cfa)) { cat(paste(names(cfa)[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cfai <- signif(cfai, digits) cat(paste(cfai, "\n")) } else print(cfai) } } } # ---- Interaction ---------------------------- if("interaction" %in% what) { if(!poisson) { print(s$interaction, family=FALSE, banner=FALSE, brief=!showextras) parbreak(terselevel) } } # ----- parameter estimates with SE and 95% CI -------------------- if(showextras && ("se" %in% what) && (np > 0)) { if(!is.null(cose <- s$coefs.SE.CI)) { print(cose, digits=digits) } else if(do.SE) { # standard error calculation failed splat("Standard errors unavailable; variance-covariance matrix is singular") } else if(!force.no.SE) { # standard error was voluntarily omitted if(waxlyrical('space', terselevel)) splat("For standard errors, type coef(summary(x))\n") } } # ---- Warnings issued in mpl.prepare --------------------- if(waxlyrical("errors", terselevel) && "errors" %in% what) { probs <- s$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(x) { if(is.list(x) && !is.null(p <- x$print)) splat(paste("Problem:\n", p, "\n\n")) }) if(s$old) warning(paste("Model fitted by old spatstat version", s$version)) # ---- Algorithm status ---------------------------- fitter <- s$fitter converged <- s$converged if(!is.null(fitter) && fitter %in% c("glm", "gam") && !converged) splat("*** Fitting algorithm for", sQuote(fitter), "did not converge ***") } if(showextras && s$projected) { parbreak() splat("Fit was emended to obtain a valid point process model") } if(identical(s$valid, FALSE) && waxlyrical("errors", terselevel)) { parbreak() splat("*** Model is not valid ***") if(!all(is.finite(s$entries$coef))) { splat("*** Some coefficients are NA or Inf ***") } else { splat("*** Interaction parameters are outside valid range ***") } } else if(showextras && is.na(s$valid)) { parbreak() splat("[Validity of model could not be checked]") } return(invisible(NULL)) } # Extract version string from ppm object versionstring.ppm <- function(object) { verifyclass(object, "ppm") v <- 
object$version if(is.null(v) || !is.list(v)) v <- list(major=1, minor=3, release=4) vs <- paste(v$major, ".", v$minor, "-", v$release, sep="") return(vs) } # Extract quadrature scheme quad.ppm <- function(object, drop=FALSE, clip=FALSE) { if(!is.ppm(object)) { if(is.kppm(object)) object <- object$po else if(is.lppm(object)) object <- object$fit else stop("object is not of class ppm, kppm or lppm") } Q <- object$Q if(is.null(Q)) return(Q) if(drop || clip) { ok <- getglmsubset(object) if(!is.null(ok)) Q <- Q[ok] } if(clip && object$correction == "border") { Wminus <- erosion(as.owin(object), object$rbord) Q <- Q[Wminus] } return(Q) } data.ppm <- function(object) { verifyclass(object, "ppm") object$Q$data } dummy.ppm <- function(object, drop=FALSE) { return(quad.ppm(object, drop=drop)$dummy) } # method for 'coef' coef.ppm <- function(object, ...) { verifyclass(object, "ppm") object$coef } hasglmfit <- function(object) { return(!is.null(object$internal$glmfit)) } getglmfit <- function(object) { verifyclass(object, "ppm") glmfit <- object$internal$glmfit if(is.null(glmfit)) return(NULL) if(object$method != "mpl") glmfit$coefficients <- object$coef return(glmfit) } getglmdata <- function(object, drop=FALSE) { verifyclass(object, "ppm") gd <- object$internal$glmdata if(!drop || is.null(gd)) return(gd) return(gd[getglmsubset(object), , drop=FALSE]) } getglmsubset <- function(object) { gd <- object$internal$glmdata if(is.null(gd)) return(NULL) if(object$method=="logi") gd$.logi.ok else gd$.mpl.SUBSET } getppmdatasubset <- function(object) { ## Equivalent to getglmsubset(object)[is.data(quad.ppm(object))] ## but also works for models fitted exactly, etc ## sub <- getglmsubset(object) if(!is.null(sub)) { Z <- is.data(quad.ppm(object)) subZ <- sub[Z] } else { X <- data.ppm(object) subZ <- if(object$correction == "border") { (bdist.points(X) >= object$rbord) } else rep(TRUE, npoints(X)) } return(subZ) } getppmOriginalCovariates <- function(object) { df <- as.data.frame(as.ppp(quad.ppm(object))) cova <- object$covariates if(length(cova) > 0) { df2 <- mpl.get.covariates(object$covariates, union.quad(quad.ppm(object)), "quadrature points", object$covfunargs) df <- cbind(df, df2) } return(df) } # ??? method for 'effects' ??? valid <- function(object, ...) { UseMethod("valid") } valid.ppm <- function(object, warn=TRUE, ...) { verifyclass(object, "ppm") coeffs <- coef(object) # ensure all coefficients are fitted, and finite if(!all(is.finite(coeffs))) return(FALSE) # inspect interaction inte <- object$interaction if(is.poisson(object)) return(TRUE) # Poisson process # extract fitted interaction coefficients Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset Icoeffs <- coeffs[Vnames[!IsOffset]] # check interaction checker <- inte$valid if(is.null(checker) || !newstyle.coeff.handling(inte)) { if(warn) warning("Internal error: unable to check validity of model") return(NA) } #' remove prefix to obtain coefficient names expected by interaction if(npre <- sum(nchar(object$internal$vnameprefix))) names(Icoeffs) <- substring(names(Icoeffs), npre+1L) answer <- checker(Icoeffs, inte) return(answer) } emend <- function(object, ...) { UseMethod("emend") } emend.ppm <- project.ppm <- local({ tracemessage <- function(depth, ...) 
{ if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") splat(paste0(spacer, marker, " ", paste(...))) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } emend.ppm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "ppm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.ppm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # First ensure trend coefficients are all finite coeffs <- coef(object) # Which coefficients are trend coefficients coefnames <- names(coeffs) internames <- object$internal$Vnames trendnames <- coefnames[!(coefnames %in% internames)] # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") istrend <- (coef2term > 0) & (coefnames %in% trendnames) # Identify non-finite trend coefficients bad <- istrend & !is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } # Now handle interaction inte <- object$interaction if(is.null(inte)) { tracemessage(td, "No interaction to check.") leaving(td) return(object) } tracemessage(td, "Inspecting interaction terms.") proj <- inte$project if(is.null(proj)) { whinge <- "Internal error: interaction has no projection operator" if(fatal) stop(whinge) warning(whinge) leaving(td) return(object) } # ensure the same edge correction is used! 
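    # (candidate submodels are refitted with the same 'correction' and
    #  'rbord' as the original fit, so that their maximised log
    #  pseudolikelihoods are comparable when selecting the best submodel)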
correction <- object$correction rbord <- object$rbord # apply projection coef.orig <- coeffs <- coef(object) Vnames <- object$internal$Vnames Icoeffs <- coeffs[Vnames] change <- proj(Icoeffs, inte) if(is.null(change)) { tracemessage(td, "Interaction does not need updating.") leaving(td) return(object) } tracemessage(td, "Interaction is not valid.") if(is.numeric(change)) { tracemessage(td, "Interaction coefficients updated without re-fitting.") # old style: 'project' returned a vector of updated coefficients Icoeffs <- change # tweak interaction coefficients object$coef[Vnames] <- Icoeffs # recompute fitted interaction object$fitin <- NULL object$fitin <- fitin(object) } else if(is.interact(change)) { # new style: 'project' returns an interaction if(trace) { tracemessage(td, "Interaction changed to:") print(change) } # refit the whole model # (using the same edge correction) # (and the same quadrature scheme) newobject <- update(object, interaction=change, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) object <- newobject } else if(is.list(change) && all(unlist(lapply(change, is.interact)))) { # new style: 'project' returns a list of candidate interactions nchange <- length(change) tracemessage(td, "Considering", nchange, ngettext(nchange, "submodel", "submodels")) bestobject <- NULL for(i in seq_len(nchange)) { change.i <- change[[i]] if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate submodel, with interaction:") print(change.i) } # refit the whole model object.i <- update(object, interaction=change.i, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } # end loop through submodels if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } object <- bestobject } else stop("Internal error: unrecognised format of update") object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } emend.ppm }) # more methods deviance.ppm <- function(object, ...) 
{
  force(object)
  satlogpl <- object$satlogpl
  if(is.null(satlogpl)) {
    object <- update(object, forcefit=TRUE)
    satlogpl <- object$satlogpl
  }
  if(is.null(satlogpl) || !is.finite(satlogpl))
    return(NA)
  ll <- do.call(logLik,
                resolve.defaults(list(object=quote(object), absolute=FALSE),
                                 list(...)))
  ll <- as.numeric(ll)
  2 * (satlogpl - ll)
}

logLik.ppm <- function(object, ..., new.coef=NULL, warn=TRUE, absolute=FALSE) {
  if(!is.poisson.ppm(object) && warn)
    warn.once("ppmLogLik",
              "log likelihood is not available for non-Poisson model;",
              "log pseudolikelihood returned")
  ## degrees of freedom
  nip <- if(!inherits(object, "ippm")) 0 else
         length(attr(object$covfunargs, "free"))
  df <- length(coef(object)) + nip
  ## compute adjustment constant
  if(absolute && object$method %in% c("exact", "mpl", "ho")) {
    X <- data.ppm(object)
    W <- Window(X)
    areaW <- if(object$correction == "border" && object$rbord > 0)
             eroded.areas(W, object$rbord) else area(W)
    constant <- areaW * markspace.integral(X)
  } else constant <- 0
  ##
  if(is.null(new.coef)) {
    ## extract from object
    ll <- object$maxlogpl + constant
    attr(ll, "df") <- df
    class(ll) <- "logLik"
    return(ll)
  }
  ## recompute for new parameter values
  method <- object$method
  if(method == "exact")
    object <- update(object, forcefit=TRUE)
  Q <- quad.ppm(object, drop=TRUE)
  Z <- is.data(Q)
  cif <- fitted(object, type="cif", new.coef=new.coef, drop=TRUE)
  cifdata <- cif[Z]
  switch(method,
         mpl=,
         exact=,
         ho = {
           w <- w.quad(Q)
           ll <- sum(log(cifdata[cifdata > 0])) - sum(w * cif)
         },
         logi=,
         VBlogi={
           B <- getglmdata(object, drop=TRUE)$.logi.B
           p <- cif/(B+cif)
           ll <- sum(log(p/(1-p))[Z]) + sum(log(1-p)) + sum(log(B[Z]))
         },
         stop(paste("Internal error: unrecognised ppm method:",
                    dQuote(method)))
         )
  ll <- ll + constant
  attr(ll, "df") <- df
  class(ll) <- "logLik"
  return(ll)
}

pseudoR2 <- function(object, ...) {
  UseMethod("pseudoR2")
}

pseudoR2.slrm <- pseudoR2.ppm <- function(object, ..., keepoffset=TRUE) {
  dres <- deviance(object, ..., warn=FALSE)
  nullfmla <- . ~ 1
  if(keepoffset && has.offset.term(object)) {
    off <- attr(model.depends(object), "offset")
    offterms <- row.names(off)[apply(off, 1, any)]
    if(length(offterms)) {
      nullrhs <- paste(offterms, collapse=" + ")
      nullfmla <- as.formula(paste(". ~ ", nullrhs))
    }
  }
  nullmod <- update(object, nullfmla, forcefit=TRUE)
  dnul <- deviance(nullmod, warn=FALSE)
  return(1 - dres/dnul)
}

formula.ppm <- function(x, ...) {
  return(x$trend)
}

terms.ppm <- function(x, ...) {
  terms(x$terms, ...)
}

labels.ppm <- function(object, ...)
{ # extract fitted trend coefficients co <- coef(object) Vnames <- object$internal$Vnames is.trend <- !(names(co) %in% Vnames) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) & is.trend okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } AIC.ppm <- function(object, ..., k=2, takeuchi=TRUE) { ll <- logLik(object, warn=FALSE) pen <- attr(ll, "df") if(takeuchi && !is.poisson(object)) { vv <- vcov(object, what="internals") logi <- (object$method == "logi") J <- with(vv, if(!logi) Sigma else (Sigma1log+Sigma2log)) H <- with(vv, if(!logi) A1 else Slog) ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J JiH <- try(solve(H, J), silent=TRUE) if(!inherits(JiH, "try-error")) pen <- sum(diag(JiH)) } return(- 2 * as.numeric(ll) + k * pen) } extractAIC.ppm <- function (fit, scale = 0, k = 2, ..., takeuchi=TRUE) { edf <- length(coef(fit)) aic <- AIC(fit, k=k, takeuchi=takeuchi) c(edf, aic) } # # method for model.frame model.frame.ppm <- function(formula, ...) { object <- formula gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) } argh <- resolve.defaults(list(formula=quote(gf)), list(...), list(data = getglmdata(object), subset = TRUE)) result <- switch(object$fitter, gam = do.call(modelFrameGam, argh), do.call(model.frame, argh)) return(result) } #' a hacked version of model.frame.glm that works for gam objects (mgcv) modelFrameGam <- function(formula, ...) { dots <- list(...) nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0L)] if (length(nargs) || is.null(formula$model)) { fcall <- formula$call # fcall$method <- "model.frame" fcall[[1L]] <- quote(mgcv::gam) fcall[names(nargs)] <- nargs env <- environment(formula$terms) if (is.null(env)) env <- parent.frame() refut <- eval(fcall, env) refut$model } else formula$model } # # method for model.matrix model.matrix.ppm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE) { if(missing(data)) data <- NULL PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA) } model.matrix.ippm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE, irregular=FALSE) { if(missing(data)) data <- NULL PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA, irregular=irregular) } PPMmodelmatrix <- function(object, data = NULL, ..., subset, Q=NULL, keepNA=TRUE, irregular=FALSE, splitInf=FALSE) { # handles ppm and ippm data.given <- !is.null(data) irregular <- irregular && inherits(object, "ippm") && !is.null(object$iScore) if(splitInf && !data.given && is.null(Q)) { #' force re-computation Q <- quad.ppm(object) } if(!is.null(Q)) { if(data.given) stop("Arguments Q and data are incompatible") if(!inherits(Q, c("ppp", "quad"))) stop("Q should be a point pattern or quadrature scheme") if(is.ppp(Q)) Q <- quad(Q, Q[FALSE]) ## construct Berman-Turner frame needed <- c("trend", "interaction", "covariates", "covfunargs", "correction", "rbord") bt <- do.call(bt.frame, c(list(Q), object[needed], list(splitInf=splitInf))) forbid <- bt$forbid ## compute model matrix mf <- model.frame(bt$fmla, bt$glmdata, ...) mm <- model.matrix(bt$fmla, mf, ...) 
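    ## (the rows of 'mm' correspond to the points of the quadrature
    ##  scheme 'Q', i.e. data and dummy points combined)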
ass <- attr(mm, "assign") if(irregular) { ## add irregular score components U <- union.quad(Q) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) } ## subset if(!missing(subset)) { ok <- eval(substitute(subset), envir=bt$glmdata) mm <- mm[ok, , drop=FALSE] if(!is.null(forbid)) forbid <- forbid[ok] } ## remove NA's ? if(!keepNA) { ok <- complete.cases(mm) mm <- mm[ok, , drop=FALSE] if(!is.null(forbid)) forbid <- forbid[ok] } attr(mm, "assign") <- ass attr(mm, "-Inf") <- forbid return(mm) } #' extract GLM fit gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("internal error: unable to extract a glm fit") } if(data.given) { #' new data. Must contain the Berman-Turner variables as well. bt <- list(.mpl.Y=1, .mpl.W=1, .mpl.SUBSET=TRUE) if(any(forgot <- !(names(bt) %in% names(data)))) data <- do.call(cbind, append(list(data), bt[forgot])) mm <- model.matrix(gf, data=data, ..., subset=NULL) ass <- attr(mm, "assign") if(irregular) { ## add irregular score components mi <- sapply(object$iScore, do.call, args=append(list(x=data$x, y=data$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) attr(mm, "assign") <- ass } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } scrambled <- object$scrambled %orifnull% FALSE ## if TRUE, this object was produced by 'subfits' using jittered covariate if(!keepNA && !irregular && !scrambled) { # extract model matrix of glm fit object # restricting to its 'subset' mm <- model.matrix(gf, ...) if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } ## extract model matrix for all cases gd <- getglmdata(object, drop=FALSE) if(!scrambled) { ## 'gf' was fitted to correct data. Use internals. 
mm <- model.matrix(gf, ..., subset=NULL, na.action=NULL) ass <- attr(mm, "assign") } else { ## 'gf' was originally fitted using jittered data: ## Use correct data given by 'gd' ## Temporarily add scrambled data to avoid singular matrices etc gds <- object$internal$glmdata.scrambled gdplus <- rbind(gd, gds) mm <- model.matrix(gf, ..., data=gdplus, subset=NULL, na.action=NULL) ass <- attr(mm, "assign") ## Now remove rows corresponding to scrambled data mm <- mm[seq_len(nrow(gd)), , drop=FALSE] attr(mm, "assign") <- ass } cn <- colnames(mm) if(nrow(mm) != nrow(gd)) { # can occur if covariates include NA's or interaction is -Inf insubset <- getglmsubset(object) isna <- is.na(insubset) | !insubset if(sum(isna) + nrow(mm) == nrow(gd)) { # insert rows of NA's mmplus <- matrix( , nrow(gd), ncol(mm)) mmplus[isna, ] <- NA mmplus[!isna, ] <- mm mm <- mmplus attr(mm, "assign") <- ass } else stop("internal error: model matrix does not match glm data frame") } if(irregular) { ## add irregular score components U <- union.quad(quad.ppm(object, drop=FALSE)) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) attr(mm, "assign") <- ass cn <- c(cn, colnames(mi)) } ## subset if(!missing(subset)) { ok <- eval(substitute(subset), envir=gd) mm <- mm[ok, , drop=FALSE] attr(mm, "assign") <- ass } ## remove NA's if(!keepNA) { mm <- mm[complete.cases(mm), , drop=FALSE] attr(mm, "assign") <- ass } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign colnames(mm) <- cn return(mm) } model.images <- function(object, ...) { UseMethod("model.images") } model.images.ppm <- function(object, W=as.owin(object), ...) { X <- data.ppm(object) # irregular <- resolve.1.default(list(irregular=FALSE), list(...)) ## make a quadscheme with a dummy point at every pixel Q <- pixelquad(X, W) ## compute model matrix mm <- model.matrix(object, Q=Q, ...) ## retain only the entries for dummy points (pixels) mm <- mm[!is.data(Q), , drop=FALSE] mm <- as.data.frame(mm) ## create template image Z <- as.im(attr(Q, "M")) ok <- !is.na(Z$v) ## make images imagenames <- colnames(mm) if(!is.multitype(object)) { result <- lapply(as.list(mm), replace, list=ok, x=Z) result <- as.solist(result) names(result) <- imagenames } else { marx <- marks(Q$dummy) mmsplit <- split(mm, marx) result <- vector(mode="list", length=length(mmsplit)) for(i in seq_along(mmsplit)) result[[i]] <- as.solist(lapply(as.list(mmsplit[[i]]), replace, list=ok, x=Z)) names(result) <- names(mmsplit) result <- do.call(hyperframe, result) row.names(result) <- imagenames } return(result) } unitname.ppm <- function(x) { return(unitname(x$Q)) } "unitname<-.ppm" <- function(x, value) { unitname(x$Q) <- value return(x) } nobs.ppm <- function(object, ...) 
{ npoints(data.ppm(object)) } as.interact.ppm <- function(object) { verifyclass(object, "ppm") inte <- object$interaction if(is.null(inte)) inte <- Poisson() return(inte) } as.ppm <- function(object) { UseMethod("as.ppm") } as.ppm.ppm <- function(object) { object } ## method for as.owin as.owin.ppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { if(!verifyclass(W, "ppm", fatal=fatal)) return(NULL) from <- match.arg(from) datawin <- as.owin(data.ppm(W)) if(from == "points") return(datawin) covs <- W$covariates isim <- unlist(lapply(covs, is.im)) if(!any(isim)) return(datawin) cwins <- lapply(covs[isim], as.owin) covwin <- do.call(intersect.owin, unname(cwins)) result <- intersect.owin(covwin, datawin) return(result) } domain.ppm <- Window.ppm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, ..., from=from) } hardcoredist.ppm <- function(x, ..., epsilon=0) { hardcoredist.fii(fitin(x), ..., epsilon=epsilon) } ## change the coefficients in a ppm or other model tweak.coefs <- function(model, new.coef) { if(is.null(new.coef)) return(model) co <- coef(model) check.nvector(new.coef, length(co), things="coefficients", vname="new.coef") model$coef.orig <- co model$coef <- new.coef return(model) } spatialCovariateUnderModel <- function(model, covariate, ...) { UseMethod("spatialCovariateUnderModel") } spatialCovariateUnderModel.ppm <- spatialCovariateUnderModel.kppm <- spatialCovariateUnderModel.dppm <- function(model, covariate, ...) { Q <- quad.ppm(as.ppm(model)) loc <- as.ppp(Q) df <- mpl.get.covariates(list(Z=covariate), loc, covfunargs=list(...)) df$wt <- fitted(model) * w.quad(Q) return(df) } spatstat.model/R/quadratmtest.R0000644000176200001440000000067114331173074016265 0ustar liggesusers# # method for 'quadrat.test' for class mppm # # $Revision: 1.8 $ $Date: 2015/08/12 07:29:17 $ # quadrat.test.mppm <- function(X, ...) { Xname <- short.deparse(substitute(X)) if(!is.poisson.mppm(X)) stop("Model is not a Poisson point process") subs <- subfits(X) tests <- anylapply(subs, quadrat.test.ppm, ..., fitname=Xname) df.est <- length(coef(X)) return(pool.quadrattest(tests, Xname=Xname, df.est=df.est)) } spatstat.model/R/intensity.ppm.R0000644000176200001440000002403714331173073016371 0ustar liggesusers#' #' intensity.ppm.R #' #' Intensity and intensity approximations for fitted point process models #' #' $Revision: 1.4 $ $Date: 2022/05/23 02:33:06 $ #' #' Adrian Baddeley with contributions from Frederic Lavancier intensity.ppm <- function(X, ..., approx=c("Poisson", "DPP")) { approx <- match.arg(approx) if(!isTRUE(valid.ppm(X))) { warning("Model is invalid - projecting it") X <- project.ppm(X) } if(is.poisson(X)) { if(is.stationary(X)) { # stationary univariate/multivariate Poisson sX <- summary(X, quick="no variances") lam <- sX$trend$value if(sX$multitype && sX$no.trend) { ## trend is ~1; lam should be replicated for each mark lev <- levels(marks(data.ppm(X))) lam <- rep(lam, length(lev)) names(lam) <- lev } return(lam) } # Nonstationary Poisson return(predict(X, ...)) } # Gibbs process if(is.multitype(X)) stop("Not yet implemented for multitype Gibbs processes") # Compute first order term if(is.stationary(X)) { ## activity parameter sX <- summary(X, quick="no variances") beta <- sX$trend$value } else { ## activity function (or values of it, depending on '...') beta <- predict(X, ...) 
} ## apply approximation lambda <- SaddleApprox(beta, fitin(X), approx=approx) return(lambda) } SaddleApprox <- function(beta, fi, approx=c("Poisson", "DPP")) { approx <- match.arg(approx) z <- switch(approx, Poisson = PoisSaddle(beta, fi), DPP = DPPSaddle(beta, fi)) return(z) } PoisSaddle <- function(beta, fi) { ## apply Poisson-Saddlepoint approximation ## given first order term and fitted interaction stopifnot(inherits(fi, "fii")) if(interactionorder(fi) == 2) return(PoisSaddlePairwise(beta, fi)) modelname <- as.interact(fi)$name if(identical(modelname, "Geyer saturation process")) return(PoisSaddleGeyer(beta, fi)) if(identical(modelname, "Area-interaction process")) return(PoisSaddleArea(beta, fi)) stop(paste("Poisson-saddlepoint intensity approximation", "is not yet available for", modelname), call.=FALSE) } PoisSaddlePairwise <- function(beta, fi) { # compute second Mayer cluster integral G <- Mayer(fi) if(is.null(G) || !is.finite(G)) stop("Internal error in computing Mayer cluster integral") if(G < 0) stop(paste("Unable to apply Poisson-saddlepoint approximation:", "Mayer cluster integral is negative")) ## solve if(is.im(beta)) { lambda <- if(G == 0) beta else eval.im(LambertW(G * beta)/G) } else { lambda <- if(G == 0) beta else (LambertW(G * beta)/G) if(length(lambda) == 1) lambda <- unname(lambda) } return(lambda) } # Lambert's W-function LambertW <- local({ yexpyminusx <- function(y,x){y*exp(y)-x} W <- function(x) { result <- rep.int(NA_real_, length(x)) ok <- is.finite(x) & (x >= 0) if(requireNamespace("gsl", quietly=TRUE)) { result[ok] <- gsl::lambert_W0(x[ok]) } else { for(i in which(ok)) result[i] <- uniroot(yexpyminusx, c(0, x[i]), x=x[i])$root } return(result) } W }) PoisSaddleGeyer <- local({ PoisSaddleGeyer <- function(beta, fi) { gamma <- summary(fi)$sensible$param$gamma if(gamma == 1) return(beta) inte <- as.interact(fi) sat <- inte$par$sat R <- inte$par$r #' get probability distribution of Geyer statistic under reference model z <- Spatstat.Geyer.Nulldist # from sysdata if(is.na(m <- match(sat, z$sat))) stop(paste("Sorry, the Poisson-saddlepoint approximation", "is not implemented for Geyer models with sat =", sat), call.=FALSE) probmatrix <- z$prob[[m]] maxachievable <- max(which(colSums(probmatrix) > 0)) - 1 gammarange <- sort(c(1, gamma^maxachievable)) #' apply approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * gammarange lambdavalues[i] <- uniroot(diffapproxGeyer, ra, beta=beta.i, gamma=gamma, R=R, sat=sat, probmatrix=probmatrix)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxGeyer <- function(lambda, beta, gamma, R, sat, probmatrix) { lambda - approxEpoisGeyerT(lambda, beta, gamma, R, sat, probmatrix) } approxEpoisGeyerT <- function(lambda, beta=1, gamma=1, R=1, sat=1, probmatrix) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for Geyer #' ('probmatrix' contains distribution of geyerT(0, Z_n) for each n, #' where 'sat' is given, and Z_n is runifdisc(n, radius=2*R). 
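#' Row n+1 of 'probmatrix' gives P(T = t | N = n) for t = 0,...,ncol-1;
#' weighting these rows by the Poisson distribution of N, with mean
#' lambda * pi * (2R)^2, yields E[gamma^T] as a bilinear form below.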
possT <- 0:(ncol(probmatrix)-1) possN <- 0:(nrow(probmatrix)-1) pN <- dpois(possN, lambda * pi * (2*R)^2) EgamT <- pN %*% probmatrix %*% (gamma^possT) #' assume that, for n > max(possN), #' distribution of T is concentrated on T=sat EgamT <- EgamT + (gamma^sat) * (1-sum(pN)) return(beta * EgamT) } PoisSaddleGeyer }) PoisSaddleArea <- local({ PoisSaddleArea <- function(beta, fi) { eta <- summary(fi)$sensible$param$eta if(eta == 1) return(beta) etarange <- range(c(eta^2, 1.1, 0.9)) inte <- as.interact(fi) R <- inte$par$r #' reference distribution of canonical sufficient statistic zeroprob <- Spatstat.Area.Zeroprob areaquant <- Spatstat.Area.Quantiles # expectation of eta^A_n for each n = 0, 1, .... EetaAn <- c(1/eta, zeroprob + (1-zeroprob) * colMeans((eta^(-areaquant)))) #' compute approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * etarange lambdavalues[i] <- uniroot(diffapproxArea, ra, beta=beta.i, eta=eta, r=R, EetaAn=EetaAn)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxArea <- function(lambda, beta, eta, r, EetaAn) { lambda - approxEpoisArea(lambda, beta, eta, r, EetaAn) } approxEpoisArea <- function(lambda, beta=1, eta=1, r=1, EetaAn) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for AreaInter mu <- lambda * pi * (2*r)^2 zeta <- pi^2/2 - 1 theta <- -log(eta) zetatheta <- zeta * theta #' contribution from tabulated values Nmax <- length(EetaAn) - 1L possN <- 0:Nmax qN <- dpois(possN, mu) # expectation of eta^A when N ~ poisson (truncated) EetaA <- sum(qN * EetaAn) #' asymptotics for quite large n Nbig <- qpois(0.999, mu) qn <- 0 if(Nbig > Nmax) { n <- (Nmax+1):Nbig #' asymptotic mean uncovered area conditional on this being positive mstarn <- (16/((n+3)^2)) * exp(n * (1/4 - log(4/3))) ztm <- zetatheta * mstarn ok <- (ztm < 1) if(!any(ok)) { Nbig <- Nmax qn <- 0 } else { if(!all(ok)) { Nbig <- max(which(!ok)) - 1 n <- (Nmax+1):Nbig ztm <- ztm[1:((Nbig-Nmax)+1)] } qn <- dpois(n, mu) #' asymptotic probability of complete coverage pstarn <- 1 - pmin(1, 3 * (1 + n^2/(16*pi)) * exp(-n/4)) Estarn <- (1 - ztm)^(-1/zeta) EetaA <- EetaA + sum(qn * (pstarn + (1-pstarn) * Estarn)) } } #' for very large n, assume complete coverage, so A = 0 EetaA <- EetaA + 1 - sum(qN) - sum(qn) return(beta * eta * EetaA) } PoisSaddleArea }) ## The following was contributed by Frederic Lavancier ## hacked by Adrian DPPSaddle <- function(beta, fi) { if(interactionorder(fi) != 2) stop("DPP approximation is only available for pairwise interactions") if(!is.numeric(beta)) stop("DPP approximation is only available for stationary models") if(length(beta) > 1) stop("DPP approximation is not available for multitype models") DPPSaddlePairwise(beta, fi) } DPPSaddlePairwise <- function(beta, fi) { stopifnot(inherits(fi, "fii")) ## second Mayer cluster integral G <- Mayer(fi) if (is.null(G) || !is.finite(G)) stop("Internal error in computing Mayer cluster integral") if(G < (-exp(-1)/beta)){ warning(paste("The second Mayer cluster integral", "is less than -exp(-1)/beta,", "which may lead to an unreliable solution.")) } ## integral of (1-g)^2 G2 <- Mayer(fi, exponent=2) if (is.null(G2) || !is.finite(G2)) stop("Internal error in computing integral of (1-g)^2") ## hard core distance hc <- hardcoredist(fi) ## interaction range R <- reach(fi, epsilon=0.001) if(!is.finite(R)) stop("Unable to 
determine a finite range of interaction") ## solve Bhc <- pi * hc^2 BR <- pi*R^2 kappa<-(G2-Bhc)/(BR-Bhc) fun <-function(x) { log(beta) + (1+x*Bhc)*log(1-x*Bhc/(1+x*Bhc)) + (1+x*(G-Bhc)/kappa)*log(1-x*(G-Bhc)/(1+x*(G-Bhc)/kappa)) - log(x) } lambda <- uniroot(fun,c(0,2*beta))$root return(lambda) } Mayer <- function(fi, exponent=1){ stopifnot(inherits(fi, "fii")) ## compute second Mayer cluster integral for a PAIRWISE interaction if(exponent == 1) { ## check if there is an analytic expression inte <- as.interact(fi) MayerCode <- inte$Mayer if(is.function(MayerCode)) { ## interaction coefficients co <- with(fi, coefs[Vnames[!IsOffset]]) z <- MayerCode(co, inte) # sic return(z) } } ## No specialised code provided. if(interactionorder(fi) != 2) stop("Mayer() is only defined for pairwise interactions") ## Compute by numerical integration f <- function(x) { log.g <- evalPairwiseTerm(fi, x) z <- 2 * pi * x * ifelse(is.finite(log.g), (1 - exp(log.g))^exponent, 1) return(z) } R <- reach(fi) M <- integrate(f,0,R)$value return(M) } spatstat.model/R/inforder.family.R0000644000176200001440000000655414331173073016644 0ustar liggesusers# # # inforder.family.R # # $Revision: 1.3 $ $Date: 2022/05/23 02:33:06 $ # # Family of `infinite-order' point process models # # inforder.family: object of class 'isf' # # # ------------------------------------------------------------------- # inforder.family <- list( name = "inforder", order = Inf, print = function(self) { cat("Family of infinite-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `inforder' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `inforder' family of interactions with infinite order, # there are no structures common to all interactions. # So this function simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) 
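# Validate the shape of the value returned by the potential:
# a vector of length npoints(U) is coerced to a one-column matrix,
# and anything else of the wrong dimension is an internal error.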
if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(inforder.family) <- "isf" spatstat.model/R/satpiece.R0000644000176200001440000001120014243551505015333 0ustar liggesusers# # # satpiece.S # # $Revision: 1.17 $ $Date: 2018/03/15 07:37:41 $ # # Saturated pairwise interaction process with piecewise constant potential # # SatPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # SatPiece <- local({ # ..... auxiliary functions ...... delSP <- function(i, r, sat) { r <- r[-i] sat <- sat[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(SatPiece(r, sat)) } # ....... template object .......... BlankSatPiece <- list( name = "piecewise constant Saturated pairwise interaction process", creator = "SatPiece", family = "pairsat.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction thresholds", "saturation parameters"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(any(ceiling(sat) != floor(sat))) warning("saturation parameter has a non-integer value") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r ok <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(ok)) return(NULL) if(!any(ok)) return(Poisson()) bad <- !ok if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delSP(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delSP, r=r, sat=sat) return(subs) } }, irange = 
function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL # added later ) class(BlankSatPiece) <- "interact" SatPiece <- function(r, sat) { instantiate.interact(BlankSatPiece, list(r=r, sat=sat)) } SatPiece <- intermaker(SatPiece, BlankSatPiece) SatPiece }) spatstat.model/R/geyer.R0000644000176200001440000003477514331173073014675 0ustar liggesusers# # # geyer.S # # $Revision: 2.46 $ $Date: 2022/05/21 08:53:38 $ # # Geyer's saturation process # # Geyer() create an instance of Geyer's saturation process # [an object of class 'interact'] # # Geyer <- local({ # .......... template .......... BlankGeyer <- list( name = "Geyer saturation process", creator = "Geyer", family = "pairsat.family", # evaluated later pot = function(d, par) { (d <= par$r) # same as for Strauss }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction distance","saturation parameter"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") if(!is.numeric(sat) || length(sat) != 1 || sat < 0) stop("saturation parameter sat must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Geyer saturation process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r sat <- inter$par$sat xlim <- resolve.1.default(list(xlim=c(0, 1.25 * r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * sat * (d <= r)) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) sat <- self$par$sat return(is.finite(loggamma) && (is.finite(sat) || loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(any(!is.na(coeffs))) { loggamma <- coeffs[1L] if(!is.na(loggamma) && (abs(loggamma) <= epsilon)) return(0) } return(2 * r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ..., halfway=FALSE, check=TRUE) { #' fast evaluator for Geyer interaction dont.complain.about(splitInf) if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Geyer") r <- potpars$r sat <- potpars$sat # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=check) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # determine saturated pair counts counts <- strausscounts(U, X, r, EqualPairs) satcounts <- pmin.int(sat, counts) satcounts <- matrix(satcounts, ncol=1) if(halfway) { # trapdoor used by suffstat() answer <- satcounts } else if(sat == Inf) { # no saturation: fast code answer <- 2 * satcounts } else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs) answer <- satcounts + change answer <- matrix(answer, ncol=1) } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) }, delta2 = function(X,inte,correction, ..., sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # h(X[i] | X) - h(X[i] | X[-j]) # Geyer interaction if(correction == "periodic") return(NULL) r <- inte$par$r sat <- inte$par$sat result <- geyerdelta2(X, r, sat, correction=correction, sparseOK=sparseOK) return(result) } ) class(BlankGeyer) <- "interact" Geyer <- function(r, sat) { instantiate.interact(BlankGeyer, list(r = r, sat=sat)) } Geyer <- intermaker(Geyer, BlankGeyer) Geyer }) # ........... externally visible auxiliary functions ......... 
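#' Illustrative sketch (not part of the package API): the saturated
#' neighbour counts min(sat, t(x_i, X)) used by the Geyer interaction
#' can be computed directly for a whole pattern with spatstat.geom
#' primitives. The name '.sketchGeyerSatCounts' is hypothetical, for
#' exposition only; geyercounts() below is the incremental version
#' actually used during fitting.
.sketchGeyerSatCounts <- function(X, r, sat) {
  ## indices of all ordered pairs of distinct points closer than r
  cl <- closepairs(X, r, what="indices")
  ## number of r-close neighbours of each point of X
  counts <- tabulate(cl$i, nbins=npoints(X))
  ## truncate ('saturate') the neighbour counts at sat
  pmin(sat, counts)
}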
geyercounts <- function(U, X, r, sat, Xcounts, EqualPairs) { # evaluate effect of adding dummy point or deleting data point # on saturated counts of other data points stopifnot(is.numeric(r)) stopifnot(is.numeric(sat)) # for C calls we need finite numbers stopifnot(is.finite(r)) stopifnot(is.finite(sat)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oU <- fave.order(U$x) Xsort <- X[oX] Usort <- U[oU] nX <- npoints(X) nU <- npoints(U) Xcountsort <- Xcounts[oX] # inverse: data point i has sorted position i' = rankX[i] rankX <- integer(nX) rankX[oX] <- seq_len(nX) rankU <- integer(nU) rankU[oU] <- seq_len(nU) # map from quadrature points to data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xsortindex <- rankX[Xindex] Usortindex <- rankU[Uindex] Cmap <- rep.int(-1L, nU) Cmap[Usortindex] <- Xsortindex - 1L # call C routine zz <- .C(SM_Egeyer, nnquad = as.integer(nU), xquad = as.double(Usort$x), yquad = as.double(Usort$y), quadtodata = as.integer(Cmap), nndata = as.integer(nX), xdata = as.double(Xsort$x), ydata = as.double(Xsort$y), tdata = as.integer(Xcountsort), rrmax = as.double(r), ssat = as.double(sat), result = as.double(numeric(nU)), PACKAGE="spatstat.model") result <- zz$result[rankU] return(result) } geyerdelta2 <- local({ geyerdelta2 <- function(X, r, sat, ..., sparseOK=TRUE, correction="none") { ## Sufficient statistic for second order conditional intensity ## Geyer model stopifnot(is.numeric(sat) && length(sat) == 1 && sat > 0) ## X could be a ppp or quad. if(is.ppp(X)) { # evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. h(X[i]|X) - h(X[i]|X[-j]) where h is first order cif statistic return(geydelppp(X, r, sat, correction, sparseOK)) } else if(is.quad(X)) { # evaluate \Delta_{u_i} \Delta_{u_j} S(x) for quadrature points u_i, u_j return(geydelquad(X, r, sat, correction, sparseOK)) } else stop("Internal error: X should be a ppp or quad object") } geydelppp <- function(X, r, sat, correction, sparseOK) { # initialise nX <- npoints(X) result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX, nX)) ## identify all r-close pairs (ordered pairs i ~ j) unweighted <- correction %in% c("border", "none") if(unweighted) { a <- closepairs(X, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) ## count number of r-neighbours for each point tvals <- table(factor(I, levels=1:nX)) } else { a <- weightedclosepairs(X, r, correction=correction, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) wIJ <- a$weight wmatrix <- sparseMatrix(i=I,j=J,x=wIJ, dims=c(nX,nX)) wJI <- wmatrix[cbind(J,I)] ## total edge-correction weight over r-neighbours for each point tvals <- tapplysum(wIJ, list(factor(I, levels=1:nX))) } # Compute direct part # (arising when i~j) tI <- tvals[I] tJ <- tvals[J] if(unweighted) { result[IJ] <- pmin(sat, tI) - pmin(sat, tI - 1L) + pmin(sat, tJ) - pmin(sat, tJ - 1L) } else { result[IJ] <- pmin(sat, tI) - pmin(sat, tI - wIJ) + pmin(sat, tJ) - pmin(sat, tJ - wJI) } # Compute indirect part # (arising when i~k and j~k for another point k) # First find all such triples ord <- (I < J) vees <- edges2vees(I[ord], J[ord], nX) # evaluate contribution of (k, i, j) II <- vees$j JJ <- vees$k KK <- vees$i tKK <- tvals[KK] if(unweighted) { contribKK <- pmin(sat, tKK) - 2 * pmin(sat, tKK-1L) + pmin(sat, tKK-2L) } else { wKKII <- wmatrix[cbind(KK, II)] wKKJJ <- wmatrix[cbind(KK, JJ)] contribKK <- ( pmin(sat, tKK) - pmin(sat, tKK-wKKII) - pmin(sat, tKK-wKKJJ) + pmin(sat, tKK-wKKII-wKKJJ) ) } # for 
each (i, j), sum the contributions over k if(!sparseOK) { II <- factor(II, levels=1:nX) JJ <- factor(JJ, levels=1:nX) # was: # delta3 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta3[is.na(delta3)] <- 0 delta3 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta3 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nX, nX)) } # symmetrise and combine result <- result + delta3 + t(delta3) return(result) } geydelquad <- function(Q, r, sat, correction, sparseOK) { Z <- is.data(Q) U <- union.quad(Q) nU <- npoints(U) nX <- npoints(Q$data) result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU, nU)) ## identify all r-close pairs U[i], U[j] unweighted <- correction %in% c("none", "border") if(unweighted) { a <- closepairs(U, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) } else { a <- weightedclosepairs(U, r, correction=correction, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) wIJ <- a$weight wmatrix <- sparseMatrix(i=I, j=J, x=wIJ, dims=c(nU,nU)) wJI <- wmatrix[cbind(J,I)] } ## tag which ones are data points zI <- Z[I] zJ <- Z[J] # count t(U[i], X) IzJ <- I[zJ] JzJ <- J[zJ] if(unweighted) { tvals <- table(factor(IzJ, levels=1:nU)) } else { wzJ <- wIJ[zJ] tvals <- tapplysum(wzJ, list(factor(IzJ, levels=1:nU))) } ## Compute direct part ## (arising when U[i]~U[j]) tI <- tvals[I] tJ <- tvals[J] if(unweighted) { tIJ <- tI - zJ tJI <- tJ - zI result[IJ] <- pmin(sat, tIJ + 1L) - pmin(sat, tIJ) + pmin(sat, tJI + 1L) - pmin(sat, tJI) } else { tIJ <- tI - zJ * wIJ tJI <- tJ - zI * wJI result[IJ] <- pmin(sat, tIJ + wIJ) - pmin(sat, tIJ) + pmin(sat, tJI + wJI) - pmin(sat, tJI) } # Compute indirect part # (arising when U[i]~X[k] and U[j]~X[k] for another point X[k]) # First find all such triples # Group close pairs X[k] ~ U[j] by index k spl <- split(IzJ, factor(JzJ, levels=1:nX)) grlen <- lengths(spl) # Assemble list of triples U[i], X[k], U[j] # by expanding each pair U[i], X[k] JJ <- unlist(spl[JzJ]) II <- rep(IzJ, grlen[JzJ]) KK <- rep(JzJ, grlen[JzJ]) # remove identical pairs i = j ok <- II != JJ II <- II[ok] JJ <- JJ[ok] KK <- KK[ok] # evaluate contribution of each triple tKK <- tvals[KK] zII <- Z[II] zJJ <- Z[JJ] if(unweighted) { tKIJ <- tKK - zII - zJJ contribKK <- pmin(sat, tKIJ + 2L) - 2 * pmin(sat, tKIJ + 1L) + pmin(sat, tKIJ) } else { wKKII <- wmatrix[cbind(KK, II)] wKKJJ <- wmatrix[cbind(KK, JJ)] tKIJ <- tKK - zII * wKKII - zJJ * wKKJJ contribKK <- (pmin(sat, tKIJ + wKKII + wKKJJ) - pmin(sat, tKIJ + wKKII) - pmin(sat, tKIJ + wKKJJ) + pmin(sat, tKIJ)) } # for each (i, j), sum the contributions over k if(!sparseOK) { II <- factor(II, levels=1:nU) JJ <- factor(JJ, levels=1:nU) # was: # delta4 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta4[is.na(delta4)] <- 0 delta4 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta4 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nU, nU)) } # combine result <- result + delta4 return(result) } geyerdelta2 }) spatstat.model/R/dummify.R0000644000176200001440000000153414331173072015216 0ustar liggesusers# # dummify.R # # Convert a factor to a matrix of dummy variables, etc. 
# # $Revision: 1.6 $ $Date: 2022/01/04 05:30:06 $ # dummify <- function(x) { if(is.matrix(x) || is.data.frame(x)) { x <- as.data.frame(x) y <- do.call(data.frame, lapply(x, dummify)) return(as.matrix(y)) } # x is 1-dimensional if(is.complex(x)) return(as.matrix(data.frame(Re=Re(x), Im=Im(x)))) # convert factors etc if(is.character(x)) x <- factor(x) if(is.logical(x)) x <- factor(x, levels=c(FALSE,TRUE)) if(is.factor(x)) { # convert to dummy variables nx <- length(x) lev <- levels(x) y <- matrix(0L, nrow=nx, ncol=length(lev)) colnames(y) <- lev y[cbind(seq_len(nx), as.integer(x))] <- 1L return(y) } # convert to numeric y <- as.numeric(x) if(!is.matrix(y)) y <- matrix(y, ncol=1) return(y) } spatstat.model/R/triplets.R0000644000176200001440000001250114243551505015411 0ustar liggesusers# # # triplets.R # # $Revision: 1.18 $ $Date: 2018/03/15 07:37:41 $ # # The triplets interaction # # Triplets() create an instance of the triplets process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Triplets <- local({ DebugTriplets <- FALSE # define triplet potential TripletPotential <- function(X,U,EqualPairs,pars,correction, ...) { if(!all(ok <- correction %in% c("border", "none"))) { nbad <- sum(bad <- !ok) warning(paste(ngettext(nbad, "Correction", "Corrections"), commasep(sQuote(correction[bad])), ngettext(nbad, "is unavailable and was ignored", "are unavailable and were ignored"))) } # check that all points of X are included in U nX <- npoints(X) nU <- npoints(U) XinU <- if(length(EqualPairs) == 0) integer(0) else EqualPairs[,1] missX <- which(table(factor(XinU, levels=1:nX)) == 0) if((nmiss <- length(missX)) > 0) { # add missing points to (the end of) U U <- superimpose(U, X[missX], W=as.owin(X), check=FALSE) EqualPairs <- rbind(EqualPairs, cbind(missX, nU + 1:nmiss)) nU <- nU + nmiss } iXX <- EqualPairs[,1] iXU <- EqualPairs[,2] # construct map from X index to U index mapXU <- integer(nX) mapXU[iXX] <- iXU # construct map from U index to X index mapUX <- rep.int(NA_integer_, nU) mapUX[iXU] <- iXX # logical vector identifying which quadrature points are in X isdata <- rep.int(FALSE, nU) isdata[iXU] <- TRUE # identify all close pairs u, x r <- pars$r cp <- crosspairs(U, X, r, what="indices") if(DebugTriplets) cat(paste("crosspairs at distance", r, "yields", length(cp$i), "pairs\n")) IU <- cp$i J <- cp$j # map X index to U index JU <- mapXU[J] # Each (Xi, Xj) pair will appear twice - eliminate duplicates dupX <- isdata[IU] & isdata[JU] & (IU > JU) retain <- !dupX IU <- IU[retain] JU <- JU[retain] if(DebugTriplets) cat(paste(sum(dupX), "duplicate pairs removed\n")) # find all triangles tri <- edges2triangles(IU, JU, nU, friendly=isdata) if(DebugTriplets) cat(paste(nrow(tri), "triangles identified\n")) if(nrow(tri) == 0) { # there are no triangles; return vector of zeroes return(rep.int(0, nU-nmiss)) } # count triangles containing a given quadrature point tcount <- apply(tri, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nU) tcount <- .rowSums(tcount, nrow(tcount), ncol(tcount)) # select triangles consisting only of data points triX <- matrix(mapUX[tri], nrow=nrow(tri)) isX <- matrowall(!is.na(triX)) triX <- triX[isX, , drop=FALSE] # if(nrow(triX) > 0) { # count triangles of data points containing each given data point tXcount <- apply(triX, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nX) tXcount <- .rowSums(tXcount, nrow(tXcount), ncol(tXcount)) } else { # there are no triangles of data points tXcount <- rep.int(0, nX) } # 
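# combine: dummy points take their triangle counts from the full
# pattern, while data points take the counts from data-only triangles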
answer <- tcount answer[iXU] <- tXcount[iXX] if(DebugTriplets) cat(paste("Max suff stat: data ", max(tXcount), ", dummy ", max(tcount[isdata]), "\n", sep="")) # truncate to original size if(nmiss > 0) answer <- answer[-((nU-nmiss+1):nU)] return(answer) } # set up basic 'triplets' object except for family and parameters BlankTripletsObject <- list( name = "Triplets process", creator = "Triplets", family = "triplet.family", # evaluated later pot = TripletPotential, par = list(r=NULL), # filled in later parnames = "interaction distance", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { gamma <- ((self$interpret)(coeffs, self))$param$gamma return(is.finite(gamma) && (gamma <= 1)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL # to be added ) class(BlankTripletsObject) <- "interact" # define Triplets function Triplets <- function(r) { instantiate.interact(BlankTripletsObject, list(r=r)) } Triplets <- intermaker(Triplets, BlankTripletsObject) Triplets }) spatstat.model/R/evidenceslrm.R0000644000176200001440000001233414331173072016224 0ustar liggesusers#' #' evidenceslrm.R #' #' method for 'spatialCovariateEvidence' for class 'slrm' #' #' $Revision: 1.8 $ $Date: 2022/07/18 04:31:45 $ spatialCovariateEvidence.slrm <- function(model, covariate, ..., lambdatype=c("probabilities", "intensity"), jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL, raster.action=c("warn", "fatal", "ignore")) { lambdatype <- match.arg(lambdatype) raster.action <- match.arg(raster.action) if(raster.action != "ignore") { #' change of resolution is not supported raster.args <- intersect(c("eps", "dimyx"), names(list(...))) nra <- length(raster.args) if(nra > 0) { problem <- paste(ngettext(nra, "Argument", "Arguments"), commasep(sQuote(raster.args)), ngettext(nra, "implies", "imply"), "a change of spatial resolution, which is not supported") switch(raster.action, warn = warning(paste(problem, "-- ignored"), call.=FALSE), fatal = stop(problem), ignore = {}) } } #' evaluate covariate values at presence pixels and all pixels #' determine names if(is.null(modelname)) modelname <- short.deparse(substitute(model)) if(covNotNamed <- is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$CallInfo$responsename csr <- is.stationary(model) info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=TRUE, spacename="two dimensions") FIT <- model$Fit$FIT link <- model$CallInfo$link ## original point pattern X <- model$Data$response W <- Window(X) ## extract data from each pixel (or split pixel) df <- model$Data$df ## restrict to subset if required if(!is.null(subset)) { ok <- inside.owin(df$x, df$y, subset) df <- df[ok, drop=FALSE] X <- X[subset] W <- W[subset, drop=FALSE] } ## presence/absence values responsename <- 
model$CallInfo$responsename presence <- as.logical(df[[responsename]]) ## areas of pixels or split pixels pixelareas <- exp(df$logpixelarea) ## pixel centres as a point pattern P <- ppp(df$x, df$y, window=W) #' parse covariate argument if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covNotNamed <- FALSE covariate <- switch(covname, x=function(x,y) { x }, y=function(x,y) { y }, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(is.im(covariate)) { type <- "im" ZP <- safelookup(covariate, P) Z <- covariate[W, drop=FALSE] W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" ZP <- covariate(P$x, P$y) if(!all(is.finite(ZP))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) #' collapse function body to single string if(covNotNamed) covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' values of covariate at pixels or split pixels Zvalues <- ZP #'values of covariate at 'presence' pixels ZX <- Zvalues[presence] #' fitted probability/intensity values at all pixels or split pixels switch(lambdatype, probabilities = { lambda <- predict(FIT, newdata=df, type="response") }, intensity = { if(link == "cloglog") { linkvalues <- predict(FIT, newdata=df, type="link") lambda <- exp(linkvalues)/pixelareas } else { probs <- predict(FIT, newdata=df, type="response") lambda <- -log(1-probs)/pixelareas } }) #' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } lambdaname <- paste("the fitted", lambdatype) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- lambda[presence] #' lambda image(s) lambdaimage <- predict(model, window=W, type=lambdatype) #' wrap up values <- list(Zimage = Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelareas, ZX = ZX, type = type) return(list(values=values, info=info)) } spatstat.model/R/areainter.R0000644000176200001440000003143514331173072015521 0ustar liggesusers# # # areainter.R # # $Revision: 1.51 $ $Date: 2022/05/21 08:53:38 $ # # The area interaction # # AreaInter() create an instance of the area-interaction process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # AreaInter <- local({ #' area-interaction conditional intensity potential #' corresponds to potential -C(x) = n(x) - A(x)/\pi r^2 areapot <- function(X,U,EqualPairs,pars,correction, ..., W=as.owin(X)) { #' W is the window to which computations of area will be clipped. #' W=NULL is permissible, meaning no clipping. 
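#' The returned sufficient statistic at a location u is 1 - A(u)/(pi * r^2),
#' where A(u) is the area of the disc b(u, r) not covered by discs of
#' radius r centred at the other points of the pattern.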
uhoh <- !(correction %in% c("border", "none")) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste(ngettext(nuh, "Correction", "Corrections"), commasep(sQuote(correction[uhoh])), ngettext(nuh, "is not supported and was ignored", "are not supported and were ignored"))) } r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") #' if(Poly <- spatstat.options('areainter.polygonal')) { if(is.mask(W)) W <- as.polygonal(W) if(is.mask(Window(X))) Window(X) <- as.polygonal(Window(X)) if(is.mask(Window(U))) Window(U) <- as.polygonal(Window(U)) } n <- U$n areas <- numeric(n) #' dummy points dummies <- setdiff(seq_len(n), EqualPairs[,2L]) if(length(dummies)) areas[dummies] <- areaGain(U[dummies], X, r, W=W, exact=Poly) #' data points represented in U ii <- EqualPairs[,1L] jj <- EqualPairs[,2L] inborder <- (bdist.points(X[ii]) <= r) # sic #' points in border region need clipping if(any(inborder)) areas[jj[inborder]] <- areaLoss(X, r, subset=ii[inborder], exact=Poly) #' points in eroded region do not necessarily if(any(ineroded <- !inborder)) { areas[jj[ineroded]] <- areaLoss(X, r, subset=ii[ineroded], W=W, exact=Poly) } return(1 - areas/(pi * r^2)) } #' fractional area of overlap of two unit discs at distance 2 * z discOverlap <- function(z) { z <- pmax(pmin(z, 1), -1) (2/pi) * (acos(z) - z * sqrt(1 - z^2)) } # template object without family, par, version BlankAI <- list( name = "Area-interaction process", creator = "AreaInter", family = "inforder.family", # evaluated later pot = areapot, par = list(r = NULL), # to be filled in parnames = "disc radius", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("disc radius r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Area-interaction process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r xlim <- resolve.1.default(list(xlim=c(0, 1.25 * 2*r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * discOverlap(d/(2 * r))) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(quote(fun)), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1L]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", printable=signif(eta))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(anyNA(coeffs)) return(2 * r) logeta <- coeffs[1L] if(abs(logeta) <= epsilon) return(0) else return(2 * r) }, delta2 = function(X, inte, correction, ..., sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r areadelta2(X, r, sparseOK=sparseOK) }, version=NULL # to be added ) class(BlankAI) <- "interact" AreaInter <- function(r) { instantiate.interact(BlankAI, list(r=r)) } AreaInter <- intermaker(AreaInter, BlankAI) AreaInter }) areadelta2 <- local({ areadelta2 <- function(X, r, ..., sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(is.ppp(X)) return(areadelppp(X, r, ..., sparseOK=sparseOK)) else if(is.quad(X)) return(areadelquad(X, r, sparseOK=sparseOK)) else stop("internal error: X should be a ppp or quad object") } areadelppp <- function(X, r, algorithm=c("C", "nncross", "nnmap"), sparseOK=TRUE) { # Evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. h(X[i]|X) - h(X[i]|X[-j]) # where h is first order cif statistic algorithm <- match.arg(algorithm) nX <- npoints(X) sparseOK <- sparseOK result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX,nX)) if(nX < 2) return(result) if(algorithm == "C") { # use special purpose C routine # called once for each interacting pair of points xx <- X$x yy <- X$y cl <- closepairs(X, 2 * r, what="indices", twice=FALSE, neat=FALSE) I <- cl$i J <- cl$j eps <- r/spatstat.options("ngrid.disc") for(k in seq_along(I)) { i <- I[k] j <- J[k] # all neighbours of i Ki <- union(J[I==i], I[J==i]) # all neighbours of j Kj <- union(J[I==j], I[J==j]) # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C(SM_delta2area, xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE="spatstat.model") result[i,j] <- result[j,i] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } # non-C algorithms # confine attention to points which are interacting relevant <- (nndist(X) <= 2 * r) if(!all(relevant)) { if(any(relevant)) { # call self on subset Dok <- areadelppp(X[relevant], r, algorithm, sparseOK=sparseOK) result[relevant,relevant] <- Dok } return(result) } # .............. algorithm using interpreted code ........... 
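# Both interpreted algorithms rasterise the window and find the three
# nearest data points of each pixel: a pixel contributes its area to
# result[i,j] exactly when its two nearest data points are X[i] and X[j],
# both within distance r, while the third nearest lies beyond r.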
# sort pattern in increasing order of x sortX <- (algorithm == "nnmap") if(sortX) { oX <- fave.order(X$x) X <- X[oX] } # area calculation may be restricted to window W for efficiency W <- as.owin(X) U <- as.rectangle(W) # decide pixel resolution eps <- r/spatstat.options("ngrid.disc") npix <- prod(ceiling(sidelengths(U)/eps)) if(npix <= 2^20) { # do it all in one go tile <- list(NULL) } else { # divide into rectangular tiles B <- as.rectangle(W) ntile0 <- ceiling(npix/(2^20)) tile0area <- area(B)/ntile0 tile0side <- sqrt(tile0area) nx <- ceiling(sidelengths(B)[1L]/tile0side) ny <- ceiling(sidelengths(B)[2L]/tile0side) tile <- tiles(quadrats(B, nx, ny)) } result <- matrix(0, nX, nX) for(i in seq_len(length(tile))) { # form pixel grid Ti <- tile[[i]] Wi <- if(is.null(Ti)) W else intersect.owin(W, Ti) if(algorithm == "nncross") { # Trusted, slow algorithm using nncross Z <- as.mask(Wi, eps=eps) G <- as.ppp(rasterxy.mask(Z), U, check=FALSE) # compute 3 nearest neighbours in X of each grid point v <- nncross(G, X, k=1:3) # select pixels which have exactly 2 neighbours within distance r ok <- with(v, dist.3 > r & dist.2 <= r) if(any(ok)) { v <- v[ok, , drop=FALSE] # accumulate pixel counts -> areas counts <- with(v, table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX))) pixarea <- with(Z, xstep * ystep) result <- result + pixarea * (counts + t(counts)) } } else { # Faster algorithm using nnmap # compute 3 nearest neighbours in X of each grid point stuff <- nnmap(X, k=1:3, W=Wi, eps=eps, is.sorted.X=TRUE, sortby="x", outputarray=TRUE) dist.2 <- stuff$dist[2L,,] dist.3 <- stuff$dist[3L,,] which.1 <- stuff$which[1L,,] which.2 <- stuff$which[2L,,] ok <- (dist.3 > r & dist.2 <= r) if(any(ok)) { which.1 <- as.vector(which.1[ok]) which.2 <- as.vector(which.2[ok]) counts <- table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX)) pixarea <- attr(stuff, "pixarea") result <- result + pixarea * (counts + t(counts)) } } } if(sortX) { # map back to original ordering result[oX, oX] <- result } # normalise result <- result/(pi * r^2) return(result) } areadelquad <- function(Q, r, sparseOK=TRUE) { # Sufficient statistic for second order conditional intensity # Area-interaction model # Evaluate \Delta_{u_j} \Delta_{u_i} S(x) for quadrature points # answer is area(b(u[i],r) \cap b(u[j],r)\setminus \bigcup_k b(x[k],r)) # where k ranges over all indices that are not equivalent to u[i,j] U <- union.quad(Q) Z <- is.data(Q) nU <- npoints(U) xx <- U$x yy <- U$y # identify all close pairs of quadrature points cl <- closepairs(U, 2 * r, what="indices") I <- cl$i J <- cl$j # find neighbours in X of each quadrature point zJ <- Z[J] neigh <- split(J[zJ], factor(I[zJ], levels=1L:nU)) # result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU,nU)) eps <- r/spatstat.options("ngrid.disc") # for(k in seq_along(I)) { i <- I[k] j <- J[k] # all points of X close to U[i] Ki <- neigh[[i]] # all points of X close to U[j] Kj <- neigh[[j]] # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C(SM_delta2area, xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE="spatstat.model") result[i,j] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } areadelta2 }) 
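#' Illustrative usage sketch (for exposition only; not executed by the
#' package, and the radius value is hypothetical):
#'   fit <- ppm(cells ~ 1, AreaInter(r=0.07))
#'   intensity(fit)  # dispatches to intensity.ppm, which applies the
#'                   # Poisson-saddlepoint approximation via PoisSaddleArea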
spatstat.model/R/triplet.family.R0000644000176200001440000000642314243551505016514 0ustar liggesusers# # # triplet.family.R # # $Revision: 1.1 $ $Date: 2011/11/05 07:18:51 $ # # Family of `third-order' point process models # # triplet.family: object of class 'isf' # # # ------------------------------------------------------------------- # triplet.family <- list( name = "triplet", order = 3, print = function(self) { cat("Family of third-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `triplet' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # This function is currently modelled on 'inforder.family'. # It simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(triplet.family) <- "isf" spatstat.model/R/Kcom.R0000644000176200001440000003174614366621174014456 0ustar liggesusers# # Kcom.R # # model compensated K-function # # $Revision: 1.19 $ $Date: 2023/02/02 02:39:43 $ # Kcom <- local({ Kcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "isotropic", "translate"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), compute.var=TRUE, truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) 
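# crosspairs() returns the indices i (into U), j (into X) and the
# distance d for every pair with d(U[i], X[j]) <= rmax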
if(!is.null(model)) { stopifnot(is.ppm(model)) ## update 'model' using new point pattern 'object' e <- list2env(list(object=object), parent=model$callframe) fit <- update(model, Q=object, forcefit=TRUE, envir=e) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) restrict <- isTRUE(restrict) if(restrict && !conditional) { warning("restrict=TRUE ignored because conditional=FALSE", call.=FALSE) restrict <- FALSE } # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and window Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # selection of edge corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", ripley="isotropic", trans="translation", translate="translation", translation="translation", best="best"), multi=TRUE) correction <- implemented.for.K(correction, Win$type, correction.given) opt <- list(bord = any(correction == "border"), tran = any(correction == "translation"), ripl = any(correction == "isotropic")) if(sum(unlist(opt)) == 0) stop("No corrections selected") # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) # Throw away boundary data Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW lambda2 <- npts * (npts - 1)/(areaW^2) # adjustments to account for restricted domain of pseudolikelihood if(algo == "reweighted") { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # lambda2.used <- npts.used * (npts.used - 1)/(area.used^2) } else { npts.used <- npts area.used <- areaW # lambda.used <- lambda # lambda2.used <- lambda2 } # 'r' values rmaxdefault <- rmax.rule("K", if(restrict) Wfree else Win, npts/areaW) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) r <- breaks$r # nr <- length(r) rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame K <- data.frame(r=r, pois=pi * r^2) desc <- c("distance argument r", "expected %s for CSR") K <- fv(K, "r", substitute(K(r), NULL), "pois", , alim, c("r","%s[pois](r)"), desc, fname="K") ############### start computing ################## # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) resval <- with(resid, "increment") rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree resval <- resval[retain] rescts <- rescts[retain] } # close pairs of points # (quadrature point to data point) clos <- crosspairs(U, X, rmax, what="ijd") dIJ <- clos$d I <- clos$i J <- clos$j UI <- U[I] XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of 
distinct data points only # nDD <- sum(DD) # determine whether a quadrature point will be used in integral okI <- USED[I] if(spatstat.options("Kcom.remove.zeroes")) okI <- okI & !EIJ # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] #################################################### if(opt$bord) { # border method # Compute distances to boundary # (in restricted case, the window of U has been adjusted) b <- bdist.points(U) bI <- b[I] # reduced sample for K(r) of data only RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks) # Kb <- RSX$numerator/(lambda.used * RSX$denom.count) Kb <- RSX$numerator/(lambda * RSX$denom.count) K <- bind.fv(K, data.frame(border=Kb), "hat(%s)[bord](r)", nzpaste(algo, "border-corrected nonparametric estimate of %s"), "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[Z & USED], rep.int(1, npts.used), breaks, fatal=FALSE) # lambdaU <- (npts.used + 1)/area.used lambdaU <- (npts + 1)/areaW Kb <- RSD$numerator/((RSD$denominator + 1) * lambdaU) K <- bind.fv(K, data.frame(bcom=Kb), "bold(C)~hat(%s)[bord](r)", nzpaste("model compensator of", algo, "border-corrected %s"), "border") } if(opt$tran) { # translation correction edgewt <- switch(algo, classical = edge.Trans(UI, XJ, paired=TRUE), restricted = edge.Trans(UI, XJ, paired=TRUE), reweighted = edge.Trans.modif(UI, XJ, Win, Wfree, paired=TRUE)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) Ktrans <- cumsum(whDD)/(lambda2 * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s)[trans](r)", nzpaste(algo, "translation-corrected nonparametric estimate of %s"), "trans") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Ktrans <- cumsum(wh)/(lambda2U * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(tcom=Ktrans), "bold(C)~hat(%s)[trans](r)", nzpaste("model compensator of", algo, "translation-corrected %s"), "trans") } if(opt$ripl) { # Ripley isotropic correction edgewt <- edge.Ripley(UI, matrix(dIJ, ncol=1)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) # Kiso <- cumsum(whDD)/(lambda2.used * area.used) Kiso <- cumsum(whDD)/(lambda2 * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s)[iso](r)", nzpaste(algo, "isotropic-corrected nonparametric estimate of %s"), "iso") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Kiso <- cumsum(wh)/(lambda2U * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(icom=Kiso), "bold(C)~hat(%s)[iso](r)", nzpaste("model compensator of", algo, "isotropic-corrected %s"), "iso") # if(compute.var) { savedotnames <- fvnames(K, ".") # compute contribution to compensator from each quadrature point dOK <- dIJ[okI] eOK <- edgewt[okI] iOK <- I[okI] denom <- lambda2U * area.used variso <- varsumiso <- 0 * Kiso for(i in sortunique(iOK)) { relevant <- (iOK == i) tincrem <- whist(dOK[relevant], breaks$val, eOK[relevant]) localterm <- cumsum(tincrem)/denom variso <- variso + wc[i] * localterm^2 if(Z[i]) varsumiso <- varsumiso + localterm^2 } sdiso <- sqrt(variso) K <- bind.fv(K, data.frame(ivar=variso, isd =sdiso, ihi = 2*sdiso, ilo = -2*sdiso, ivarsum=varsumiso), c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[hi](r)", 
"bold(R)~hat(%s)[lo](r)", "hat(C)^2~hat(%s)[iso](r)"), c("Poincare variance of isotropic-corrected %s", "sqrt(Poincare variance) of isotropic-corrected %s", "upper critical band for isotropic-corrected %s", "lower critical band for isotropic-corrected %s", "data estimate of Poincare variance of %s"), "iso") # fvnames(K, ".") <- c(savedotnames, "isd") fvnames(K, ".") <- savedotnames } } # default is to display all corrections formula(K) <- . ~ r unitname(K) <- unitname(X) # secret tag used by 'Kres' attr(K, "maker") <- "Kcom" return(K) } # `reweighted' translation edge correction edge.Trans.modif <- function(X, Y=X, WX=X$window, WY=Y$window, exact=FALSE, paired=FALSE, trim=spatstat.options("maxedgewt")) { # computes edge correction factor # f = area(WY)/area(intersect.owin(WY, shift(WX, X[i] - Y[j]))) X <- as.ppp(X, WX) W <- X$window x <- X$x y <- X$y Y <- as.ppp(Y, WY) xx <- Y$x yy <- Y$y nX <- npoints(X) nY <- npoints(Y) if(paired && (nX != nY)) stop("X and Y should have equal length when paired=TRUE") # For irregular polygons, exact evaluation is very slow; # so use pixel approximation, unless exact=TRUE if(!exact) { if(WX$type == "polygonal") WX <- as.mask(WX) if(WY$type == "polygonal") WY <- as.mask(WX) } typeX <- WX$type typeY <- WY$type if(typeX == "rectangle" && typeY == "rectangle") { # Fast code for this case if(!paired) { DX <- abs(outer(x,xx,"-")) DY <- abs(outer(y,yy,"-")) } else { DX <- abs(xx - x) DY <- abs(yy - y) } A <- WX$xrange B <- WX$yrange a <- WY$xrange b <- WY$yrange # compute width and height of intersection wide <- pmin.int(a[2], A[2]+DX) - pmax(a[1], A[1]+DX) high <- pmin.int(b[2], B[2]+DY) - pmax(b[1], B[1]+DY) # edge correction weight weight <- diff(a) * diff(b) / (wide * high) if(!paired) weight <- matrix(weight, nrow=X$n, ncol=Y$n) } else if(typeX %in% c("rectangle", "polygonal") && typeY %in% c("rectangle", "polygonal")) { # This code is SLOW WX <- as.polygonal(WX) WY <- as.polygonal(WY) a <- area(W) if(!paired) { weight <- matrix(, nrow=nX, ncol=nY) if(nX > 0 && nY > 0) { for(i in seq_len(nX)) { X.i <- c(x[i], y[i]) for(j in seq_len(nY)) { shiftvector <- X.i - c(xx[j],yy[j]) WXshift <- shift(WX, shiftvector) b <- overlap.owin(WY, WXshift) weight[i,j] <- a/b } } } } else { nX <- npoints(X) weight <- numeric(nX) if(nX > 0) { for(i in seq_len(nX)) { shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i]) WXshift <- shift(WX, shiftvector) b <- overlap.owin(WY, WXshift) weight[i] <- a/b } } } } else { WX <- as.mask(WX) WY <- as.mask(WY) # make difference vectors if(!paired) { DX <- outer(x,xx,"-") DY <- outer(y,yy,"-") } else { DX <- x - xx DY <- y - yy } # compute set cross-covariance g <- setcov(WY,WX) # evaluate set cross-covariance at these vectors gvalues <- lookup.im(g, as.vector(DX), as.vector(DY), naok=TRUE, strict=FALSE) weight <- area(WY)/gvalues } # clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=X$n, ncol=Y$n) return(weight) } Kcom }) spatstat.model/R/mincontrast.R0000644000176200001440000011267314400312755016114 0ustar liggesusersc#' #' mincontrast.R #' #' Functions for estimation by minimum contrast #' #' $Revision: 1.123 $ $Date: 2022/11/13 06:45:30 $ #' ################## base ################################ safePositiveValue <- function(x, default=.Machine$double.eps) { ## ensure x is finite, positive, and acceptable to C routines ifelse(is.finite(x), pmin(.Machine$double.xmax, pmax(.Machine$double.eps, x)), default) } safeFiniteValue <- function(x, default=0) { ## ensure x is finite and 
acceptable to C routines biggest <- .Machine$double.xmax ifelse(is.finite(x), pmin(biggest, pmax(-biggest, x)), default) } bigvaluerule <- function(objfun, objargs, startpar, ...) { ## Determine suitable large number to replace Inf values of objective ## Evaluate objective at starting parameter vector startval <- do.call(objfun, list(par=startpar, objargs=objargs, ...)) ## check with(.Machine, { hugeval <- sqrt(double.xmax) * double.eps if(abs(startval) > hugeval) { warning(paste("Internal error: objective function returns huge value", paren(startval), "which may cause numerical problems"), call.=FALSE) return(sqrt(double.xmax)) } bigvalue <- min(hugeval, max(sqrt(hugeval), 1024 * abs(startval))) return(bigvalue) }) } mincontrast <- local({ ## objective function (in a format that is re-usable by other code) contrast.objective <- function(par, objargs, ...) { with(objargs, { theo <- theoretical(par=par, rvals, ...) if(!is.vector(theo) || !is.numeric(theo)) stop("theoretical function did not return a numeric vector") if(length(theo) != nrvals) stop("theoretical function did not return the correct number of values") ## experimental if(!is.null(adjustment)) { theo <- adjustment$fun(theo=theo, par=par, auxdata=adjustment$auxdata, ...) if(!is.vector(theo) || !is.numeric(theo)) stop("adjustment did not return a numeric vector") if(length(theo) != nrvals) stop("adjustment did not return the correct number of values") } ## experimental if(is.function(transfo)) theo <- transfo(theo) ## integrand of discrepancy discrep <- (abs(theo^qq - obsq))^pp ## protect C code from weird values bigvalue <- BIGVALUE + sqrt(sum(par^2)) discrep <- safePositiveValue(discrep, default=bigvalue) ## rescaled integral of discrepancy value <- mean(discrep) ## penalty if(is.function(penalty)) { straf <- do.call(penalty, append(list(par), penal.args)) value <- value + tau * straf } ## debugger (can also be activated by spatstat.options(mincon.trace)) if(isTRUE(TRACE)) { cat("Parameters:", fill=TRUE) print(par) splat("Discrepancy value:", value) } if(is.environment(saveplace)) { h <- get("h", envir=saveplace) hplus <- as.data.frame(append(par, list(value=value))) h <- rbind(h, hplus) assign("h", h, envir=saveplace) } return(value) }) } optionalGridSearch <- function(startpar, fn, objargs, pspace, verbose=FALSE) { nhalfgrid <- as.integer(pspace$nhalfgrid %orifnull% 0) check.1.integer(nhalfgrid) if(nhalfgrid <= 0) return(startpar) searchratio <- pspace$searchratio %orifnull% 2 check.1.real(searchratio) stopifnot(searchratio > 1) ra <- searchratio^((1:nhalfgrid)/nhalfgrid) ra <- c(rev(1/ra), 1, ra) nra <- length(ra) if(length(startpar) != 2) stop(paste("startpar has length", paste0(length(startpar), ";"), "expecting 2")) values <- matrix(-Inf, nra, nra) stapa <- startpar for(i in seq_along(ra)) { stapa[[1L]] <- startpar[[1L]] * ra[i] for(j in seq_along(ra)) { stapa[[2L]] <- startpar[[2L]] * ra[j] values[i,j] <- as.numeric(do.call(fn, list(par=stapa, objargs=objargs))) } } bestpos <- which.min(values) ibest <- row(values)[bestpos] jbest <- col(values)[bestpos] bestpar <- stapa bestpar[[1L]] <- startpar[[1L]] * ra[ibest] bestpar[[2L]] <- startpar[[2L]] * ra[jbest] if(verbose) { splat("Initial starting parameters:") print(startpar) splat("Modified starting parameters after search:") print(bestpar) } return(bestpar) } mincontrast <- function(observed, theoretical, startpar, ..., ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL,
fname=NULL), action.bad.values=c("warn", "stop", "silent"), control=list(), stabilize=TRUE, pspace=NULL) { verifyclass(observed, "fv") action.bad.values <- match.arg(action.bad.values) stopifnot(is.function(theoretical)) if(!any("par" %in% names(formals(theoretical)))) stop(paste("Theoretical function does not include an argument called", sQuote("par"))) ## enforce defaults ctrl <- resolve.defaults(ctrl, list(q = 1/4, p = 2, rmin=NULL, rmax=NULL)) fvlab <- resolve.defaults(fvlab, list(label=NULL, desc="minimum contrast fit")) explain <- resolve.defaults(explain, list(dataname=NULL, modelname=NULL, fname=NULL)) ## extract vector of r values argu <- fvnames(observed, ".x") rvals <- observed[[argu]] ## determine range of r values rmin <- ctrl$rmin rmax <- ctrl$rmax if(!is.null(rmin) && !is.null(rmax)) stopifnot(rmin < rmax && rmin >= 0) else { alim <- attr(observed, "alim") %orifnull% range(rvals) if(is.null(rmax)) rmax <- alim[2] if(is.null(rmin)) { rmin <- alim[1] if(rmin == 0 && identical(explain$fname,"g")) rmin <- rmax/1e3 # avoid artefacts at zero in pcf } } ## extract vector of observed values of statistic valu <- fvnames(observed, ".y") obs <- observed[[valu]] ## restrict to [rmin, rmax] if(max(rvals) < rmax) stop(paste("rmax=", signif(rmax,4), "exceeds the range of available data", "= [", signif(min(rvals),4), ",", signif(max(rvals),4), "]"), call.=FALSE) sub <- (rvals >= rmin) & (rvals <= rmax) rvals <- rvals[sub] obs <- obs[sub] ## sanity clause if(!all(ok <- is.finite(obs))) { doomed <- !any(ok) if(!doomed && all(ok[-1])) { ## common case: all finite except the value for r=0 whinge <- paste("The value of the empirical function", sQuote(explain$fname), "for r=", rvals[1], "was", paste0(obs[1], ".")) if(action.bad.values == "stop") stop(whinge, call.=FALSE) iMIN <- 2 iMAX <- length(obs) success <- TRUE } else { ## general case: some non-finite values whinge <- paste(if(doomed) "All" else "Some", "values of the empirical function", sQuote(explain$fname), "were infinite, NA or NaN.") if(doomed || action.bad.values == "stop") stop(whinge, call.=FALSE) ## trim each end of domain ra <- range(which(ok)) iMIN <- ra[1] iMAX <- ra[2] success <- all(ok[iMIN:iMAX]) } if(!success) { ## Finite and non-finite values are interspersed; ## find the longest run of finite values z <- rle(ok) k <- which.max(z$lengths * z$values) ## Run must be at least half of the data if(2 * z$lengths[k] > length(ok)) { csl <- cumsum(z$lengths) iMAX <- csl[k] iMIN <- 1L + if(k == 1) 0 else csl[k-1] success <- TRUE } } if(success) { ## accept trimmed domain rmin <- rvals[iMIN] rmax <- rvals[iMAX] obs <- obs[iMIN:iMAX] rvals <- rvals[iMIN:iMAX] sub[sub] <- ok if(action.bad.values == "warn") { warning(paste(whinge, "Range of r values was reset to", prange(c(rmin, rmax))), call.=FALSE) } } else stop(paste(whinge, "Unable to recover.", "Please choose a narrower range [rmin, rmax]"), call.=FALSE) } ## debugging TRACE <- pspace$trace %orifnull% spatstat.options("mincon.trace") if(SAVE <- isTRUE(pspace$save)) { saveplace <- new.env() assign("h", NULL, envir=saveplace) } else saveplace <- NULL ## adjustment of theoretical summary function adjustment <- pspace$adjustment if(!is.null(adjustment)) { check.named.list(adjustment, c("fun", "auxdata"), xtitle="adjustment") stopifnot(is.function(adjustment$fun)) } ## penalty for parameter value penalty <- pspace$penalty penal.args <- pspace$penal.args tau <- pspace$tau %orifnull% 1 if(!is.null(penalty)) stopifnot(is.function(penalty)) ## experimental: custom transformation of summary 
function transfo <- pspace$transfo if(is.function(transfo)) { obs <- try(transfo(obs)) if(inherits(obs, "try-error")) stop("Transformation of observed summary function failed", call.=FALSE) if(length(obs) != length(rvals)) stop(paste("Transformation of observed summary function values", "changed length", paren(paste(length(rvals), "to", length(obs)))), call.=FALSE) } else transfo <- NULL ## pack data into a list objargs <- list(theoretical = theoretical, rvals = rvals, nrvals = length(rvals), obsq = obs^(ctrl$q), ## for efficiency qq = ctrl$q, pp = ctrl$p, rmin = rmin, rmax = rmax, adjustment = adjustment, penalty = penalty, penal.args = penal.args, tau = tau, transfo = transfo, saveplace = saveplace, TRACE = TRACE, BIGVALUE = 1) ## determine a suitable large number to replace Inf values of objective objargs$BIGVALUE <- bigvaluerule(contrast.objective, objargs, startpar, ...) ## secret option to evaluate the contrast objective at a specific point if(!is.null(evalpar <- list(...)$evalpar)) { value <- contrast.objective(evalpar, objargs, ...) return(value) } ## experimental code to improve starting value startpar <- optionalGridSearch(startpar, fn=contrast.objective, objargs=objargs, pspace=pspace) ## ................... optimization algorithm control parameters ....................... if(stabilize) { ## Numerical stabilisation ## evaluate objective at starting state startval <- contrast.objective(startpar, objargs, ...) ## use to determine appropriate global scale smallscale <- sqrt(.Machine$double.eps) fnscale <- max(abs(startval), smallscale) parscale <- pmax(abs(startpar), smallscale) scaling <- list(fnscale=fnscale, parscale=parscale) } else { scaling <- list() } control <- resolve.defaults(control, scaling, list(trace=0)) ## ..................................................................................... ## >>>>>>>>>>>>>>>>> . . . . O P T I M I Z E . . . . <<<<<<<<<<<<<<<<<< minimum <- optim(startpar, fn=contrast.objective, objargs=objargs, ..., control=control) ## ..................................................................................... ## if convergence failed, issue a warning signalStatus(optimStatus(minimum), errors.only=TRUE) ## evaluate the fitted theoretical curve fittheo <- theoretical(minimum$par, rvals, ...) ## pack it up as an `fv' object label <- fvlab$label %orifnull% "%s[fit](r)" desc <- fvlab$desc fitfv <- bind.fv(observed[sub, ], data.frame(fit=fittheo), label, desc) if(!is.null(adjustment)) { adjtheo <- adjustment$fun(theo=fittheo, par=minimum$par, auxdata=adjustment$auxdata, ...) fitfv <- bind.fv(fitfv, data.frame(adjfit=adjtheo), "%s[adjfit](r)", paste("adjusted", desc)) } result <- list(par = minimum$par, fit = fitfv, opt = minimum, ctrl = list(p=ctrl$p,q=ctrl$q,rmin=rmin,rmax=rmax), info = explain, startpar = startpar, objfun = contrast.objective, objargs = objargs, dotargs = list(...), pspace = pspace) class(result) <- c("minconfit", class(result)) if(SAVE) attr(result, "h") <- get("h", envir=saveplace) return(result) } mincontrast }) print.minconfit <- function(x, ...) 
{ terselevel <- spatstat.options('terse') digits <- getOption('digits') ## explanatory cat(paste("Minimum contrast fit ", "(", "object of class ", dQuote("minconfit"), ")", "\n", sep="")) mo <- x$info$modelname fu <- x$info$fname da <- x$info$dataname cm <- x$covmodel if(!is.null(mo)) cat("Model:", mo, fill=TRUE) if(!is.null(cm)) { ## Covariance/kernel model and nuisance parameters cat("\t", cm$type, "model:", cm$model, fill=TRUE) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\t", cm$type, "parameters:", paste(tagvalue, collapse=", ")) } } if(!is.null(fu)) { if(length(fu) > 1) { ## compress names like c("K", "inhom") -> "K[inhom]" fsub <- paste(fu[-1], collapse=",") fu <- paste0(fu[1], paren(fsub, "[")) } if(!is.null(da)) { splat("Fitted by matching theoretical", fu, "function to", da) } else { splat(" based on", fu) } } else if(!is.null(da)) { splat(" fitted to", da) } if(waxlyrical('space', terselevel)) cat("\n") ## Values splat("Internal parameters fitted by minimum contrast ($par):") print(x$par, ...) if(waxlyrical('space', terselevel)) cat("\n") ## Handling new parameters isPCP <- x$isPCP %orifnull% x$internal$model!="lgcp" cpar <- x$clustpar if (!is.null(cpar)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(cpar, digits=digits) } else{ ## Old modelpar field if necessary mp <- x$modelpar if(!is.null(mp)) { splat("Derived parameters of", if(!is.null(mo)) mo else "model", "($modelpar):") print(mp) } } if(length(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(is.numeric(mu)) paste(signif(mu, digits), "points") else if(is.im(mu)) "[pixel image]" else "[unknown]") } else { splat("Fitted mean of log of random intensity: ", if(is.numeric(mu)) signif(mu, digits) else if(is.im(mu)) "[pixel image]" else "[unknown]") } } if(waxlyrical('space', terselevel)) cat("\n") ## Diagnostics printStatus(optimStatus(x$opt)) ## Starting values if(waxlyrical('gory', terselevel)){ cat("\n") splat("Starting values of parameters:") print(x$startpar) ## Algorithm parameters ct <- x$ctrl splat("Domain of integration:", "[", signif(ct$rmin,4), ",", signif(ct$rmax,4), "]") splat("Exponents:", "p=", paste(signif(ct$p, 3), ",", sep=""), "q=", signif(ct$q,3)) } invisible(NULL) } plot.minconfit <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) xf <- x$fit dont.complain.about(xf) do.call(plot.fv, resolve.defaults(list(quote(xf)), list(...), list(main=xname))) } unitname.minconfit <- function(x) { unitname(x$fit) } "unitname<-.minconfit" <- function(x, value) { unitname(x$fit) <- value return(x) } as.fv.minconfit <- function(x) x$fit ###### convergence status of 'optim' object optimConverged <- function(x) { x$convergence == 0 } optimNsteps <- function(x) { x$counts[["function"]] } optimStatus <- function(x, call=NULL) { cgce <- x$convergence neval <- x$counts[["function"]] switch(paste(cgce), "0" = { simpleMessage( paste("Converged successfully after", neval, "function evaluations"), call) }, "1" = simpleWarning( paste("Iteration limit maxit was reached after", neval, "function evaluations"), call), "10" = simpleWarning("Nelder-Mead simplex was degenerate", call), "51"= { simpleWarning( paste("Warning message from L-BFGS-B method:", sQuote(x$message)), call) }, "52"={ simpleError( paste("Error message from L-BFGS-B method:", sQuote(x$message)), call) }, simpleWarning(paste("Unrecognised error code", cgce), call) ) } #' general code for collecting status reports signalStatus <- function(x, errors.only=FALSE) { if(is.null(x)) return(invisible(NULL)) stopifnot(inherits(x, "condition")) if(inherits(x, "error")) stop(x) if(inherits(x, "warning")) warning(x) if(inherits(x, "message") && !errors.only) message(x) return(invisible(NULL)) } printStatus <- function(x, errors.only=FALSE) { if(is.null(x)) return(invisible(NULL)) prefix <- if(inherits(x, "error")) "error: " else if(inherits(x, "warning")) "warning: " else NULL if(!is.null(prefix) || !errors.only) cat(paste(prefix, conditionMessage(x), "\n", sep="")) return(invisible(NULL)) } accumulateStatus <- function(x, stats=NULL) { values <- stats$values %orifnull% list() frequencies <- stats$frequencies %orifnull% integer(0) if(inherits(x, c("error", "warning", "message"))) { same <- unlist(lapply(values, identical, y=x)) if(any(same)) { i <- min(which(same)) frequencies[i] <- frequencies[i] + 1L } else { values <- append(values, list(x)) frequencies <- c(frequencies, 1L) } } stats <- list(values=values, frequencies=frequencies) return(stats) } printStatusList <- function(stats) { with(stats, { for(i in seq_along(values)) { printStatus(values[[i]]) fi <- frequencies[i] splat("\t", paren(paste(fi, ngettext(fi, "time", "times")))) } } ) invisible(NULL) } ############### applications (specific models) ################## getdataname <- function(defaultvalue, ..., dataname=NULL) { if(!is.null(dataname)) dataname else defaultvalue } thomas.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Thomas") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Thomas process"), ...)
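## mincontrast() returns the fitted parameters on the internal scale used
## by the theoretical K-function; relabel them before interpretation.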
## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer meaningful model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) return(result) } lgcp.estK <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar, native=TRUE) ## digest parameters of Covariance model and test validity cmodel <- do.call(info$resolveshape, covmodel)$covmodel theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) result$clustargs <- info$outputshape(cmodel$margs) return(result) } matclust.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Matern Cluster process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) return(result) } ## versions using pcf (suggested by Jan Wild) thomas.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
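## X may be a pair correlation function (an 'fv' object) or a point
## pattern; in the latter case the pcf is first estimated from X.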
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Thomas") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list( label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list( dataname=dataname, fname=attr(g, "fname"), modelname="Thomas process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) return(result) } matclust.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Matern Cluster process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) return(result) } lgcp.estpcf <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar, native=TRUE) ## digest parameters of Covariance model and test validity cmodel <- do.call(info$resolveshape, covmodel)$covmodel theoret <- info$pcf result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) result$clustargs <- info$outputshape(cmodel$margs) return(result) } cauchy.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$K desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) return(result) } cauchy.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) return(result) } vargamma.estK <- function(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. if(missing(nu)) nu <- resolve.vargamma.shape(..., allow.default = TRUE)$nu.ker check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$K ## test validity of parameter nu and digest cmodel <- info$resolveshape(nu.ker=nu)$covmodel margs <- cmodel$margs desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Variance Gamma process"), margs=margs, ...) 
## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) result$clustargs <- info$outputshape(cmodel$margs) return(result) } vargamma.estpcf <- function(X, startpar=c(kappa=1,scale=1), nu=-1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. if(missing(nu)) nu <- resolve.vargamma.shape(..., allow.default = TRUE)$nu.ker check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar, native=TRUE) theoret <- info$pcf ## test validity of parameter nu and digest cmodel <- info$resolveshape(nu.ker=nu)$covmodel margs <- cmodel$margs ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Variance Gamma process"), margs=margs, ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## parameters in standard form result$clustpar <- info$checkpar(par, native=FALSE) result$clustargs <- info$outputshape(cmodel$margs) return(result) } spatstat.model/R/exactMPLEstrauss.R0000644000176200001440000000400314331173073016746 0ustar liggesusers# # exactMPLEstrauss.R # # 'exact' MPLE for stationary Strauss process # # $Revision: 1.7 $ $Date: 2022/01/04 05:30:06 $ # exactMPLEstrauss <- local({ # main function exactMPLEstrauss <- function(X, R, ngrid=2048, plotit=FALSE, project=TRUE) { # n <- npoints(X) W <- as.owin(X) # border correction WminR <- erosion(W, R) bR <- (bdist.points(X) >= R) nR <- sum(bR) # evaluate neighbour counts for data points Tcounts <- crosspaircounts(X, X, R) - 1L sumT <- sum(Tcounts[bR]) # determine the coefficients a_k for k = 0, 1, ... Z <- scanmeasure(X, R, dimyx=ngrid) Z <- Z[WminR, drop=FALSE] kcounts <- tabulate(as.vector(Z$v) + 1L) pixarea <- with(Z, xstep * ystep) A <- kcounts * pixarea # find optimal log(gamma) op <- optim(log(0.5), lpl, sco, method="L-BFGS-B", control=list(fnscale=-1), lower=-Inf, upper=if(project) 0 else Inf, A=A, sumT=sumT, nR=nR) loggamma <- op$par # plot? 
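# if requested, plot the profile log pseudolikelihood lpl(theta)
# as a function of theta = log(gamma), marking the optimum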
if(plotit) { x <- seq(log(1e-4), if(project) 0 else log(1e4), length=512) plot(x, lpl(x, A, sumT, nR), type="l", xlab=expression(log(gamma)), ylab=expression(log(PL(gamma)))) abline(v=loggamma, lty=3) } # derive optimal beta kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, loggamma)) beta <- nR/polypart logbeta <- log(beta) result <- c(logbeta, loggamma) names(result) <- c("(Intercept)", "Interaction") return(result) } # helper functions (vectorised) # log pseudolikelihood lpl <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, theta)) nR * (log(nR) - log(polypart) - 1) + theta * sumT } # pseudoscore sco <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <- length(A) - 1L kseq <- 0:kmax mat <- exp(outer(kseq, theta)) polypart <- A %*% mat Dpolypart <- (A * kseq) %*% mat sumT - nR * Dpolypart/polypart } exactMPLEstrauss }) spatstat.model/R/strausshard.R0000644000176200001440000001623714331173073016116 0ustar liggesusers# # # strausshard.S # # $Revision: 2.38 $ $Date: 2022/05/23 02:33:06 $ # # The Strauss/hard core process # # StraussHard() create an instance of the Strauss-hardcore process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # StraussHard <- local({ BlankStraussHard <- list( name = "Strauss - hard core process", creator = "StraussHard", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- (d <= par$r) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL), # filled in later parnames = c("interaction distance", "hard core distance"), hasInf = TRUE, selfstart = function(X, self) { # self starter for StraussHard nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } r <- self$par$r md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "hard core must be zero")) return(StraussHard(r=r, hc=0)) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) StraussHard(r=r, hc = hcX) }, init = function(self) { r <- self$par$r hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc)) { if(!is.numeric(hc) || hc <= 0) stop("hard core distance hc must be a positive number, or NA") if(!is.numeric(r) || length(r) != 1 || r <= hc) stop("interaction distance r must be a number greater than hc") } }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma)) }, project = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) if(is.finite(loggamma)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r hc <- self$par$hc if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(hc) else return(r) }, hardcore = function(self, ...) 
{ return(self$par$hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { #' fast evaluator for StraussHard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for StraussHard") r <- potpars$r hc <- potpars$hc hclose <- (strausscounts(U, X, hc, EqualPairs) != 0) rclose <- strausscounts(U, X, r, EqualPairs) if(!splitInf) { answer <- ifelseAX(hclose, -Inf, rclose) answer <- matrix(answer, ncol=1) } else { answer <- ifelseAX(hclose, 0, rclose) answer <- matrix(answer, ncol=1) attr(answer, "-Inf") <- hclose } return(answer) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r hc <- self$par$hc return(pi * (hc^2 + (1-gamma) * (r^2 - hc^2))) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { r <- inte$par$r hc <- inte$par$hc #' positive part U <- as.ppp(X) nU <- npoints(U) cl <- weightedclosepairs(U, r, correction=correction, what="indices") if(is.null(cl)) # can't handle edge correction return(NULL) v <- sparseMatrix(i=cl$i, j=cl$j, x=cl$weight, dims=c(nU, nU)) #' hard core part hcl <- closepairs(U, hc, what="indices") ihit <- hcl$i jhit <- hcl$j vh <- NULL if(is.ppp(X)) { #' count conflicts between data points nhit <- as.integer(table(factor(jhit, levels=seq_len(nU)))) #' for a conflicting pair X[i], X[j], #' status of X[j] will change when X[i] is deleted #' iff X[j] is only in conflict with X[i] changes <- (nhit == 1) if(any(changes)) { changesJ <- changes[jhit] vh <- sparseMatrix(i=ihit[changesJ], j=jhit[changesJ], x=TRUE, dims=c(nU, nU)) } } else if(is.quad(X)) { #' count conflicts with existing data points izdat <- is.data(X) hitdata <- izdat[ihit] nhitdata <- as.integer(table(factor(jhit[hitdata], levels=seq_len(nU)))) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. 
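#' The next line encodes these two cases as a logical vector
#' indexed by the close pairs (ihit, jhit).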
changesJ <- (hitdata & (nhitdata[jhit] == 1)) | (!hitdata & (nhitdata[jhit] == 0)) if(any(changesJ)) vh <- sparseMatrix(i=ihit[changesJ], j=jhit[changesJ], x=TRUE, dims=c(nU, nU)) } else stop("X should be a ppp or quad object") # pack up if(!sparseOK) { v <- as.matrix(v) if(!is.null(vh)) vh <- as.matrix(vh) } attr(v, "deltaInf") <- vh return(v) } ) class(BlankStraussHard) <- "interact" StraussHard <- function(r, hc=NA) { instantiate.interact(BlankStraussHard, list(r=r, hc=hc)) } StraussHard <- intermaker(StraussHard, BlankStraussHard) StraussHard }) spatstat.model/R/interactions.R0000644000176200001440000002110314331173074016242 0ustar liggesusers# # interactions.R # # Works out which interaction is in force for a given point pattern # # $Revision: 1.26 $ $Date: 2021/12/29 00:24:51 $ # # impliedpresence <- function(tags, formula, df, extranames=character(0)) { # Determines, for each row of the data frame df, # whether the variable called tags[j] is required in the formula stopifnot(is.data.frame(df)) stopifnot(inherits(formula, "formula")) stopifnot(is.character(tags)) stopifnot(is.character(extranames)) # allvars <- variablesinformula(formula) if(any(tags %in% names(df))) stop(paste(sQuote("tags"), "conflicts with the name of a column of", sQuote("df"))) if(any(extranames %in% names(df))) stop(paste(sQuote("extranames"), "conflicts with the name of a column of", sQuote("df"))) # answer is a matrix nvars <- length(tags) nrows <- nrow(df) answer <- matrix(TRUE, nrows, nvars) # expand data frame with zeroes for each tags and extranames for(v in unique(c(tags, extranames))) df[ , v] <- 0 # loop for(i in seq(nrow(df))) { # make a fake data frame for the formula # using the data frame entries from row i # (includes 0 values for all other variables) pseudat <- df[i, , drop=FALSE] # use this to construct a fake model matrix mof0 <- model.frame(formula, pseudat) mom0 <- model.matrix(formula, mof0) for(j in seq(nvars)) { # Reset the variable called tags[j] to 1 pseudatj <- pseudat pseudatj[ , tags[j]] <- 1 # Now create the fake model matrix mofj <- model.frame(formula, pseudatj) momj <- model.matrix(formula, mofj) # Compare the two matrices answer[i,j] <- any(momj != mom0) } } return(answer) } active.interactions <- function(object) { stopifnot(inherits(object, "mppm")) interaction <- object$Inter$interaction iformula <- object$iformula nenv <- new.env() environment(iformula) <- nenv random <- object$random if(!is.null(random)) environment(random) <- nenv itags <- object$Inter$itags # The following are currently unused # ninter <- object$Inter$ninter # iused <- object$Inter$iused # trivial <- object$Inter$trivial # names of variables dat <- object$data datanames <- names(dat) dfnames <- summary(dat)$dfnames nondfnames <- datanames[!(datanames %in% dfnames)] nondfnames <- union(nondfnames, c("x", "y")) # extract data-frame values dfdata <- as.data.frame(dat[, dfnames, drop=FALSE], warn=FALSE) # determine which interaction(s) are in force answer <- impliedpresence(itags, iformula, dfdata, nondfnames) if(!is.null(random)) { if("|" %in% all.names(random)) { ## hack since model.matrix doesn't handle "|" as desired rnd <- gsub("|", "/", pasteFormula(random), fixed=TRUE) random <- as.formula(rnd, env=environment(random)) } answer2 <- impliedpresence(itags, random, dfdata, nondfnames) answer <- answer | answer2 } colnames(answer) <- names(interaction) return(answer) } impliedcoefficients <- function(object, tag, new.coef=NULL) { stopifnot(inherits(object, "mppm")) stopifnot(is.character(tag) && length(tag) 
== 1) fitobj <- object$Fit$FIT Vnamelist <- object$Fit$Vnamelist has.random <- object$Info$has.random # Not currently used: # fitter <- object$Fit$fitter # interaction <- object$Inter$interaction # ninteract <- object$Inter$ninteract # trivial <- object$Inter$trivial # iused <- object$Inter$iused itags <- object$Inter$itags if(!(tag %in% itags)) stop(paste("Argument", dQuote("tag"), "is not one of the interaction names")) # (0) Set up # Identify the columns of the glm data frame # that are associated with this interpoint interaction vnames <- Vnamelist[[tag]] if(!is.character(vnames)) stop("Internal error - wrong format for vnames") # Check atomic type of each covariate Moadf <- as.list(object$Fit$moadf) islog <- sapply(Moadf, is.logical) isnum <- sapply(Moadf, is.numeric) isfac <- sapply(Moadf, is.factor) # Interaction variables must be numeric or logical if(any(bad <- !(isnum | islog)[vnames])) stop(paste("Internal error: the", ngettext(sum(bad), "variable", "variables"), commasep(sQuote(vnames[bad])), "should be numeric or logical"), call.=FALSE) # The answer is a matrix of coefficients, # with one row for each point pattern, # and one column for each vname answer <- matrix(, nrow=object$npat, ncol=length(vnames)) colnames(answer) <- vnames # (1) make a data frame of covariates # Names of all columns in glm data frame allnames <- names(Moadf) # Extract the design covariates df <- as.data.frame(object$data, warn=FALSE) # Names of all covariates other than design covariates othernames <- allnames[!(allnames %in% names(df))] # Add columns in which all other covariates are set to 0, FALSE, etc for(v in othernames) { df[, v] <- if(isnum[[v]]) 0 else if(islog[[v]]) FALSE else if(isfac[[v]]) { lev <- levels(Moadf[[v]]) factor(lev[1], levels=lev) } else sort(unique(Moadf[[v]]))[1] } # (2) evaluate linear predictor Coefs <- new.coef %orifnull% (if(!has.random) coef(fitobj) else fixef(fitobj)) suppressWarnings({ # eta0 <- predict(fitobj, newdata=df, type="link") eta0 <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link") }) # (3) for each vname in turn, # set the value of the vname to 1 and predict again for(j in seq_along(vnames)) { vnj <- vnames[j] df[[vnj]] <- 1 suppressWarnings({ # etaj <- predict(fitobj, newdata=df, type="link") etaj <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link") }) answer[ ,j] <- etaj - eta0 # set the value of this vname back to 0 df[[vnj]] <- 0 } return(answer) } illegal.iformula <- local({ illegal.iformula <- function(ifmla, itags, dfvarnames) { ## THIS IS TOO STRINGENT! ## Check validity of the interaction formula. ## ifmla is the formula. ## itags is the character vector of interaction names. ## Check whether the occurrences of `itags' in `iformula' are valid: ## e.g. no functions applied to `itags[i]'. 
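## (for example, a term like sin(tag1) is rejected)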
## Returns NULL if legal, otherwise a character string stopifnot(inherits(ifmla, "formula")) stopifnot(is.character(itags)) ## formula must not have a LHS if(length(ifmla) > 2) return("iformula must not have a left-hand side") ## variables in formula must be interaction tags or data frame variables varsinf <- variablesinformula(ifmla) if(!all(ok <- varsinf %in% c(itags, dfvarnames))) return(paste( ngettext(sum(!ok), "variable", "variables"), paste(dQuote(varsinf[!ok]), collapse=", "), "not allowed in iformula")) ## if formula uses no interaction tags, it's trivial if(!any(itags %in% variablesinformula(ifmla))) return(NULL) ## create terms object tt <- attributes(terms(ifmla)) ## extract all variables appearing in the formula vars <- as.list(tt$variables)[-1] ## nvars <- length(vars) varexprs <- lapply(vars, as.expression) varstrings <- sapply(varexprs, paste) ## Each variable may be a name or an expression v.is.name <- sapply(vars, is.name) ## a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(varexprs, all.vars) ## v.n.args <- sapply(v.args, length) v.has.itag <- sapply(lapply(v.args, "%in%", x=itags), any) ## interaction tags may only appear as names, not in functions if(any(nbg <- v.has.itag & !v.is.name)) return(paste("interaction tags may not appear inside a function:", paste(dQuote(varstrings[nbg]), collapse=", "))) ## Interaction between two itags is not defined ## Inspect the higher-order terms fax <- tt$factors if(prod(dim(fax)) == 0) return(NULL) ## rows are first order terms, columns are terms of order >= 1 fvars <- rownames(fax) fterms <- colnames(fax) fv.args <- lapply(fvars, variablesintext) ft.args <- lapply(fterms, variables.in.term, factors=fax, varnamelist=fv.args) ft.itags <- lapply(ft.args, intersect, y=itags) if(any(lengths(ft.itags) > 1)) return("Interaction between itags is not defined") return(NULL) } variables.in.term <- function(term, factors, varnamelist) { basis <- (factors[, term] != 0) unlist(varnamelist[basis]) } illegal.iformula }) spatstat.model/R/rhohat.R0000644000176200001440000000633714476233602015045 0ustar liggesusers#' #' rhohat.R #' #' $Revision: 1.116 $ $Date: 2023/08/14 06:33:10 $ #' #' Non-parametric estimation of a function rho(z) determining #' the intensity function lambda(u) of a point process in terms of a #' spatial covariate Z(u) through lambda(u) = rho(Z(u)). #' More generally allows offsets etc. 
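#' For example, rhohat(fit, Z) with a fitted 'ppm' object dispatches to
#' rhohat.ppm() below; the generic rhohat() lives in spatstat.explore.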
#' Copyright (c) Adrian Baddeley 2015-2022 #' GNU Public Licence GPL >= 2.0 #' Code for generic rhohat() and rhohat.ppp() #' is now moved to spatstat.explore rhohat.ppm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps","grow.frame","shrink.frame"), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 if("baseline" %in% names(list(...))) warning("Argument 'baseline' ignored: not available for rhohat.ppm") ## validate model model <- object reference <- "model" modelcall <- model$call if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(data.ppm(model)) } else if(inherits(covariate, "distfun")) { covunits <- unitname(covariate) } else { covunits <- NULL } W <- Window(data.ppm(model)) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., subset=subset, do.CI=do.CI, weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps, rule.eps=rule.eps), spatCovarArgs=list(clip.predict=FALSE, jitter=jitter, jitterfactor=jitterfactor, interpolate=interpolate), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, positiveCI=positiveCI, breaks=breaks, modelcall=modelcall, callstring=callstring) } #' Code for rhohat infrastructure is now moved to spatstat.explore spatstat.model/R/rhohat.slrm.R0000644000176200001440000000540314331173073016005 0ustar liggesusers#' #' rhohat.slrm.R #' #' $Revision: 1.9 $ $Date: 2022/08/27 05:54:41 $ #' rhohat.slrm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 if("baseline" %in% names(list(...))) warning("Argument 'baseline' ignored: not available for rhohat.slrm") ## validate model model <- object reference <- "model" modelcall <- model$call if(!is.null(object$CallInfo$splitby)) stop("Sorry, rhohat.slrm is not yet implemented for split pixels", call.=FALSE) if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(response(model)) } else 
if(inherits(covariate, "distfun")) { covunits <- unitname(covariate) } else { covunits <- NULL } W <- Window(response(model)) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., spatCovarArgs=list(lambdatype="intensity", clip.predict=FALSE, jitter=jitter, jitterfactor=jitterfactor, interpolate=interpolate), do.CI=do.CI, weights=weights, method=method, horvitz=horvitz, smoother=smoother, n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, positiveCI=positiveCI, breaks=breaks, modelcall=modelcall, callstring=callstring) } spatstat.model/R/vcov.kppm.R0000644000176200001440000001217714331173074015476 0ustar liggesusers# # vcov.kppm # # vcov method for kppm objects # # Original code: Abdollah Jalilian # # $Revision: 1.11 $ $Date: 2018/02/15 03:28:11 $ # vcov.kppm <- function(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 0.01, verbose = TRUE) { what <- match.arg(what) verifyclass(object, "kppm") fast.given <- !is.null(fast) #' secret argument (eg for testing) splitup <- resolve.1.default(list(splitup=FALSE), list(...)) #' if(is.null(object$improve)) { ## Normal composite likelihood (poisson) case ## extract composite likelihood results po <- object$po ## ensure it was fitted with quadscheme if(is.null(getglmfit(po))) { warning("Re-fitting model with forcefit=TRUE") po <- update(po, forcefit=TRUE) } ## extract quadrature scheme information Q <- quad.ppm(po) U <- union.quad(Q) nU <- npoints(U) wt <- w.quad(Q) ## compute fitted intensity values lambda <- fitted(po, type="lambda") ## extract covariate values Z <- model.matrix(po) ## evaluate integrand ff <- Z * lambda * wt ## extract pcf g <- pcfmodel(object) ## resolve options for algorithm maxmat <- spatstat.options("maxmatrix") if(!fast.given) { fast <- (nU^2 > maxmat) } else stopifnot(is.logical(fast)) ## attempt to compute large matrix: pair correlation function minus 1 if(!fast) { gminus1 <- there.is.no.try( matrix(g(c(pairdist(U))) - 1, nU, nU) ) } else { if(is.null(rmax)){ diamwin <- diameter(as.owin(U)) fnc <- get("fnc", envir = environment(improve.kppm)) rmax <- if(fnc(diamwin, eps.rmax, g) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diamwin, eps=eps.rmax, g=g)$root } cp <- there.is.no.try( crosspairs(U,U,rmax,what="ijd") ) gminus1 <- if(is.null(cp)) NULL else sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(nU, nU)) } ## compute quadratic form if(!splitup && !is.null(gminus1)) { E <- t(ff) %*% gminus1 %*% ff } else { ## split calculation of (gminus1 %*% ff) into blocks nrowperblock <- max(1, floor(maxmat/nU)) nblocks <- ceiling(nU/nrowperblock) g1ff <- NULL if(verbose) { splat("Splitting large matrix calculation into", nblocks, "blocks") pstate <- list() } if(!fast) { for(k in seq_len(nblocks)) { if(verbose) pstate <- progressreport(k, nblocks, state=pstate) istart <- nrowperblock * (k-1) + 1 iend <- min(nrowperblock * k, nU) ii <- istart:iend gm1 <- matrix(g(c(crossdist(U[ii], U))) - 1, iend-istart+1, nU) g1ff <- rbind(g1ff, gm1 %*% ff) } } else { for(k in seq_len(nblocks)) { if(verbose) pstate <- progressreport(k, nblocks, state=pstate) istart <- nrowperblock * (k-1) + 1 iend <- min(nrowperblock * k, nU) ii <- istart:iend cp <- crosspairs(U[ii], U, rmax, what="ijd") gm1 <- sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(iend-istart+1, nU)) g1ff <- rbind(g1ff, as.matrix(gm1 %*% ff)) } } E <- t(ff) %*% g1ff } ## asymptotic covariance matrix in the 
Poisson case J <- t(Z) %*% ff J.inv <- try(solve(J)) ## could be singular if(inherits(J.inv, "try-error")) { if(what == "internals") { return(list(ff=ff, J=J, E=E, J.inv=NULL)) } else { return(NULL) } } ## asymptotic covariance matrix in the clustered case vc <- J.inv + J.inv %*% E %*% J.inv } else { ## Case of quasi-likelihood (or other things from improve.kppm) run <- is.null(object$vcov) || (!is.null(fast) && (fast != object$improve$fast.vcov)) if(run){ ## Calculate vcov if it hasn't already been so ## or if option fast differs from fast.vcov args <- object$improve internal <- what=="internals" if(!is.null(fast)) args$fast.vcov <- fast object <- with(args, improve.kppm(object, type = type, rmax = rmax, dimyx = dimyx, fast = fast, vcov = TRUE, fast.vcov = fast.vcov, maxIter = 0, save.internals = internal)) } vc <- object$vcov } ## Convert from Matrix to ordinary matrix: vc <- as.matrix(vc) switch(what, vcov={ return(vc) }, corr={ sd <- sqrt(diag(vc)) co <- vc/outer(sd, sd, "*") return(co) }, fisher={ fish <- try(solve(vc)) if(inherits(fish, "try-error")) fish <- NULL return(fish) }, internals={ return(list(ff=ff, J=J, E=E, J.inv=J.inv, vc=vc)) }) stop(paste("Unrecognised option: what=", what)) } spatstat.model/R/update.ppm.R0000644000176200001440000003362614374302014015625 0ustar liggesusers# # update.ppm.R # # # $Revision: 1.69 $ $Date: 2023/02/16 02:35:40 $ # # # update.ppm <- local({ ## update point pattern dataset using either data or formula newpattern <- function(oldpattern, lhs, callframe, envir) { eval(eval(substitute(substitute(l, list("."=Q)), list(l=lhs, Q=oldpattern)), envir=as.list(envir), enclos=callframe), envir=as.list(envir), enclos=callframe) } update.ppm <- function(object, ..., fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) { verifyclass(object, "ppm") new.callstring <- short.deparse(sys.call()) aargh <- list(...) if(inherits(object, "ippm")) { call <- object$dispatched$call callframe <- object$dispatched$callframe } else { call <- getCall(object) if(!is.call(call)) stop(paste("Internal error - getCall(object) is not of class", sQuote("call"))) callframe <- object$callframe } callfun <- as.character(call[[1]]) newstyle <- (callfun == "ppm.formula") oldstyle <- !newstyle ## Special cases ## (1) no new information given if(length(aargh) == 0 && !identical(use.internal, TRUE)) { result <- eval(call, as.list(envir), enclos=callframe) result$callframe <- callframe return(result) } ## (2) model can be updated using existing covariate data frame if(!identical(use.internal, FALSE) && ## single argument which is a formula (length(aargh) == 1) && inherits(fmla <- aargh[[1]], "formula") && is.null(lhs.of.formula(fmla)) && ## not a ppm.formula call oldstyle && ## fitted by mpl using glm/gam with(object, method == "mpl" && !is.null(fitter) && fitter %in% c("gam", "glm"))) { ## This is a dangerous hack! glmdata <- object$internal$glmdata ## check whether data for new variables are available ## (this doesn't work with things like 'pi') vars.available <- c(colnames(glmdata), names(object$covfunargs)) if(all(variablesinformula(fmla) %in% c(".", vars.available))) { ## we can update using internal data FIT <- object$internal$glmfit orig.env <- environment(FIT$terms) orig.trend <- object$trend orig.formula <- update(orig.trend, formula(FIT)) ## update formulae using "." 
rules trend <- newformula(orig.trend, fmla, callframe, envir) fmla <- newformula(orig.formula, fmla, callframe, envir) ## expand polynom() in formula if(spatstat.options("expand.polynom")) { fmla <- expand.polynom(fmla) trend <- expand.polynom(trend) } ## update GLM/GAM fit upd.glm.call <- update(FIT, fmla, evaluate=FALSE) FIT <- eval(upd.glm.call, envir=orig.env) environment(FIT$terms) <- orig.env object$internal$glmfit <- FIT ## update entries of object object$trend <- trend object$terms <- terms(fmla) object$coef <- co <- FIT$coef object$callstring <- new.callstring object$internal$fmla <- fmla ## if(is.finite(object$maxlogpl)) { ## Update maxlogpl provided it is finite ## (If the likelihood is infinite, this is due to the interaction; ## if we update the trend, the likelihood will remain infinite.) W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(object$Q) object$maxlogpl <- -(deviance(FIT)/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) } ## update the model call upd.call <- call upd.call$trend <- trend object$call <- upd.call ## update fitted interaction (depends on coefficients, if not Poisson) if(!is.null(inter <- object$interaction) && !is.poisson(inter)) object$fitin <- fii(inter, co, object$internal$Vnames, object$internal$IsOffset) ## if(is.stationary(object) && !is.marked(object)) { ## uniform Poisson if(eval(call$rename.intercept) %orifnull% TRUE) { names(object$coef) <- "log(lambda)" } } return(object) } } ## (3) Need to use internal data if(oldstyle) { ## decide whether to use internal data undecided <- is.null(use.internal) || !is.logical(use.internal) force.int <- !undecided && use.internal force.ext <- !undecided && !use.internal if(!force.int) { ## check for validity of format badformat <- damaged.ppm(object) } if(undecided) { use.internal <- badformat if(badformat) message("object format corrupted; repairing it") } else if(force.ext && badformat) warning("object format corrupted; try update(object, use.internal=TRUE)") if(use.internal) { ## reset the main arguments in the call using the internal data call$Q <- quad.ppm(object) namobj <- names(call) if("trend" %in% namobj) call$trend <- newformula(call$trend, object$trend, callframe, envir) if("interaction" %in% namobj) call$interaction <- object$interaction if("covariates" %in% namobj) call$covariates <- object$covariates } } ## General case. 
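  ## The remaining arguments in '...' are sorted into named and unnamed;
  ## each unnamed argument is matched to a formal argument of ppm() by
  ## its class: a 'ppp' or 'quad' object is treated as 'Q', a formula as
  ## 'trend', an 'interact' object as 'interaction', and a data frame or
  ## image as 'covariates'. Hypothetical calls, for illustration only:
  ##   update(fit, cells)          # new data, recognised by class 'ppp'
  ##   update(fit, ~ x + y)        # new trend formula
  ##   update(fit, Strauss(0.07))  # new interaction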
X.is.new <- FALSE ## First split named and unnamed arguments nama <- names(aargh) named <- if(is.null(nama)) rep.int(FALSE, length(aargh)) else nzchar(nama) namedargs <- aargh[named] unnamedargs <- aargh[!named] nama <- names(namedargs) ## Find the argument 'Q' by name or implicitly by class ## (including detection of conflicts) argQ <- argQexpr <- newQname <- NULL newX <- newXname <- NULL if(n <- sp.foundclasses(c("ppp", "quad"), unnamedargs, "Q", nama)) { argQ <- unnamedargs[[n]] argQexpr <- sys.call()[[which(!named)[n] + 2L]] newX <- argQ newXname <- short.deparse(argQexpr) unnamedargs <- unnamedargs[-n] } if("Q" %in% nama) { argQ <- namedargs$Q argQexpr <- sys.call()$Q newQname <- short.deparse(argQexpr) nama <- setdiff(nama, "Q") namedargs <- namedargs[nama] } ## Deal with argument 'Q' which has several possible forms if(!is.null(argQ)) { X.is.new <- TRUE if(inherits(argQ, "formula")) { ## Q = X ~ trend if(newstyle) { ## update the formula callQ <- update(object$trend, call$Q) call$Q <- newformula(callQ, argQ, callframe, envir) } else { ## split into Q = X and trend = ~trend if(!is.null(lhs <- lhs.of.formula(argQ))) { call$Q <- newX <- newpattern(call$Q, lhs, callframe, envir) newXname <- short.deparse(lhs) } call$trend <- newformula(call$trend, rhs.of.formula(eval(argQ)), callframe, envir) } newQ <- call$Q } else { ## Q = X if(newstyle) { ## convert old call to old style fo <- as.formula(call$Q) Yexpr <- lhs.of.formula(fo) trend <- rhs.of.formula(fo) newcall <- call("ppm", Q=Yexpr, trend=trend) if(length(call) > 2) { whichQ <- which(names(call) == "Q") morecall <- call[-c(1, whichQ)] if((mc <- length(morecall)) > 0) { newcall[3 + 1:mc] <- morecall names(newcall)[3 + 1:mc] <- names(call)[-c(1, whichQ)] } } call <- newcall newstyle <- FALSE oldstyle <- TRUE } ## Now update the dataset call$Q <- argQexpr %orifnull% argQ if(is.null(newX)) newX <- argQ if(is.null(newXname)) newXname <- newQname } } ## Find any formula arguments ## (including detection of conflicts) argfmla <- NULL if(n <- sp.foundclass("formula", unnamedargs, "trend", nama)) { argfmla <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } else if(n <- sp.foundclass("character", unnamedargs, "trend", nama)) { ## string that might be interpreted as a formula strg <- unnamedargs[[n]] if(!is.na(charmatch("~", strg))) { argfmla <- as.formula(strg) unnamedargs <- unnamedargs[-n] } } if("trend" %in% nama) { argfmla <- namedargs$trend nama <- setdiff(nama, "trend") namedargs <- namedargs[nama] } ## Handle new formula if(!is.null(argfmla)) { lhs <- lhs.of.formula(argfmla) if(newstyle) { ## ppm.formula: update the formula if(is.null(lhs)) { argfmla <- as.formula(paste(".", deparse(argfmla))) } else { X.is.new <- TRUE newXname <- short.deparse(lhs) } callQ <- update(object$trend, call$Q) call$Q <- newformula(callQ, argfmla, callframe, envir) } else { ## ppm.ppp: update the trend and possibly the data if(is.null(lhs)) { ## assign new trend call$trend <- newformula(call$trend, argfmla, callframe, envir) } else { ## split into Q = X and trend = ~trend X.is.new <- TRUE call$Q <- newX <- newpattern(call$Q, lhs, callframe, envir) call$trend <- newformula(call$trend, rhs.of.formula(argfmla), callframe, envir) newXname <- short.deparse(lhs) } } } if(length(namedargs) > 0) { ## any other named arguments that were also present in the original call ## override their original values. 
existing <- !is.na(match(nama, names(call)))
      for (a in nama[existing]) call[[a]] <- aargh[[a]]
      ## add any named arguments not present in the original call
      if (any(!existing)) {
        call <- c(as.list(call), namedargs[!existing])
        call <- as.call(call)
      }
    }
    if(length(unnamedargs) > 0) {
      ## some further objects identified by their class
      if(n <- sp.foundclass("interact", unnamedargs, "interaction", nama)) {
        call$interaction <- unnamedargs[[n]]
        unnamedargs <- unnamedargs[-n]
      }
      if(n <- sp.foundclasses(c("data.frame", "im"), unnamedargs,
                              "covariates", nama)) {
        call$covariates <- unnamedargs[[n]]
        unnamedargs <- unnamedargs[-n]
      }
    }
    ## *************************************************************
    ## ****** Special action when Q is a point pattern *************
    ## *************************************************************
    if(X.is.new && fixdummy && oldstyle && is.ppp(newX) &&
       identical(Window(newX), Window(data.ppm(object)))) {
      ## Instead of allowing default.dummy(X) to occur,
      ## explicitly create a quadrature scheme from X,
      ## using the same dummy points and weight parameters
      ## as were used in the fitted model
      Qold <- quad.ppm(object)
      if(is.marked(Qold)) {
        dpar <- Qold$param$dummy
        wpar <- Qold$param$weight
        Qnew <- do.call(quadscheme, append(list(newX), append(dpar, wpar)))
      } else {
        Dum <- Qold$dummy
        wpar <- Qold$param$weight
        Qnew <- do.call(quadscheme, append(list(newX, Dum), wpar))
      }
      ## replace X by new Q
      call$Q <- Qnew
    }
    ## finally call ppm
    call[[1]] <- as.name('ppm')
    result <- eval(call, as.list(envir), enclos=callframe)
    ## tweak name of 'original' data
    if(!is.null(newXname)) result$Qname <- newXname
    return(result)
  }
  update.ppm
})
sp.foundclass <- function(cname, inlist, formalname, argsgiven) {
  ok <- unlist(lapply(inlist, inherits, what=cname))
  nok <- sum(ok)
  if(nok > 1)
    stop(paste("I am confused: there are two unnamed arguments",
               "of class", sQuote(cname)))
  if(nok == 0) return(0)
  absent <- !(formalname %in% argsgiven)
  if(!absent)
    stop(paste("I am confused: there is an unnamed argument",
               "of class", sQuote(cname), "which conflicts with the",
               "named argument", sQuote(formalname)))
  theposition <- seq_along(ok)[ok]
  return(theposition)
}
sp.foundclasses <- function(cnames, inlist, formalname, argsgiven) {
  ncn <- length(cnames)
  pozzie <- logical(ncn)
  for(i in seq_len(ncn))
    pozzie[i] <- sp.foundclass(cnames[i], inlist, formalname, argsgiven)
  found <- (pozzie > 0)
  nfound <- sum(found)
  if(nfound == 0) return(0)
  else if(nfound == 1) return(pozzie[found])
  else stop(paste("I am confused: there are", nfound,
                  "unnamed arguments of different classes (",
                  paste(sQuote(cnames[found]), collapse=", "),
                  ") which could be interpreted as", sQuote(formalname)))
}
damaged.ppm <- function(object) {
  ## guess whether the object format has been damaged
  ## e.g.
by dump/restore gf <- getglmfit(object) badfit <- !is.null(gf) && !inherits(gf$terms, "terms") if(badfit) return(TRUE) ## escape clause for fake models if(identical(object$fake, TRUE)) return(FALSE) ## otherwise it was made by ppm Qcall <- object$call$Q cf <- object$callframe if(is.null(cf)) { ## Old format of ppm objects if(is.name(Qcall) && !exists(paste(Qcall))) return(TRUE) Q <- eval(Qcall) } else { ## New format of ppm objects if(is.name(Qcall) && !exists(paste(Qcall), cf)) return(TRUE) Q <- eval(Qcall, cf) } badQ <- is.null(Q) || !(inherits(Q, c("ppp", "quad", "formula"))) return(badQ) } updateData.ppm <- function(model, X, ..., warn=TRUE) { ## wrapper to refit the 'model' to new data 'X' if(is.marked(X) && !is.multitype(X)) { if(warn) warning("Marks were ignored when re-fitting the model,", "because they were not a factor", call.=FALSE) X <- unmark(X) } update(model, Q=X) } spatstat.model/R/palmdiagnose.R0000644000176200001440000001276414334115572016222 0ustar liggesusers## palmdiagnose.R ## ## Palm intensity diagnostic ## proposed by Tanaka, Ogata and Stoyan (2008) ## ## Copyright (c) Adrian Baddeley 2022 ## GNU Public Licence >= 2.0 ## ## $Revision: 1.3 $ $Date: 2022/11/09 08:40:30 $ palmdiagnose <- function(object, ..., breaks=30, trim=30, rmax=Inf) { if(missing(object)) { models <- list(...) if(length(models) == 0) stop("No fitted models were supplied") if(!all(sapply(models, is.kppm))) stop("Each argument should be a kppm object, or a list of kppm objects") } else if(is.kppm(object)) { models <- list(object, ...) if(length(models) > 1 && !all(sapply(models, is.kppm))) stop("Each argument should be a kppm object, or a list of kppm objects") } else if(is.list(object) && all(sapply(object, is.kppm))) { models <- object } else stop("Argument 'object' should be a kppm object, or a list of kppm objects") ## must be stationary if(!all(sapply(models, is.stationary))) stop("Sorry, not yet implemented for inhomogeneous models") ## names nmodels <- length(models) if(sum(nzchar(names(models))) < nmodels) names(models) <- if(nmodels == 1) "fit" else paste0("Model", 1:nmodels) modelnames <- names(models) <- make.names(names(models), unique=TRUE, allow_ = FALSE) ## extract point pattern Xlist <- unique(lapply(models, response)) if(length(Xlist) > 1) stop("Models were fitted to different point patterns") X <- response(models[[1]]) nX <- npoints(X) W <- Window(X) aW <- area(W) lamX <- nX/aW ## translation edge correction weights tran <- aW/setcov(W) tran <- eval.im(pmin(100, tran)) tran <- tran/nX ## nonparametric estimate of Palm intensity r <- function(x,y) { sqrt(x^2 + y^2) } Z <- frypoints(X, dmax=rmax) R <- rhohat(Z, r, weights=tran, smoother="piecewise", breaks=breaks, from = 0, to = if(is.finite(rmax)) rmax else NULL) breaks <- attr(R, "stuff")$breaks ## replace 'ave' by \bar\lambda (using knowledge of Fry points) R$ave <- lamX ## rename function fnam <- c("lambda", "P") lmap <- c(rho=makefvlabel(NULL, "hat", fnam, "nonpar"), ave="bar(lambda)", var=makefvlabel("Var", "hat", fnam, "nonpar"), hi=makefvlabel(NULL, NULL, fnam, "hi"), lo=makefvlabel(NULL, NULL, fnam, "hi")) R <- rebadge.fv(R, new.ylab=quote(lambda[P](r)), new.fname=fnam, tags=names(lmap), new.labl=unname(lmap)) R <- rebadge.fv(R, tags=c("r", "rho"), new.tags=c("r", "est"), new.desc=c("Interpoint distance", "Nonparametric estimate")) shadenames <- fvnames(R, ".s") dotnames <- fvnames(R, ".") ## parametric estimate(s) palmlam <- lapply(models, function(fit, r, lam) { lam * (pcfmodel(fit))(r) }, r=R$r, lam=lamX) if(nmodels == 1) 
{ new.df <- data.frame(fit=palmlam[[1]]) new.labl <- makefvlabel(NULL, NULL, fnam, "fit") new.desc <- "Parametric estimate" } else { new.df <- as.data.frame(palmlam) new.labl <- sapply(modelnames, function(x, fn) makefvlabel(NULL, NULL, fn, x), fn=fnam) new.desc <- paste("Parametric estimate from", modelnames) } R <- bind.fv(R, new.df, new.labl, new.desc) ## tidy up dotnames <- fvnames(R, ".") <- c(dotnames[1], modelnames, dotnames[-1]) if(length(shadenames)) fvnames(R, ".s") <- shadenames attr(R, "alim") <- intersect.ranges(attr(R, "alim"), c(0, rmax.rule("K", W, lamX))) unitname(R) <- unitname(X) ## add a new class so that we can imitate the plot style of Tanaka et al class(R) <- union("palmdiag", class(R)) attr(R, "breaks") <- breaks attr(R, "fitnames") <- setdiff(dotnames, c("est", "var", "hi", "lo")) return(R) } plot.palmdiag <- function(x, ..., style=c("intervals", "dots", "bands"), args.dots=list(pch=16), args.intervals=list(), xlim=NULL, main) { if(missing(main)) main <- short.deparse(substitute(x)) style <- match.arg(style) switch(style, bands = { z <- plot.fv(x, ..., main=main) }, dots = , intervals = { fitnames <- attr(x, "fitnames") fmla <- as.formula(paste("cbind", paren(paste(fitnames, collapse=", ")), "~ r")) if(is.null(xlim)) xlim <- attr(x, "alim") rvals <- getElement(x, name=fvnames(x, ".x")) xsub <- x[inside.range(rvals, xlim), ] z <- do.call(plot.fv, resolve.defaults(list(quote(x)), list(fmla, shade=NULL, main=main), list(...), list(xlim=xlim, ylim=range(xsub, na.rm=TRUE)) )) b <- attr(x, "breaks") rmid <- (b[-1] + b[-length(b)])/2 f <- as.function(x, value=c("est", "lo", "hi")) ymid <- f(rmid) do.call(points, append(list(rmid, ymid), args.dots)) if(style == "intervals") { yhi <- f(rmid, "hi") ylo <- f(rmid, "lo") do.call(segments, append(list(rmid, ylo, rmid, yhi), args.intervals)) } }) return(invisible(z)) } spatstat.model/R/harmonic.R0000644000176200001440000000327614331173073015352 0ustar liggesusers# # # harmonic.R # # $Revision: 1.2 $ $Date: 2004/01/07 08:57:39 $ # # harmonic() # Analogue of polynom() for harmonic functions only # # ------------------------------------------------------------------- # harmonic <- function(x,y,n) { if(missing(n)) stop("the order n must be specified") n <- as.integer(n) if(is.na(n) || n <= 0) stop("n must be a positive integer") if(n > 3) stop("Sorry, harmonic() is not implemented for degree > 3") namex <- deparse(substitute(x)) namey <- deparse(substitute(y)) if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") if(!is.name(substitute(y))) namey <- paste("(", namey, ")", sep="") switch(n, { result <- cbind(x, y) names <- c(namex, namey) }, { result <- cbind(x, y, x*y, x^2-y^2) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep="")) }, { result <- cbind(x, y, x * y, x^2-y^2, x^3 - 3 * x * y^2, y^3 - 3 * x^2 * y) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep=""), paste("(", namex, "^3-3", namex, ".", namey, "^2)", sep=""), paste("(", namey, "^3-3", namex, "^2.", namey, ")", sep="") ) } ) dimnames(result) <- list(NULL, names) return(result) } spatstat.model/R/measures.R0000644000176200001440000006707514331173073015405 0ustar liggesusers# # measures.R # # signed/vector valued measures with atomic and diffuse components # # $Revision: 1.105 $ $Date: 2022/06/19 04:08:19 $ # msr <- function(qscheme, discrete, density, check=TRUE) { if(!is.quad(qscheme)) stop("qscheme should be a quadrature scheme") 
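  ## Internal representation: the 'discrete' component gives atomic
  ## masses at the data points of 'qscheme', and the 'density' component
  ## gives the density of the diffuse part at every quadrature point.
  ## The increment attached to each quadrature tile is then
  ##     val = discrete + wt * density.
  ## Sketch of a direct call (msr() is normally invoked internally,
  ## e.g. by residuals.ppm; 'cells' is used purely for illustration):
  ##   Q <- quadscheme(cells)
  ##   mu <- msr(Q, discrete=1, density=0)  # counting measure of 'cells'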
nquad <- n.quad(qscheme) U <- union.quad(qscheme) wt <- w.quad(qscheme) Z <- is.data(qscheme) ndata <- sum(Z) # ensure conformable vectors/matrices stopifnot(is.numeric(discrete) || is.logical(discrete)) stopifnot(is.numeric(density)) if(is.vector(discrete) && is.vector(density)) { # handle constants if(length(discrete) == 1) discrete <- rep.int(discrete, ndata) if(length(density) == 1) density <- rep.int(density, nquad) # check lengths if(check) { check.nvector(discrete, ndata, things="data points", naok=TRUE, vname="discrete") check.nvector(density, nquad, things="quadrature points", naok=TRUE, vname="density") } discretepad <- numeric(nquad) discretepad[Z] <- discrete } else { if(length(discrete) == 1 && is.matrix(density)) { # replicate constant 'discrete' component to matrix of correct size discrete <- matrix(discrete, ndata, ncol(density)) } else if(length(density) == 1 && is.matrix(discrete)) { # replicate constant 'density' to matrix of correct size density <- matrix(density, nquad, ncol(discrete)) } else { discrete <- as.matrix(discrete) density <- as.matrix(density) } if(check) { # check numbers of rows check.nmatrix(discrete, ndata, things="data points", naok=TRUE, squarematrix=FALSE) check.nmatrix(density, nquad, things="quadrature points", naok=TRUE, squarematrix=FALSE) } nd <- ncol(discrete) nc <- ncol(density) if(nd != nc) { if(nd == 1) { # replicate columns of discrete component discrete <- matrix(rep.int(discrete, nc), ndata, nc) colnames(discrete) <- colnames(density) } else if(nc == 1) { # replicate columns of density component density <- matrix(rep.int(density, nd), nquad, nd) colnames(density) <- colnames(discrete) } else stop(paste("Incompatible numbers of columns in", sQuote("discrete"), paren(nd), "and", sQuote("density"), paren(nc))) } discretepad <- matrix(0, nquad, max(nd, nc)) discretepad[Z, ] <- discrete colnames(discretepad) <- colnames(density) } ## ## Increments of measure (value of measure for each quadrature tile) ## val <- discretepad + wt * density if(is.matrix(density)) colnames(val) <- colnames(density) ## ## if(check && !all(ok <- complete.cases(val))) { warning("Some infinite, NA or NaN increments were removed", call.=FALSE) val <- ok * val discretepad <- ok * discretepad density <- ok * density } ## finished out <- list(loc = U, val = val, atoms = Z, discrete = discretepad, density = density, wt = wt) class(out) <- "msr" return(out) } # Translation table for usage of measures # # e.g. res <- residuals(fit, ...) # # OLD NEW # res[ ] res$val[ ] with(res, "increment") # attr(res, "atoms") res$atoms with(res, "is.atom") # attr(res, "discrete") res$discrete with(res, "discrete") # attr(res, "continuous") res$density with(res, "density") # w.quad(quad.ppm(fit)) res$wt with(res, "qweights") # union.quad(quad.ppm(fit)) res$loc with(res, "qlocations") # ................................................. with.msr <- function(data, expr, ...) { stopifnot(inherits(data, "msr")) stuff <- list(increment = data$val, is.atom = data$atoms, discrete = data$discrete, density = data$density, continuous = data$density * data$wt, qweights = data$wt, qlocations = data$loc, atoms = data$loc[data$atoms], atommass = marksubset(data$discrete, data$atoms)) y <- eval(substitute(expr), envir=stuff, enclos=parent.frame()) if(is.character(y) && length(y) == 1 && y %in% names(stuff)) y <- stuff[[y]] return(y) } print.msr <- function(x, ...) 
{ xloc <- x$loc n <- npoints(xloc) d <- ncol(as.matrix(x$val)) splat(paste0(if(d == 1) "Scalar" else paste0(d, "-dimensional vector"), "-valued measure")) if(d > 1 && !is.null(cn <- colnames(x$val)) && waxlyrical("space")) splat("vector components:", commasep(sQuote(cn))) if(is.marked(xloc)) { splat("\tDefined on 2-dimensional space x marks") if(is.multitype(xloc)) exhibitStringList("\tPossible marks: ", levels(marks(xloc))) } if(waxlyrical("gory")) { splat("Approximated by", n, "quadrature points") print(as.owin(xloc)) splat(sum(x$atoms), "atoms") } if(waxlyrical("extras")) { splat("Total mass:") if(d == 1) { splat("discrete =", signif(sum(with(x, "discrete")), 5), " continuous =", signif(sum(with(x, "continuous")), 5), " total =", signif(sum(with(x, "increment")), 5)) } else { if(is.null(cn)) cn <- paste("component", 1:d) for(j in 1:d) { splat(paste0(cn[j], ":\t"), "discrete =", signif(sum(with(x, "discrete")[,j]), 5), " continuous =", signif(sum(with(x, "continuous")[,j]), 5), " total =", signif(sum(with(x, "increment")[,j]), 5)) } } } return(invisible(NULL)) } summary.msr <- function(object, ...) { print(object) } is.multitype.msr <- function(X, ...) { is.multitype(X$loc, ...) } is.marked.msr <- function(X, ...) { is.marked(X$loc, ...) } split.msr <- function(x, f, drop=FALSE, ...) { xloc <- x$loc ## determine split using rules for split.ppp locsplit <- if(missing(f)) split(xloc, drop=drop) else split(xloc, f, drop=drop) ## extract grouping factor g <- attr(locsplit, "fgroup") ## split contributions to measure atomsplit <- split(x$atoms, g, drop=drop) # hyuk wtsplit <- split(x$wt, g, drop=drop) if(ncol(x) == 1) { ## scalar measure valsplit <- split(x$val, g, drop=drop) discsplit <- split(x$discrete, g, drop=drop) denssplit <- split(x$density, g, drop=drop) } else { ## vector measure valsplit <- lapply(split(as.data.frame(x$val), g, drop=drop), as.matrix) discsplit <- lapply(split(as.data.frame(x$discrete), g, drop=drop), as.matrix) denssplit <- lapply(split(as.data.frame(x$density), g, drop=drop), as.matrix) } ## form the component measures result <- mapply(list, loc=locsplit, val=valsplit, atoms=atomsplit, discrete=discsplit, density=denssplit, wt=wtsplit, SIMPLIFY=FALSE) names(result) <- names(locsplit) result <- lapply(result, "class<-", value="msr") if(drop && any(isnul <- (sapply(locsplit, npoints) == 0))) result[isnul] <- NULL result <- as.solist(result) return(result) } integral.msr <- function(f, domain=NULL, weight=NULL, ...) { stopifnot(inherits(f, "msr")) if(is.tess(domain)) { result <- sapply(tiles(domain), integral.msr, f = f, weight=weight) if(length(dim(result)) > 1) result <- t(result) return(result) } if(!is.null(domain)) f <- f[domain] y <- with(f, "increment") if(!is.null(weight)) { qloc <- with(f, "qlocations") W <- as.owin(qloc) if(is.function(weight)) { ## evaluate at quadrature points weightq <- weight(qloc$x, qloc$y) } else { ## expect image, or coerce to image if(!is.im(weight)) weight <- as.im(weight, W=W) ## sample image at quadrature points weightq <- weight[qloc, drop=FALSE] weightq[is.na(weightq)] <- 0 } ## apply weight to increments y <- y * as.numeric(weightq) } ## integrate z <- if(is.matrix(y)) apply(y, 2, sum) else sum(y) return(z) } update.msr <- function(object, ...) 
{ #' reconcile internal data if(!is.null(smo <- attr(object, "smoothdensity"))) { sigma <- attr(smo, "sigma") object <- augment.msr(object, ..., sigma=sigma, recompute=TRUE) } return(object) } plot.msr <- function(x, ..., add=FALSE, how=c("image", "contour", "imagecontour"), main=NULL, do.plot=TRUE, multiplot=TRUE, massthresh=0, equal.markscale=FALSE, equal.ribbon=FALSE) { if(is.null(main)) main <- short.deparse(substitute(x)) how <- match.arg(how) if(!multiplot) { ## compress everything to a single panel x$loc <- unmark(x$loc) if(is.matrix(x$val)) x$val <- rowSums(x$val) if(is.matrix(x$discrete)) x$discrete <- rowSums(x$discrete) if(is.matrix(x$density)) x$density <- rowSums(x$density) if(!is.null(smo <- attr(x, "smoothdensity")) && inherits(smo, "solist")) attr(x, "smoothdensity") <- im.apply(smo, sum, check=FALSE) ## WAS: attr(x, "smoothdensity") <- Reduce("+", smo) } d <- dim(x)[2] k <- if(is.multitype(x)) length(levels(marks(x$loc))) else 1 ## multiple plot panels may be generated if(k == 1 && d == 1) { ## single plot y <- solist(x) } else if(k > 1 && d == 1) { ## multitype y <- split(x) } else if(k == 1 && d > 1) { ## vector-valued y <- unstack(x) } else if(k > 1 && d > 1) { ## both multitype and vector-valued y <- split(x) typenames <- names(y) vecnames <- colnames(x$val) y <- unstack(y) names(y) <- as.vector(t(outer(typenames, vecnames, paste, sep="."))) } #' ensure image of density is present y <- solapply(y, augment.msr) #' ready to plot if(length(y) > 1) { ## plot as an array of panels userarg <- list(...) rowcol <- list(nrows=k, ncols=d) if(any(c("nrows", "ncols") %in% names(userarg))) rowcol <- list() #' determine common scales if required scaleinfo <- list() if(equal.markscale) { W <- Window(x) #' extract vectors of atomic masses from each panel marx <- lapply(y, with, "atommass") #' make a separate scale calculation for each panel scales <- sapply(marx, mark.scale.default, w=W, ...) 
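      #' adopt the smallest of the per-panel mark scales, so that symbol
      #' sizes are directly comparable across all panels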
scaleinfo$markscale <- min(scales) scaleinfo$markrange <- range(unlist(marx)) } if(equal.ribbon) { images <- lapply(y, attr, which="smoothdensity") scaleinfo$zlim <- range(sapply(images, range)) } ## go result <- do.call(plot.solist, resolve.defaults(list(quote(y)), userarg, rowcol, scaleinfo, list(how=how, main=main, equal.scales=TRUE, halign=TRUE, valign=TRUE, claim.title.space=TRUE))) return(invisible(result)) } ## scalar measure x <- y[[1]] ## get atoms xatomic <- (x$loc %mark% x$discrete)[x$atoms] if(length(massthresh) && all(is.finite(massthresh))) { ## ignore atoms with absolute mass <= massthresh check.1.real(massthresh) xatomic <- xatomic[abs(marks(xatomic)) > massthresh] } xtra.im <- graphicsPars("image") xtra.pp <- setdiff(graphicsPars("ppp"), c("box", "col")) xtra.pp <- union(xtra.pp, c("markrange", "marklevels")) xtra.ow <- graphicsPars("owin") smo <- attr(x, "smoothdensity") ## do.image <- how %in% c("image", "imagecontour") do.contour <- how %in% c("contour", "imagecontour") ## allocate space for plot and legend using do.plot=FALSE mechanism pdata <- do.call.matched(plot.ppp, resolve.defaults(list(x=quote(xatomic), do.plot=FALSE, main=main), list(...), list(show.all=TRUE)), extrargs=xtra.pp) result <- pdata bb <- attr(pdata, "bbox") if(do.image) { idata <- do.call.matched(plot.im, resolve.defaults(list(x=quote(smo), main=main, do.plot=FALSE), list(...)), extrargs=xtra.im) result <- idata bb <- boundingbox(bb, attr(idata, "bbox")) } ## attr(result, "bbox") <- bb ## if(do.plot) { if(!add) { blankmain <- prepareTitle(main)$blank ## initialise plot do.call.matched(plot.owin, resolve.defaults(list(x=quote(bb), type="n", main=blankmain), list(...)), extrargs=xtra.ow) } ## display density if(do.image) do.call.matched(plot.im, resolve.defaults(list(x=quote(smo), add=TRUE), list(...), list(main=main, show.all=TRUE)), extrargs=xtra.im) if(do.contour) do.call.matched(contour.im, resolve.defaults(list(x=quote(smo), add=TRUE), list(...), list(main=main, axes=FALSE, show.all=!do.image)), extrargs=c("zlim", "labels", "labcex", ## DO NOT ALLOW 'col' "drawlabels", "method", "vfont", "lty", "lwd", "claim.title.space")) ## display atoms do.call.matched(plot.ppp, resolve.defaults(list(x=quote(xatomic), add=TRUE, main=""), list(...), list(show.all=TRUE)), extrargs=xtra.pp) } return(invisible(result)) } "[.msr" <- function(x, i, j, ...) { valu <- as.matrix(x$val) disc <- as.matrix(x$discrete) dens <- as.matrix(x$density) wt <- x$wt atoms <- x$atoms # if(!missing(j)) { valu <- valu[, j] disc <- disc[, j] dens <- dens[, j] } loc <- x$loc if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i, clip=TRUE] loc <- unmark(loci) id <- marks(loci) # extract valu <- valu[id, , drop=FALSE] disc <- disc[id, , drop=FALSE] dens <- dens[id, , drop=FALSE] wt <- wt[id] atoms <- atoms[id] } out <- list(loc=loc, val=valu, atoms=atoms, discrete=disc, density=dens, wt=wt) class(out) <- "msr" return(out) } dim.msr <- function(x) { dim(as.matrix(x$val)) } dimnames.msr <- function(x) { list(NULL, colnames(x$val)) } # smooth.msr <- function(X, ...) { # .Deprecated("Smooth.msr", package="spatstat", # msg="smooth.msr is deprecated: use the generic Smooth with a capital S") # Smooth(X, ...) # } Smooth.msr <- function(X, ..., drop=TRUE) { verifyclass(X, "msr") loc <- X$loc val <- X$val result <- density(loc, weights=val, ...) 
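  ## kernel smoothing: each quadrature point contributes the increment
  ## of the measure as a weight in density.ppp, producing a pixel image
  ## (or a list of images when the measure is vector-valued)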
if(!drop && is.im(result)) result <- solist(result) return(result) } as.owin.msr <- function(W, ..., fatal=TRUE) { as.owin(W$loc, ..., fatal=fatal) } domain.msr <- Window.msr <- function(X, ...) { as.owin(X) } shift.msr <- function(X, ...) { X$loc <- Xloc <- shift(X$loc, ...) if(!is.null(smo <- attr(X, "smoothdensity"))) attr(X, "smoothdensity") <- shift(smo, getlastshift(Xloc)) putlastshift(X, getlastshift(Xloc)) } as.layered.msr <- local({ as.layered.msr <- function(X) { nc <- ncol(X) if(nc == 0) return(layered()) if(nc == 1) return(layered(X)) Y <- lapply(seq_len(nc), pickcol, x=X) names(Y) <- colnames(X) return(layered(LayerList=Y)) } pickcol <- function(j,x) x[,j] as.layered.msr }) unitname.msr <- function(x) unitname(x$loc) "unitname<-.msr" <- function(x, value) { unitname(x$loc) <- value return(x) } scalardilate.msr <- function(X, f, ...) { X$loc <- scalardilate(X$loc, f, ...) X$density <- X$density/f^2 X$wt <- X$wt * f^2 return(X) } rotate.msr <- function(X, angle=pi/2, ..., centre=NULL) { X$loc <- rotate(X$loc, angle=angle, ..., centre=centre) return(X) } flipxy.msr <- function(X) { X$loc <- flipxy(X$loc) return(X) } rescale.msr <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, 1/s) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } affine.msr <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) { X$loc <- affine(X$loc, mat=mat, vec=vec, ...) detmat <- abs(det(mat)) X$density <- X$density/detmat X$wt <- X$wt * detmat return(X) } Ops.msr <- function(e1,e2=NULL){ vn <- c("val", "discrete", "density") if(nargs() == 1L) { #' unary operator if(!is.element(.Generic, c("+", "-"))) stop(paste("Unary operation", sQuote(paste0(.Generic, "A")), "is undefined for a measure A."), call.=FALSE) e1 <- unclass(e1) e1[vn] <- lapply(e1[vn], .Generic) if(!is.null(sm <- attr(e1, "smoothdensity"))) attr(e1, "smoothdensity") <- do.call(.Generic, sm) class(e1) <- "msr" return(e1) } else { #' binary operator m1 <- inherits(e1, "msr") m2 <- inherits(e2, "msr") if(m1 && m2) { if(!is.element(.Generic, c("+", "-"))) stop(paste("Operation", sQuote(paste0("A", .Generic, "B")), "is undefined for measures A, B"), call.=FALSE) k1 <- dim(e1)[2] k2 <- dim(e2)[2] if(k1 != k2) stop(paste("Operation", sQuote(paste0("A", .Generic, "B")), "is undefined because A, B have incompatible dimensions:", "A is", ngettext(k1, "scalar", paste0(k1, "-vector")), ", B is", ngettext(k2, "scalar", paste0(k2, "-vector"))), call.=FALSE) if(!identical(e1$loc, e2$loc)) { haha <- harmonise(e1, e2) e1 <- haha[[1L]] e2 <- haha[[2L]] } e1 <- unclass(e1) e2 <- unclass(e2) e1[vn] <- mapply(.Generic, e1[vn], e2[vn], SIMPLIFY=FALSE) class(e1) <- "msr" #' handle smoothed densities sm1 <- attr(e1, "smoothdensity") sm2 <- attr(e2, "smoothdensity") sm <- if(is.null(sm1) || is.null(sm2)) { NULL } else if(is.im(sm1) && is.im(sm2)) { do.call(.Generic, list(sm1, sm2)) } else if(is.im(sm1) && is.solist(sm2)) { mapply(.Generic, e2=sm2, MoreArgs=list(e1=sm1), SIMPLIFY=FALSE) } else if(is.solist(sm1) && is.im(sm2)) { mapply(.Generic, e1=sm1, MoreArgs=list(e2=sm2), SIMPLIFY=FALSE) } else if(is.solist(sm1) && is.solist(sm2)) { mapply(.Generic, e1=sm1, e2=sm2, SIMPLIFY=FALSE) } else NULL attr(e1, "smoothdensity") <- sm return(e1) } else if(m1 && is.numeric(e2)) { if(!is.element(.Generic, c("/", "*"))) stop(paste("Operation", sQuote(paste0("A", .Generic, "z")), "is undefined for a measure A and numeric z."), call.=FALSE) e1 <- unclass(e1) e1[vn] <- 
lapply(e1[vn], .Generic, e2=e2) class(e1) <- "msr" #' handle smoothed density sm1 <- attr(e1, "smoothdensity") sm <- if(is.null(sm1)) NULL else if(is.im(sm1)) do.call(.Generic, list(e1=sm1, e2=e2)) else if(is.solist(sm1)) solapply(sm1, .Generic, e2=e2) else NULL attr(e1, "smoothdensity") <- sm return(e1) } else if(m2 && is.numeric(e1)) { if(.Generic != "*") stop(paste("Operation", sQuote(paste0("z", .Generic, "A")), "is undefined for a measure A and numeric z."), call.=FALSE) e2 <- unclass(e2) e2[vn] <- lapply(e2[vn], .Generic, e1=e1) class(e2) <- "msr" #' handle smoothed density sm2 <- attr(e2, "smoothdensity") sm <- if(is.null(sm2)) NULL else if(is.im(sm2)) do.call(.Generic, list(e1=e1, e2=sm2)) else if(is.solist(sm2)) solapply(sm2, .Generic, e1=e1) else NULL attr(e2, "smoothdensity") <- sm return(e2) } stop(paste("Operation", sQuote(paste0("e1", .Generic, "e2")), "is undefined for this kind of data"), call.=FALSE) } } measurePositive <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- pmax(0, x$discrete) y$density <- pmax(0, x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } measureNegative <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- -pmin(0, x$discrete) y$density <- -pmin(0, x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } measureVariation <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- abs(x$discrete) y$density <- abs(x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } totalVariation <- function(x) integral(measureVariation(x)) measureDiscrete <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$density[] <- 0 y$val <- y$discrete y <- update(y) return(y) } measureContinuous <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete[] <- 0 y$val <- y$wt * y$density y <- update(y) return(y) } harmonise.msr <- local({ harmonise.msr <- function(...) { argz <- list(...) 
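    ## Strategy: form the union of the quadrature points of all the
    ## measures, carry each measure's density onto the union by
    ## nearest-neighbour interpolation, transfer atomic masses to the
    ## matching points, and rebuild each measure on the resulting
    ## common quadrature scheme.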
n <- length(argz) if(n == 0) return(argz) ismeasure <- sapply(argz, inherits, what="msr") if(!any(ismeasure)) stop("No measures supplied") if(!all(ismeasure)) stop("All arguments should be measures (objects of class msr)") if(n < 2) return(argz) result <- vector(mode="list", length=n) ## extract entries loclist <- lapply(argz, getElement, name="loc") atomlist <- lapply(argz, getElement, name="atoms") masslist <- lapply(argz, getElement, name="discrete") denslist <- lapply(argz, getElement, name="density") ## check for compatible dimensions of measure values dimen <- unique(sapply(argz, ncol)) if(length(dimen) > 1) stop("Measures have different dimensions:", commasep(sort(dimen))) ## check for marked points ismarked <- sapply(loclist, is.marked) if(any(ismarked) && !all(ismarked)) stop("Some, but not all, quadrature schemes are marked") ismarked <- all(ismarked) ## union of all quadrature points in all measures Uloc <- do.call(superimpose, append(unname(loclist), list(check=FALSE))) Uloc <- unique(Uloc) nU <- npoints(Uloc) ## match each quadrature set to the union ## and find nearest data point to each point in the union if(!ismarked) { matchlist <- lapply(loclist, nncross, Y=Uloc, what="which") nearlist <- lapply(loclist, ssorcnn, xx=Uloc, what="which") } else { stop("Not yet implemented for marked quadrature schemes") } ## nearest neighbour interpolation of density values of each argument ## onto the common quadrature set Udenslist <- mapply(extract, x=denslist, i=nearlist, SIMPLIFY=FALSE) ## initialise other bits noatoms <- logical(nU) zeromass <- if(dimen == 1) numeric(nU) else matrix(0, nU, dimen) Uatomlist <- rep(list(noatoms), n) Umasslist <- rep(list(zeromass), n) ## assign atoms in each argument Uatomlist <- mapply(subsetgets, x=Uatomlist, i=matchlist, value=atomlist, SIMPLIFY=FALSE) Umasslist <- mapply(subsetgets, x=Umasslist, i=matchlist, value=masslist, SIMPLIFY=FALSE) ## union of atoms isatom <- Reduce("|", Uatomlist) ## masses at atoms Umasslist <- lapply(Umasslist, extract, i=isatom) ## make common quadrature scheme UQ <- quadscheme(Uloc[isatom], Uloc[!isatom]) ## reorder density data correspondingly neworder <- c(which(isatom), which(!isatom)) Udenslist <- lapply(Udenslist, extract, i=neworder) ## make new measures result <- mapply(msr, MoreArgs=list(qscheme=UQ), discrete=Umasslist, density=Udenslist, SIMPLIFY=FALSE) names(result) <- names(argz) class(result) <- unique(c("solist", class(result))) return(result) } ssorcnn <- function(xx, yy, what) nncross(xx, yy, what=what) extract <- function(x, i) { if(is.matrix(x)) x[i, , drop=FALSE] else x[i] } subsetgets <- function(x, i, value) { if(is.matrix(x)) { x[i, ] <- value } else { x[i] <- value } return(x) } harmonise.msr }) unstack.msr <- function(x, ...) { trap.extra.arguments(...) 
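  ## split a vector-valued measure into a list of scalar measures,
  ## one per column, carrying the matching smoothed density if present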
d <- dim(x) if(is.null(d)) return(solist(x)) smo <- attr(x, "smoothdensity") if(!inherits(smo, "imlist")) smo <- NULL nc <- d[2] y <- vector(mode="list", length=nc) for(j in seq_len(nc)) { xj <- x[,j,drop=FALSE] if(!is.null(smo)) attr(xj, "smoothdensity") <- smo[[j]] y[[j]] <- xj } names(y) <- colnames(x) return(as.solist(y)) } measureWeighted <- function(m, w) { verifyclass(m, "msr") if(is.numeric(w)) return(w * m) ## evaluate weight at quadrature locations qloc <- with(m, qlocations) if(is.im(w)) { wq <- w[qloc, drop=FALSE] } else if(is.function(w)) { wq <- w(qloc$x, qloc$y) } else stop("w should be a pixel image or a function(x,y)") wq[is.na(wq)] <- 0 if(!is.numeric(wq)) stop("weights should be numeric", call.=FALSE) ## apply weight to increments of measure wincr <- wq * with(m, increment) wdisc <- wq * with(m, discrete) wdens <- wq * with(m, density) ## pack up out <- list(loc = qloc, val = wincr, atoms = with(m, is.atom), discrete = wdisc, density = wdens, wt = with(m, qweights)) class(out) <- "msr" ## update smooth density if it was present if(!is.null(smo <- attr(m, "smoothdensity"))) { sigma <- attr(smo, "sigma") out <- augment.msr(out, sigma=sigma) } return(out) } residualMeasure <- function(Q, lambda, type = c("raw", "inverse", "Pearson", "pearson"), ...) { if(is.ppp(Q)) Q <- quadscheme(Q, ...) if(!is.quad(Q)) stop("Argument Q should be a point pattern or quadrature scheme") type <- match.arg(type) P <- union.quad(Q) Lambda <- if(is.im(lambda)) { lambda[P, drop=FALSE] } else { sapply(lambda, "[", i=unmark(P), drop=FALSE) } switch(type, raw = { disc <- 1 dens <- -Lambda }, inverse = { disc <- (Lambda > 0) * 1 # sic. To retain dimensions dens <- -1 }, pearson = , Pearson = { sqrl <- sqrt(Lambda) disc <- ifelse(Lambda > 0, 1/sqrl, 0) dens <- -sqrl }) result <- msr(Q, disc, dens) return(result) } spatstat.model/R/badgey.R0000644000176200001440000001616514243551505015010 0ustar liggesusers# # # badgey.S # # $Revision: 1.17 $ $Date: 2018/03/15 07:37:41 $ # # Hybrid Geyer process # # BadGey() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # BadGey <- local({ # ........... auxiliary functions .............. delBG <- function(i, r, sat) { r <- r[-i] if(length(r) == length(sat)) { r <- r[-i] sat <- sat[-i] } else if(length(sat) == 1) { r <- r[-i] } else stop("Mismatch in dimensions of arguments r and sat") nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(BadGey(r, sat)) } # .............. template .................... 
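  #
  # The template below is an 'interact' object representing the hybrid
  # Geyer process: a saturated pairwise interaction with several
  # thresholds r[i], each with its own saturation parameter sat[i].
  #
  # Sketch of typical use (hypothetical parameter values):
  #   fit <- ppm(cells ~ 1, BadGey(r=c(0.05, 0.1), sat=c(2, 2)))
  #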
BlankBG <- list( name = "hybrid Geyer process", creator = "BadGey", family = "pairsat.family", # will be evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) for(i in 1:nr) out[,,i] <- (d <= r[i]) out }, par = list(r = NULL, sat=NULL), # to fill in later parnames = c("interaction radii", "saturation parameters"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction radii r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction radii r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[0,", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r good <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(good)) return(NULL) if(!any(good)) return(Poisson()) bad <- !good if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delBG(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delBG, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL, # to be added later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE) { # fast evaluator for BadGey interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for BadGey") r <- potpars$r sat <- potpars$sat # ensure r and sat have equal length if(length(r) != length(sat)) { if(length(r) == 1) r <- rep.int(r, length(sat)) else if(length(sat) == 1) sat <- rep.int(sat, length(r)) else stop("lengths of r and sat do not match") } # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } nterms <- length(r) answer <- matrix(, nrow=nU, ncol=nterms) for(k in 1:nterms) { # first determine saturated pair counts counts <- strausscounts(U, X, r[k], EqualPairs) satcounts <- pmin.int(sat[k], counts) # trapdoor used by suffstat() if(halfway) answer[,k] <- satcounts else if(sat[k] == Inf) answer[,k] <- 2 * satcounts else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r[k], sat[k], Xcounts, EqualPairs) answer[,k] <- satcounts + change } } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) } ) class(BlankBG) <- "interact" BadGey <- function(r, sat) { instantiate.interact(BlankBG, list(r=r, sat=sat)) } BadGey <- intermaker(BadGey, BlankBG) BadGey }) spatstat.model/R/resid4plot.R0000644000176200001440000006041514331173073015641 0ustar liggesusers# # # Residual plots: # resid4plot four panels with matching coordinates # resid1plot one or more unrelated individual plots # resid1panel one panel of resid1plot # # $Revision: 1.39 $ $Date: 2020/12/19 05:25:06 $ # # resid4plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions fun(...) } do.lines <- function(x, y, defaulty=1, ...) 
{ force(x) force(y) dont.complain.about(x,y) do.call(lines, resolve.defaults(list(quote(x), quote(y)), list(...), list(lty=defaulty))) } resid4plot <- function(RES, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL, xlab="x coordinate", ylab="y coordinate", rlab, col.neg=NULL, col.smooth=NULL, ...) { plot.neg <- match.arg(plot.neg) if(missing(rlab)) rlab <- NULL rlablines <- if(is.null(rlab)) 1 else sum(nzchar(rlab)) clip <- RES$clip Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type typename <- RES$typename Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] # set up 2 x 2 plot with space wide <- diff(W$xrange) high <- diff(W$yrange) space <- spacing * max(wide,high) width <- wide + space + wide height <- high + space + high outerspace <- outer * space outerRspace <- (outer - 1 + rlablines) * space plot(c(0, width) + c(-outerRspace, outerspace), c(0, height) + c(-outerspace, outerRspace), type="n", asp=1.0, axes=FALSE, xlab="", ylab="") # determine colour map for background nullvalue <- if(type == "eem") 1 else 0 if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(Yrange, Zrange, nullvalue), na.rm=TRUE) } else check.range(srange) backcols <- beachcolours(srange, nullvalue, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols # ------ plot residuals/marks (in top left panel) ------------ Xlowleft <- c(W$xrange[1],W$yrange[1]) vec <- c(0, high) + c(0, space) - Xlowleft # shift the original window Ws <- shift(W, vec) # shift the residuals Ys <- shift(Yclip,vec) # determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") # pre-plot the window(s) if(!redundant) { YsWin <- Ys$window if(!clip) do.clean(plot, YsWin, add=TRUE, ...) else do.clean(ploterodewin, Ws, YsWin, add=TRUE, ...) } ## adjust position of legend associated with eroded window sep <- if(clip) Wclip$yrange[1] - W$yrange[1] else NULL ## decide whether mark scale should be shown showscale <- (type != "raw") switch(plot.neg, discrete={ neg <- (Ys$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize","meansize","markscale") %in% names(list(...)))) { plot(Ys[neg], add=TRUE, legend=FALSE, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) plot(Ys[neg], add=TRUE, legend=FALSE, maxsize=hackmax, ...) } ## plot positive masses at atoms plot(Ys[!neg], add=TRUE, leg.side="left", leg.args=list(sep=sep), show.all=TRUE, main="", ...) }, contour = { Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) Contour(Yds, add=TRUE, ...) dont.complain.about(Yms) do.call(plot, resolve.defaults(list(x=quote(Yms), add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) }, imagecontour=, image={ Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) if(redundant) do.clean(ploterodeimage, Ws, Yds, rangeZ=srange, colsZ=col.neg, ...) else if(type != "eem") do.clean(image, Yds, add=TRUE, ribbon=FALSE, col=col.neg, zlim=srange, ...) if(plot.neg == "imagecontour") Contour(Yds, add=TRUE, ...) 
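                 ## (the background image shows the diffuse component of
                 ## the residual measure; atoms are overplotted next)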
## plot positive masses at atoms dont.complain.about(Yms) do.call(plot, resolve.defaults(list(x=quote(Yms), add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) } ) # --------- plot smoothed surface (in bottom right panel) ------------ vec <- c(wide, 0) + c(space, 0) - Xlowleft Zs <- shift.im(Z, vec) switch(plot.smooth, image={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) }, contour={ Contour(Zs, add=TRUE, ...) }, persp={ warning("persp not available in 4-panel plot") }, imagecontour={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) Contour(Zs, add=TRUE, ...) } ) lines(Zs$xrange[c(1,2,2,1,1)], Zs$yrange[c(1,1,2,2,1)]) # -------------- lurking variable plots ----------------------- # --------- lurking variable plot for x coordinate ------------------ # (cumulative or marginal) # in bottom left panel if(!is.null(RES$xmargin)) { a <- RES$xmargin observedV <- a$xZ observedX <- a$x theoreticalV <- a$ExZ theoreticalX <- a$x theoreticalSD <- theoreticalHI <- theoreticalLO <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$xcumul)) { a <- RES$xcumul observedX <- a$empirical$covariate observedV <- a$empirical$value theoreticalX <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pX <- pretty(theoreticalX) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { high * (y - rr[1])/diff(rr) } xscale <- function(x) { x - W$xrange[1] } if(!is.null(theoreticalHI)) { ## shaded confidence bands xp <- xscale( c(theoreticalX, rev(theoreticalX)) ) yp <- yscale( c(theoreticalHI, rev(theoreticalLO)) ) dont.complain.about(xp, yp) do.call.matched(polygon, resolve.defaults( list(x=quote(xp), y=quote(yp)), list(...), list(col="grey", border=NA))) } do.clean(do.lines, xscale(observedX), yscale(observedV), 1, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV + 2 * theoreticalSD), 3, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV - 2 * theoreticalSD), 3, ...) 
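      ## the curves at mean +/- 2*SD are approximate pointwise
      ## significance limits for the cumulative residual under the model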
} axis(side=1, pos=0, at=xscale(pX), labels=pX) text(xscale(mean(theoreticalX)), - outerspace, xlab) axis(side=2, pos=0, at=yscale(pV), labels=pV) text(-outerRspace, yscale(mean(pV)), rlab, srt=90) # --------- lurking variable plot for y coordinate ------------------ # (cumulative or marginal) # in top right panel if(!is.null(RES$ymargin)) { a <- RES$ymargin observedV <- a$yZ observedY <- a$y theoreticalV <- a$EyZ theoreticalY <- a$y theoreticalSD <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$ycumul)) { a <- RES$ycumul observedV <- a$empirical$value observedY <- a$empirical$covariate theoreticalY <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pY <- pretty(theoreticalY) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { y - W$yrange[1] + high + space} xscale <- function(x) { wide + space + wide * (rr[2] - x)/diff(rr) } if(!is.null(theoreticalHI)) do.call.matched(polygon, resolve.defaults( list(x=xscale(c(theoreticalHI, rev(theoreticalLO))), y=yscale(c(theoreticalY, rev(theoreticalY)))), list(...), list(col="grey", border=NA))) do.clean(do.lines, xscale(observedV), yscale(observedY), 1, ...) do.clean(do.lines, xscale(theoreticalV), yscale(theoreticalY), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalV+2*theoreticalSD), yscale(theoreticalY), 3, ...) do.clean(do.lines, xscale(theoreticalV-2*theoreticalSD), yscale(theoreticalY), 3, ...) } axis(side=4, pos=width, at=yscale(pY), labels=pY) text(width + outerspace, yscale(mean(theoreticalY)), ylab, srt=90) axis(side=3, pos=height, at=xscale(pV), labels=pV) text(xscale(mean(pV)), height + outerRspace, rlab) # if(!is.null(main)) title(main=main) invisible(NULL) } resid4plot }) # # # Residual plot: single panel(s) # # resid1plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions fun(...) } resid1plot <- function(RES, opt, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), srange=NULL, monochrome=FALSE, main=NULL, add=FALSE, show.all=!add, do.plot=TRUE, col.neg=NULL, col.smooth=NULL, ...) 
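  ## resid1plot() draws a single diagnostic panel, selected by 'opt';
  ## it is the workhorse behind diagnose.ppm(). A hedged sketch of the
  ## usual entry point, assuming the 'cells' dataset:
  ##   fit <- ppm(cells ~ 1, Strauss(0.1))
  ##   diagnose.ppm(fit, which="smooth")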
{ if(!any(unlist(opt[c("all", "marks", "smooth", "xmargin", "ymargin", "xcumul", "ycumul")]))) return(invisible(NULL)) if(!add && do.plot) { ## determine size of plot area by calling again with do.plot=FALSE cl <- match.call() cl$do.plot <- FALSE b <- eval(cl, parent.frame()) bb <- as.owin(b, fatal=FALSE) if(is.owin(bb)) { ## initialise plot area plot(bb, type="n", main="") force(show.all) add <- TRUE } } ## extract info clip <- RES$clip Y <- RES$Y Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] ## determine colour map if(opt$all || opt$marks || opt$smooth) { nullvalue <- if(type == "eem") 1 else 0 if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(Yrange, Zrange, nullvalue), na.rm=TRUE) } else check.range(srange) backcols <- beachcolours(srange, nullvalue, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols } ## determine main heading if(is.null(main)) { prefix <- if(opt$marks) NULL else if(opt$smooth) "Smoothed" else if(opt$xcumul) "Lurking variable plot for x coordinate\n" else if(opt$ycumul) "Lurking variable plot for y coordinate\n" else if(opt$xmargin) "Lurking variable plot for x coordinate\n" else if(opt$ymargin) "Lurking variable plot for y coordinate\n" else NULL main <- paste(prefix, RES$typename) } ## ------------- residuals --------------------------------- if(opt$marks) { ## determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") ## pre-plot the window(s) if(redundant && !add) { z <- do.clean(plot, as.rectangle(W), box=FALSE, main="", do.plot=do.plot, ...) } else { if(!clip) z <- do.clean(plot, W, main="", add=add, show.all=show.all, do.plot=do.plot, ...) else z <- do.clean(ploterodewin, W, Wclip, main="", add=add, show.all=show.all, do.plot=do.plot, ...) } bb <- as.owin(z) switch(plot.neg, discrete={ neg <- (Y$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize", "markscale") %in% names(list(...)))) { z <- plot(Y[neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) z <- plot(Y[neg], add=TRUE, maxsize=hackmax, show.all=show.all, do.plot=do.plot, ...) } ## plot positive masses at atoms zp <- plot(Y[!neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- boundingbox(bb, z, zp) }, contour = { z <- Contour(Ydens, add=TRUE, do.plot=do.plot, ...) bb <- boundingbox(bb, z) }, imagecontour=, image={ if(redundant) { z <- do.clean(ploterodeimage, W, Ydens, rangeZ=srange, colsZ=col.neg, add=add, show.all=show.all, main="", do.plot=do.plot, ...) } else if(type != "eem") { z <- do.clean(image, Ydens, col=col.neg, zlim=srange, ribbon=FALSE, add=TRUE, show.all=show.all, do.plot=do.plot, main="", ...) } bb <- boundingbox(bb, z) if(plot.neg == "imagecontour") { z <- Contour(Ydens, add=TRUE, show.all=show.all, do.plot=do.plot, ...) 
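              ## accumulate the bounding box of everything drawn so far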
bb <- boundingbox(bb, z) } ## decide whether mark scale should be shown showscale <- (type != "raw") ## plot positive masses at atoms dont.complain.about(Ymass) z <- do.call(plot, resolve.defaults(list(x=quote(Ymass), add=TRUE), list(...), list(use.marks=showscale, do.plot=do.plot))) bb <- boundingbox(bb, z) } ) if(do.plot && show.all) title(main=main) } # ------------- smooth ------------------------------------- if(opt$smooth) { if(!clip) { switch(plot.smooth, image={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, contour={ z <- Contour(Z, main="", axes=FALSE, xlab="", ylab="", do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main="", axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) bb <- as.owin(z) } ) if(do.plot && show.all) title(main=main) } else { switch(plot.smooth, image={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) bb <- boundingbox(as.rectangle(W), z) }, contour={ do.clean(plot, W, main=main, do.plot=do.plot, add=add, show.all=show.all, ...) z <- Contour(Z, add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main=main, axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) bb <- as.owin(z) } ) } } # ------------ cumulative x ----------------------------------------- if(opt$xcumul) { a <- RES$xcumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "x coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ cumulative y ----------------------------------------- if(opt$ycumul) { a <- RES$ycumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "y coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } ## ------------ x margin ----------------------------------------- if(opt$xmargin) { a <- RES$xmargin do.clean(resid1panel, a$x, a$xZ, a$x, a$ExZ, NULL, "x coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ y margin ----------------------------------------- if(opt$ymargin) { a <- RES$ymargin do.clean(resid1panel, a$y, a$yZ, a$y, a$EyZ, NULL, "y coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } attr(bb, "bbox") <- bb return(invisible(bb)) } resid1plot }) resid1panel <- local({ do.lines <- function(x, y, defaulty=1, ...) 
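  ## draw a curve, letting arguments in '...' override the default line type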
{ do.call(lines, resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } resid1panel <- function(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab, ylab, ..., do.plot=TRUE) { if(!do.plot) return(NULL) ## work out plot range rX <- range(observedX, theoreticalX) rV <- range(c(0, observedV, theoreticalV)) if(!is.null(theoreticalSD)) rV <- range(c(rV, theoreticalV + 2*theoreticalSD, theoreticalV - 2*theoreticalSD)) ## argument handling ## start plot plot(rX, rV, type="n", xlab=xlab, ylab=ylab, ...) do.lines(observedX, observedV, 1, ...) do.lines(theoreticalX, theoreticalV, 2, ...) if(!is.null(theoreticalSD)) { do.lines(theoreticalX, theoreticalV + 2 * theoreticalSD, 3, ...) do.lines(theoreticalX, theoreticalV - 2 * theoreticalSD, 3, ...) } } resid1panel }) # # ploterodewin <- function(W1, W2, col.edge=grey(0.75), col.inside=rgb(1,0,0), do.plot=TRUE, ...) { ## internal use only ## W2 is assumed to be an erosion of W1 switch(W1$type, rectangle={ z <- plot(W1, ..., do.plot=do.plot) plot(W2, add=TRUE, lty=2, do.plot=do.plot) }, polygonal={ z <- plot(W1, ..., do.plot=do.plot) plot(W2, add=TRUE, lty=2, do.plot=do.plot) }, mask={ Z <- as.im(W1) x <- as.vector(rasterx.mask(W1)) y <- as.vector(rastery.mask(W1)) ok <- inside.owin(x, y, W2) Z$v[ok] <- 2 z <- plot(Z, ..., col=c(col.edge, col.inside), add=TRUE, ribbon=FALSE, do.plot=do.plot, show.all=TRUE) } ) return(z) } ploterodeimage <- function(W, Z, ..., Wcol=grey(0.75), rangeZ, colsZ, do.plot=TRUE) { # Internal use only # Image Z is assumed to live on a subset of mask W # colsZ are the colours for the values in the range 'rangeZ' if(!is.mask(W)) { if(do.plot) plot(W, add=TRUE) W <- as.mask(W) } # Extend the colour map to include an extra colour for pixels in W # (1) Add the desired colour of W to the colour map pseudocols <- c(Wcol, colsZ) # (2) Breakpoints bks <- seq(from=rangeZ[1], to=rangeZ[2], length=length(colsZ)+1) dZ <- diff(bks)[1] pseudobreaks <- c(rangeZ[1] - dZ, bks) # (3) Determine a fake value for pixels in W Wvalue <- rangeZ[1] - dZ/2 # Create composite image on W grid # (with W-pixels initialised to Wvalue) X <- as.im(Wvalue, W) # Look up Z-values of W-pixels xx <- as.vector(rasterx.mask(W)) yy <- as.vector(rastery.mask(W)) Zvalues <- lookup.im(Z, xx, yy, naok = TRUE, strict=FALSE) # Overwrite pixels in Z inZ <- !is.na(Zvalues) X$v[inZ] <- Zvalues[inZ] z <- image(X, ..., add=TRUE, ribbon=FALSE, col=pseudocols, breaks=pseudobreaks, do.plot=do.plot) out <- list(X, pseudocols, pseudobreaks) attr(out, "bbox") <- as.owin(z) return(out) } spatstat.model/R/lurkmppm.R0000644000176200001440000001512114331173073015411 0ustar liggesusers#' lurkmppm.R #' Lurking variable plot for mppm #' $Revision: 1.9 $ $Date: 2022/01/04 05:30:06 $ lurking.mppm <- local({ zerofun <- function(x) rep(0, length(x)) threshfun <- function(threshold, value) { force(threshold) force(value) function(x) { value * (x >= threshold) } } approxcumfun <- function(x, y) { stopifnot(length(x) == length(y)) n <- length(x) if(n == 0) return(zerofun) if(n == 1) return(threshfun(x, y)) return(approxfun(x=x, y=y, yleft=0, yright=y[n], rule=2)) } as.function.lurk <- function(x, ..., what=c("empirical", "theoretical")) { what <- match.arg(what) switch(what, empirical = { with(x$empirical, approxcumfun(covariate, value)) }, theoretical = { with(x$theoretical, approxcumfun(covariate, mean)) }) } acceptable <- function(x) { is.im(x) || is.numeric(x) || is.expression(x) } approxcumul <- function(yin, xin, xout) { if(length(yin) > 1) { z <- approx(x=xin, y=yin, 
xout=xout, rule=2)$y } else { z <- yin * (xout >= xin) } return(z) } interpolateworking <- function(object, xx) { #' extract working data (variance terms) #' and interpolate them at the specified covariate values xx w <- attr(object, "working") if(is.null(w)) return(NULL) w <- as.data.frame(w) covariate <- object$theoretical$covariate y <- apply(w, 2, approxcumul, xin=covariate, xout=xx) return(as.data.frame(y)) } multilurk <- function(object, covariate, type="eem", ..., separate=FALSE, plot.it=TRUE, covname, oldstyle=FALSE, nx=512, main="") { cl <- match.call() stopifnot(is.mppm(object)) if(missing(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else "covariate" } Fisher <- vcov(object, what="fisher") Vcov <- solve(Fisher) if(acceptable(covariate)) { cov.is.list <- FALSE } else { cov.is.list <- is.list(covariate) && length(covariate) == object$npat && all(sapply(covariate, acceptable)) if(!cov.is.list) stop(paste("Argument 'covariate' should be", "a pixel image, a numeric vector, an expression", "or a list of such arguments", "with one entry for each row of original data"), call.=FALSE) } #' pseudo fitted model for each row of data futs <- subfits(object) #' make lurking variable plot object for each row if(cov.is.list) { #' list of covariate arguments, one for each row of data lurks <- mapply(lurking.ppm, object=futs, covariate=covariate, MoreArgs=list(type=type, plot.it=FALSE, ..., internal=list(saveworking=TRUE, Fisher=Fisher), nx=nx, oldstyle=oldstyle, covname=covname), SIMPLIFY=FALSE) } else { #' One covariate argument to rule them all #' First determine range of covariate values covrange <- range(sapply(futs, lurking, covariate=covariate, type=type, internal=list(getrange=TRUE)), na.rm=TRUE) #' Now compute lurking variable plots lurks <- anylapply(futs, lurking, covariate=covariate, type=type, plot.it=FALSE, ..., internal=list(saveworking=TRUE, Fisher=Fisher, covrange=covrange), nx=nx, oldstyle=oldstyle, covname=covname) } if(separate) { #' separate lurking variable plots for each row if(plot.it) { do.call(plot, resolve.defaults(list(x=quote(lurks)), list(...), list(main=main, mar.panel=c(5,4,2,3)))) return(invisible(lurks)) } else { return(lurks) } } #' auxiliary info infos <- lapply(lurks, attr, which="info") #' range of covariate values covrange <- range(unlist(lapply(infos, getElement, name="covrange")), na.rm=TRUE) xx <- seq(covrange[1], covrange[2], length=nx) #' empirical part efuns <- lapply(lurks, as.function.lurk, what="empirical") vlist <- lapply(efuns, do.call, list(xx)) sumv <- Reduce("+", vlist) empirical <- data.frame(covariate=xx, value=sumv) #' similar for theoretical curves tfuns <- lapply(lurks, as.function.lurk, what="theoretical") vlist <- lapply(tfuns, do.call, list(xx)) sumv <- Reduce("+", vlist) theoretical <- data.frame(covariate=xx, mean=sumv) #' variance calculation if available wlist <- lapply(lurks, interpolateworking, xx=xx) if(!any(sapply(wlist, is.null))) { w <- Reduce("+", wlist) varI <- w$varI if(oldstyle) { theoretical$sd <- sqrt(varI) } else { Bnames <- setdiff(colnames(w), c("varI", "varII")) B <- as.matrix(w[, Bnames, drop=FALSE]) if(ncol(B) != nrow(Vcov)) { warning("Internal variance data are incomplete; reverting to oldstyle=TRUE") oldstyle <- TRUE theoretical$sd <- sqrt(varI) } else { varII <- quadform(B, Vcov) varR <- varI - varII ra <- range(varR, finite=TRUE) if(ra[1] < 0) { warning(paste("Negative values of residual variance!", "Range =", prange(signif(ra, 4))), 
call.=FALSE) varR <- pmax(0, varR) } theoretical$sd <- sqrt(varR) } } } ## form result result <- list(empirical=empirical, theoretical=theoretical) class(result) <- "lurk" ## copy info e.g. type of residual info <- infos[[1]] info$covrange <- covrange attr(result, "info") <- info if(plot.it) plot(result, ..., main=main) return(invisible(result)) } multilurk }) spatstat.model/R/ppm.R0000644000176200001440000002372714331173073014351 0ustar liggesusers# # $Revision: 1.61 $ $Date: 2022/11/03 11:08:33 $ # # ppm() # Fit a point process model to a two-dimensional point pattern # # ppm <- function(Q, ...) { UseMethod("ppm") } ppm.formula <- function(Q, interaction=NULL, ..., data=NULL, subset) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ## trap a common error to give a more informative message if(is.sob(data) || is.function(data)) stop(paste("The argument", sQuote("data"), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) ########### INTERPRET FORMULA ############################## if(!inherits(Q, "formula")) stop(paste("Argument 'Q' should be a formula")) formula <- Q ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2]] trend <- formula[c(1,3)] ## FIT ####################################### thecall <- if(missing(subset)) { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction) } else { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction, subset=substitute(subset)) } ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv) result$call <- cl result$callstring <- callstring result$callframe <- parent.frame() return(result) } ppm.quad <- ppm.ppp <- ppm.default <- function(Q, trend = ~1, interaction = Poisson(), ..., covariates = data, data = NULL, covfunargs = list(), subset, clipwin, correction="border", rbord = reach(interaction), use.gam=FALSE, method = c("mpl", "logi", "VBlogi"), forcefit=FALSE, improve.type = c("none", "ho", "enet"), improve.args=list(), emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL ) { Qname <- short.deparse(substitute(Q)) subsetexpr <- if(!missing(subset)) substitute(subset) else NULL clipwin <- if(!missing(clipwin)) clipwin else NULL datalistname <- if(missing(covariates)) "data" else "covariates" if(!missing(emend) && !missing(project) && emend != project) stop("Conflicting options given: emend != project") ## Parse fitting method method.given <- !missing(method) improvetype.given <- !missing(improve.type) method <- match.arg(method) improve.type <- match.arg(improve.type) if(!is.null(prior.mean) | !is.null(prior.var)){ if(!method.given) { method <- "VBlogi" } else if(method != "VBlogi") { stop(paste("Prior specification only works with method", sQuote("VBlogi"))) } } switch(method, mpl = { }, logi = { VB <- FALSE }, VBlogi = { method <- "logi" VB <- TRUE }, ho = { ## old syntax method <- "mpl" if(!improvetype.given) { warning(paste("Syntax 'method=\"ho\"' is deprecated;", "use 'improve.type=\"ho\"'")) improve.type <- "ho" } else if(improve.type != "ho") { stop(paste("Old syntax 'method=\"ho\"' is inconsistent with", 
sQuote(paste("improve.type=", dQuote(improve.type))))) } }, stop(paste("Unrecognised option", sQuote(paste("method=", dQuote(method)))))) if(is.sob(covariates) || is.function(covariates)) stop(paste("The argument", sQuote(datalistname), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) if(inherits(Q, "logiquad")){ if(missing(method)) method <- "logi" if(method != "logi") stop(paste("Only method =", sQuote("logi"), "makes sense when Q is of type", sQuote("logiquad"))) } cl <- match.call() if(is.null(callstring)) callstring <- paste(short.deparse(sys.call()), collapse="") if(is.ppp(Q) && is.marked(Q) && !is.multitype(Q)) stop(paste("ppm is not yet implemented for marked point patterns,", "other than multitype patterns.")) if(!(is.ppp(Q) || is.quad(Q) || checkfields(Q, c("data", "dummy")))) { stop("Argument Q must be a point pattern or a quadrature scheme") } X <- if(is.ppp(Q)) Q else Q$data ## Validate interaction if(is.null(interaction)) { interaction <- Poisson() } else if(inherits(interaction, "intermaker")) { ## e.g. 'interaction=Hardcore': invoke it without arguments interaction <- (f <- interaction)() dont.complain.about(f) } else if(!is.interact(interaction)) stop("Argument 'interaction' must be an object of class 'interact'") ## Ensure interaction is fully defined if(!is.null(ss <- interaction$selfstart)) { # invoke selfstart mechanism to fix all parameters interaction <- ss(X, interaction) } if(inherits(trend, "formula")) { ## handle "." in formula, representing all variables in 'data' if("." %in% variablesinformula(trend)) { if(is.null(covariates)) stop("Cannot expand '.' since 'data' is not present", call.=FALSE) rhs <- paste(names(covariates), collapse=" + ") allmaineffects <- as.formula(paste("~", rhs)) environment(allmaineffects) <- environment(trend) trend <- update(allmaineffects, trend) } ## expand polynom() in formula if(spatstat.options("expand.polynom")) trend <- expand.polynom(trend) } # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # validate rbord if(correction == "border") { # rbord for border correction rbord.given <- !missing(rbord) && !is.null(rbord) if(is.null(rbord)) rbord <- reach(interaction) infin <- is.infinite(rbord) too.large <- infin || (eroded.areas(as.owin(X), rbord) == 0) if(too.large) { whinge <- paste(if(rbord.given) "rbord" else "the reach of this interaction", if(infin) "is infinite or unknown;" else "is too large for this window;", "please specify", if(rbord.given) "a smaller value of", "rbord, or use a different edge correction") stop(whinge) } } else { # rbord must be numeric to satisfy mpl.engine if(is.null(rbord)) rbord <- 0 } ##.............. Fit model ............................... switch(method, logi = { ## Fit by logistic composite likelihood fit <- logi.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, gcontrol=gcontrol, callstring=callstring, prior.mean=prior.mean, prior.var=prior.var, VB=VB, ...) 
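           ## logi.engine() maximises the logistic composite likelihood;
           ## with VB=TRUE it computes a variational Bayes approximation
           ## using the specified prior mean and variance. Example of
           ## requesting this method, assuming the 'cells' data:
           ##   ppm(cells ~ x, Strauss(0.1), method="logi")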
}, mpl = { ## fit by maximum pseudolikelihood fit <- mpl.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, eps = eps, gcontrol=gcontrol, callstring=callstring, ...) }, stop(paste("Internal error - method =", sQuote(method))) ) ## Fill in model details fit$Qname <- Qname if(!is.ppm(fit)) { ## internal use only - returns some other data return(fit) } fit$call <- cl fit$callstring <- callstring fit$callframe <- parent.frame() ## Detect invalid coefficients if(emend && !valid.ppm(fit)) fit <- emend.ppm(fit) ##.............. Improve fit ............................... switch(improve.type, none = { fit$improve.type <- "none" }, ho = { fit <- do.call(ho.engine, resolve.defaults(list(quote(fit), nsim=nsim, nrmh=nrmh, start=start, control=control, verb=verb), improve.args)) }, enet = { fit <- do.call(enet.engine, append(list(quote(fit)), improve.args)) }) if(emend && !valid.ppm(fit)) fit <- emend.ppm(fit) return(fit) } spatstat.model/R/rmh.ppm.R0000644000176200001440000001255614331173073015134 0ustar liggesusers# # simulation of FITTED model # # $Revision: 1.38 $ $Date: 2022/04/06 07:23:19 $ # # rmh.ppm <- function(model, start = NULL, control = default.rmhcontrol(model, w=w), ..., w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, new.coef=NULL) { verifyclass(model, "ppm") check.1.integer(nsim) stopifnot(nsim >= 0) if(nsim == 0) return(simulationresult(list())) argh <- list(...) if(is.null(control)) { control <- default.rmhcontrol(model, w=w) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) # convert fitted model object to list of parameters for rmh.default X <- rmhmodel(model, w=w, verbose=verbose, project=project, control=control, new.coef=new.coef) # set initial state if(is.null(start)) { datapattern <- data.ppm(model) start <- rmhstart(n.start=datapattern$n) } # call rmh.default # passing only arguments unrecognised by rmhcontrol known <- names(argh) %in% names(formals(rmhcontrol.default)) fargs <- argh[!known] Y <- do.call(rmh.default, append(list(model=X, start=start, control=control, nsim=nsim, drop=drop, saveinfo=saveinfo, verbose=verbose), fargs)) return(Y) } simulate.ppm <- function(object, nsim=1, ..., singlerun=FALSE, start = NULL, control = default.rmhcontrol(object, w=w), w = window, window = NULL, project=TRUE, new.coef=NULL, verbose=FALSE, progress=(nsim > 1), drop=FALSE) { verifyclass(object, "ppm") argh <- list(...) check.1.integer(nsim) stopifnot(nsim >= 0) if(nsim == 0) return(simulationresult(list())) starttime = proc.time() # set up control parameters if(missing(control) || is.null(control)) { rcontr <- default.rmhcontrol(object, w=w) } else { rcontr <- rmhcontrol(control) } if(singlerun) { # allow nsave, nburn to determine nrep nsave <- resolve.1.default("nsave", list(...), as.list(rcontr), .MatchNull=FALSE) nburn <- resolve.1.default("nburn", list(...), as.list(rcontr), list(nburn=nsave), .MatchNull=FALSE) if(!is.null(nsave)) { nrep <- nburn + (nsim-1) * sum(nsave) rcontr <- update(rcontr, nrep=nrep, nsave=nsave, nburn=nburn) } } # other overrides if(length(list(...)) > 0) rcontr <- update(rcontr, ...) 
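  # A hedged sketch of typical use, assuming the 'swedishpines' data:
  #   fit <- ppm(swedishpines ~ 1, Strauss(7))
  #   Xlist <- simulate(fit, nsim=5, singlerun=TRUE)  # one long MCMC run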
# Set up model parameters for rmh rmodel <- rmhmodel(object, w=w, verbose=FALSE, project=TRUE, control=rcontr, new.coef=new.coef) if(is.null(start)) { datapattern <- data.ppm(object) start <- rmhstart(n.start=datapattern$n) } rstart <- rmhstart(start) ######### if(singlerun && nsim > 1) { # ////////////////////////////////////////////////// # execute one long run and save every k-th iteration if(is.null(rcontr$nsave)) { # determine spacing between subsamples if(!is.null(rcontr$nburn)) { nsave <- max(1, with(rcontr, floor((nrep - nburn)/(nsim-1)))) } else { # assume nburn = 2 * nsave nsave <- max(1, with(rcontr, floor(nrep/(nsim+1)))) nburn <- 2 * nsave } rcontr <- update(rcontr, nsave=nsave, nburn=nburn) } # check nrep is enough nrepmin <- with(rcontr, nburn + (nsim-1) * sum(nsave)) if(rcontr$nrep < nrepmin) rcontr <- update(rcontr, nrep=nrepmin) # OK, run it if(progress) { cat(paste("Generating", nsim, "simulated patterns in a single run ... ")) flush.console() } Y <- rmh(rmodel, rstart, rcontr, verbose=verbose) if(progress) cat("Done.\n") # extract sampled states out <- attr(Y, "saved") nout <- length(out) if(nout == nsim+1L && identical(names(out)[1], "Iteration_0")) { ## expected behaviour: first entry is initial state out <- out[-1L] } else if(nout != nsim) { stop(paste("Internal error: wrong number of simulations generated:", nout, "!=", nsim)) } } else { # ////////////////////////////////////////////////// # execute 'nsim' independent runs out <- list() # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=verbose) # go if(nsim > 0) { if(progress) { cat(paste("Generating", nsim, "simulated", ngettext(nsim, "pattern", "patterns"), "...")) flush.console() } # call rmh # passing only arguments unrecognised by rmhcontrol known <- names(argh) %in% names(formals(rmhcontrol.default)) fargs <- argh[!known] rmhargs <- append(list(InfoList=rmhinfolist, verbose=verbose), fargs) if(progress) pstate <- list() for(i in 1:nsim) { out[[i]] <- do.call(rmhEngine, rmhargs) if(progress) pstate <- progressreport(i, nsim, state=pstate) } } } out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) return(out) } spatstat.model/R/evidence.R0000644000176200001440000002437214424137177015344 0ustar liggesusers#' #' evidence.R #' #' evaluate covariate values at data points and at pixels #' together with intensity of null/reference model #' #' $Revision: 1.50 $ $Date: 2023/05/02 06:45:02 $ #' ## Code for generic spatialCovariateEvidence() is moved to spatstat.explore spatialCovariateEvidence.ppm <- local({ spatialCovariateEvidence.ppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), dimyx=NULL, eps=NULL, rule.eps=c("adjust.eps", "grow.frame", "shrink.frame"), interpolate=TRUE, jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL, clip.predict=TRUE) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels ispois <- is.poisson(model) csr <- ispois && is.stationary(model) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { if(is.character(covariate)) covname <- covariate else covname <- singlestring(short.deparse(substitute(covariate))) } if(is.null(dataname)) dataname <- model$Qname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=ispois, spacename="two dimensions") X <- data.ppm(model) W <- as.owin(model) #' explicit control of pixel 
resolution if(!is.null(dimyx) || !is.null(eps)) { rule.eps <- match.arg(rule.eps) W <- as.mask(W, dimyx=dimyx, eps=eps, rule.eps=rule.eps) } Wfull <- Zfull <- NULL if(!is.null(subset)) { #' restrict to subset if(!clip.predict) { ## use original window for prediction Wfull <- W } X <- X[subset] W <- W[subset, drop=FALSE] } #' evaluate covariate if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(model)) { #' ................... unmarked ....................... if(is.im(covariate)) { type <- "im" if(!interpolate) { #' look up covariate values ZX <- safelookup(covariate, X) } else { #' evaluate at data points by interpolation ZX <- interp.im(covariate, X$x, X$y) #' fix boundary glitches if(any(uhoh <- is.na(ZX))) ZX[uhoh] <- safelookup(covariate, X[uhoh]) } #' covariate values for pixels inside window (for calculation) Z <- covariate[W, drop=FALSE] #' covariate values for pixels inside window (for prediction) if(!is.null(Wfull)) Zfull <- covariate[Wfull, drop=FALSE] #' corresponding mask W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- covariate(X$x, X$y) if(!all(is.finite(ZX))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) if(!is.null(Wfull)) Zfull <- as.im(covariate, W=Wfull) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' values of covariate in window Zvalues <- as.vector(Z[W, drop=TRUE]) #' corresponding fitted [conditional] intensity values lambda <- as.vector(predict(model, locations=W, type=lambdatype)[W, drop=TRUE]) #' pixel area (constant) pixelarea <- with(Z, xstep * ystep) } else { #' ................... marked ....................... 
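      #' (the covariate may differ between mark levels, so covariate
      #'  values and fitted intensities are assembled mark by mark below)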
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(X, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), times=length(possmarks)) names(covariate) <- as.character(possmarks) } #' if(is.list(covariate) && all(sapply(covariate, is.im))) { #' list of images type <- "im" if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' evaluate covariate at each data point ZX <- numeric(npts) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' look up covariate values for points with this mark values <- safelookup(covariate.k, X[ii]) } else { #' interpolate values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, X[ii][uhoh]) } ZX[ii] <- values } #' restrict covariate images to window Z <- solapply(covariate, "[", i=W, drop=FALSE) if(!is.null(Wfull)) Zfull <- solapply(covariate, "[", i=Wfull, drop=FALSE) #' extract pixel locations and pixel values Zframes <- lapply(Z, as.data.frame) #' covariate values at each pixel inside window Zvalues <- unlist(lapply(Zframes, getElement, name="value")) #' pixel locations locn <- lapply(Zframes, getxy) #' tack on mark values for(k in seq_along(possmarks)) locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k])) loc <- do.call(rbind, locn) #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=loc, type=lambdatype) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- functioncaller(x=X$x, y=X$y, m=marx, f=covariate, ...) #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) } #' same window W <- as.mask(W) #' covariate in window Z <- list() for(k in seq_along(possmarks)) Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W, ...) Zvalues <- unlist(lapply(Z, pixelvalues)) #' covariate in original window, for prediction if(!is.null(Wfull)) { Zfull <- list() for(k in seq_along(possmarks)) Zfull[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=Wfull, ...) } #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=W, type=lambdatype) lambda <- unlist(lapply(lambda, pixelvalues)) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' collapse function body to single string covname <- singlestring(covname) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' ..........................................................
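  #' This evidence is consumed by goodness-of-fit procedures; a hedged
  #' sketch of an indirect call, assuming the 'cells' data:
  #'   fit <- ppm(cells ~ x)
  #'   cdf.test(fit, "y")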
#' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' lambda image(s) lambdaimage <- predict(model, locations=Wfull %orifnull% W, type=lambdatype) #' wrap up values <- list(Zimage = Zfull %orifnull% Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info, X=X)) # X is possibly a subset of original } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} pixarea <- function(z) { z$xstep * z$ystep } npixdefined <- function(z) { sum(!is.na(z$v)) } pixelvalues <- function(z) { as.data.frame(z)[,3L] } getxy <- function(z) { z[,c("x","y")] } ## Function caller used for marked locations (x,y,m) only. functioncaller <- function(x,y,m,f,...) { nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") if(nf == 2) return(f(x,y)) if(nf == 3) return(f(x,y,m)) argh <- list(...) extra <- intersect(names(argh), names(formals(f))[-(1:3)]) value <- do.call(f, append(list(x,y,m), argh[extra])) return(value) } spatialCovariateEvidence.ppm }) ## Code for spatialCovariateEvidence.ppp() is moved to spatstat.explore ## Code for spatialCovariateEvidence.exactppm() is moved to spatstat.explore spatstat.model/R/objsurf.R0000644000176200001440000001762314334115572015230 0ustar liggesusers# # objsurf.R # # surface of the objective function for an M-estimator # # $Revision: 1.33 $ $Date: 2022/11/13 07:08:49 $ # objsurf <- function(x, ...) { UseMethod("objsurf") } objsurf.kppm <- objsurf.dppm <- function(x, ..., ngrid=32, xlim=NULL, ylim=NULL, enclose=FALSE, ratio=1.5, verbose=TRUE) { ## history of function evaluations h <- attr(x, "h") if(!is.null(h)) { dotargs <- list(...) 
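    ## A hedged sketch of typical use, assuming the 'redwood' data:
    ##   fit <- kppm(redwood ~ 1, "Thomas")
    ##   os <- objsurf(fit, ngrid=16)
    ##   plot(os)    # image of the objective surface
    ##   lines(os)   # adds the optimisation trajectory, if recorded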
if(!is.null(parmap <- dotargs$parmap)) { ## transform to new parametrisation tran <- parmap[[1]] tranpars <- as.data.frame(t(apply(h, 1, tran))) h <- cbind(tranpars, value=h$value) } if(enclose) { ## determine xlim, ylim to enclose the history if(is.null(xlim)) xlim <- range(h[,1]) if(is.null(ylim)) ylim <- range(h[,2]) } class(h) <- unique(c("traj", class(h))) } ## objective function surface Fit <- x$Fit switch(Fit$method, mincon = { result <- objsurf(Fit$mcfit, ..., ngrid=ngrid, xlim=xlim, ylim=ylim, ratio=ratio, verbose=verbose) }, palm = , clik2 = { optpar <- x$par.canon %orifnull% x$par objfun <- Fit$objfun objargs <- Fit$objargs result <- objsurfEngine(objfun, optpar, objargs, ..., objname = "log composite likelihood", ngrid=ngrid, xlim=xlim, ylim=ylim, ratio=ratio, verbose=verbose) }, stop(paste("Unrecognised fitting method", dQuote(Fit$method)), call.=FALSE) ) ## history of function evaluations attr(result, "h") <- h return(result) } objsurf.minconfit <- function(x, ..., ngrid=32, xlim=NULL, ylim=NULL, ratio=1.5, verbose=TRUE) { optpar <- x$par.canon %orifnull% x$par objfun <- x$objfun objargs <- x$objargs dotargs <- x$dotargs result <- objsurfEngine(objfun, optpar, objargs, ..., objname = "contrast", dotargs=dotargs, ngrid=ngrid, xlim=xlim, ylim=ylim, ratio=ratio, verbose=verbose) return(result) } objsurfEngine <- function(objfun, optpar, objargs, ..., dotargs=list(), objname="objective", new.objargs=list(), parmap = NULL, ngrid=32, xlim=NULL, ylim=NULL, ratio=1.5, verbose=TRUE) { trap.extra.arguments(...) if(!is.function(objfun)) stop("Object is in an outdated format and needs to be re-fitted") if(length(new.objargs)) objargs <- resolve.defaults(new.objargs, objargs) npar <- length(optpar) if(npar != 2) stop("Only implemented for functions of 2 arguments") ## create grid of values of (possibly transformed) parameters ratio <- ensure2vector(ratio) ngrid <- ensure2vector(ngrid) stopifnot(all(ratio > 1)) if(is.null(parmap)) { ## use original parameters if(is.null(xlim)) xlim <- optpar[1] * c(1/ratio[1], ratio[1]) if(is.null(ylim)) ylim <- optpar[2] * c(1/ratio[2], ratio[2]) xgrid <- seq(xlim[1], xlim[2], length=ngrid[1]) ygrid <- seq(ylim[1], ylim[2], length=ngrid[2]) pargrid <- expand.grid(xgrid, ygrid) colnames(pargrid) <- names(optpar) } else { if(!(length(parmap) == 2 && all(sapply(parmap, is.function)))) stop("parmap should be a list of 2 functions") tran <- parmap[[1]] invtran <- parmap[[2]] ## transformed parameters Toptpar <- tran(optpar) if(is.null(xlim)) xlim <- Toptpar[1] * c(1/ratio[1], ratio[1]) if(is.null(ylim)) ylim <- Toptpar[2] * c(1/ratio[2], ratio[2]) xgrid <- seq(xlim[1], xlim[2], length=ngrid[1]) ygrid <- seq(ylim[1], ylim[2], length=ngrid[2]) Tpargrid <- expand.grid(xgrid, ygrid) colnames(Tpargrid) <- names(Toptpar) ## inverse transform pargrid <- t(apply(Tpargrid, 1, invtran)) colnames(pargrid) <- names(optpar) ## finally overwrite optimal values optpar <- Toptpar } # evaluate objective function if(verbose) cat(paste("Evaluating", nrow(pargrid), "function values...")) values <- do.call(apply, append(list(pargrid, 1, objfun, objargs=objargs), dotargs)) if(verbose) cat("Done.\n") result <- list(x=xgrid, y=ygrid, z=matrix(values, ngrid[1], ngrid[2])) attr(result, "optpar") <- optpar attr(result, "objname") <- objname class(result) <- "objsurf" return(result) } print.objsurf <- function(x, ...) 
{ cat("Objective function surface\n") optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) cat(paste("\tFunction value:", objname, "\n")) cat("Parameter limits:\n") cat(paste("\t", paste0(nama[1L], ":"), prange(signif(range(x$x), 4)), "\n")) cat(paste("\t", paste0(nama[2L], ":"), prange(signif(range(x$y), 4)), "\n")) if(!is.null(attr(x, "h"))) splat("[Includes history of evaluations of objective function]") invisible(NULL) } summary.objsurf <- function(object, ...) { y <- list(xrange=range(object$x), yrange=range(object$y), objrange=range(object$z), optpar=as.list(attr(object, "optpar")), objname=attr(object, "objname") , trace=attr(object, "h") # may be NULL ) class(y) <- c("summary.objsurf", class(y)) return(y) } print.summary.objsurf <- function(x, ...) { with(x, { cat("Objective function surface\n") cat(paste("\tFunction value:", objname, "\n")) cat(paste("\tRange of values:", prange(objrange), "\n")) cat("Parameter limits (xrange, yrange):\n") nama <- names(optpar) cat(paste("\t", paste0(nama[1L], ":"), prange(xrange), "\n")) cat(paste("\t", paste0(nama[2L], ":"), prange(yrange), "\n")) cat("Selected parameter values (optpar):\n") cat(paste("\t", paste(nama, "=", optpar, collapse=", "), "\n")) if(!is.null(trace)) splat("[Includes history of evaluations of objective function]") }) return(invisible(NULL)) } image.objsurf <- plot.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") nama <- names(optpar) xx <- unclass(x) dont.complain.about(xx) do.call(image, resolve.defaults(list(x=quote(xx)), list(...), list(xlab=nama[1L], ylab=nama[2L], main=xname))) abline(v=optpar[1L], lty=3) abline(h=optpar[2L], lty=3) return(invisible(NULL)) } lines.objsurf <- function(x, ..., directed=FALSE) { sx <- summary(x) if(!is.null(h <- sx[["trace"]])) { lines(h, ..., directed=directed) } else message("No trajectory data") return(invisible(NULL)) } contour.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- summary(x)[["optpar"]] nama <- names(optpar) xx <- unclass(x) dont.complain.about(xx) do.call(contour, resolve.defaults(list(x=quote(xx)), list(...), list(xlab=nama[1], ylab=nama[2], main=xname))) abline(v=optpar[1], lty=3) abline(h=optpar[2], lty=3) return(invisible(NULL)) } persp.objsurf <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) xx <- x$x yy <- x$y zz <- x$z dont.complain.about(xx, yy, zz) r <- do.call(persp, resolve.defaults(list(x=quote(xx), y=quote(yy), z=quote(zz)), list(...), list(xlab=nama[1], ylab=nama[2], zlab=objname, main=xname))) return(invisible(r)) } spatstat.model/R/residuals.mppm.R0000644000176200001440000000566414331173074016521 0ustar liggesusers# # residuals.mppm.R # # computes residuals for fitted multiple point process model # # # $Revision: 1.5 $ $Date: 2015/01/29 06:44:26 $ # residuals.mppm <- function(object, type="raw", ..., fittedvalues = fitted.mppm(object)) { verifyclass(object, "mppm") userfitted <- !missing(fittedvalues) type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals") typename <- typenames[[type]] # Extract quadrature points and weights Q <- quad.mppm(object) # U <- lapply(Q, union.quad) # quadrature point patterns Z <- unlist(lapply(Q, is.data)) # indicator data/dummy W <- unlist(lapply(Q, w.quad)) # quadrature weights # total number of quadrature points nquadrature <- length(W) # number of quadrature points in each pattern nU <- unlist(lapply(Q, n.quad)) # number of rows of hyperframe npat <- object$npat # attribution of each quadrature point id <- factor(rep(1:npat, nU), levels=1:npat) # Compute fitted conditional intensity at quadrature points if(!is.list(fittedvalues) || length(fittedvalues) != npat) stop(paste(sQuote("fittedvalues"), "should be a list of length", npat, "containing vectors of fitted values")) lambda <- unlist(fittedvalues) # consistency check if(length(lambda) != nquadrature) stop(paste(if(!userfitted) "internal error:" else NULL, "number of fitted values", paren(length(lambda)), "does not match number of quadrature points", paren(nquadrature))) # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) # Evaluate residual measure components discrete <- ifelse(Z, switch(type, raw = 1, inverse = 1/lambda, pearson = 1/sqrt(lambda) ), 0) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda)) atoms <- as.logical(Z) # All components resdf <- data.frame(discrete=discrete, density=density, atoms=atoms) # Split residual data according to point pattern affiliation splitres <- split(resdf, id) # Associate with quadrature scheme reshf <- hyperframe(R=splitres, Q=Q) # Convert to signed measures answer <- with(reshf, msr(Q, R$discrete[R$atoms], R$density)) # tag answer <- lapply(answer, "attr<-", which="type", value=type) answer <- lapply(answer, "attr<-", which="typename", value=typename) return(as.solist(answer)) } spatstat.model/R/reduceformula.R0000644000176200001440000000627414331173074016411 0ustar liggesusers# # reduceformula.R # # $Revision: 1.7 $ $Date: 2016/12/30 01:44:07 $ # # delete variable from formula # #...................................................... # reduceformula <- function(fmla, deletevar, verbose=FALSE) { ## removes the variable `deletevar' from the formula `fmla' ## returns a simplified formula, or NULL if it can't simplify. 
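  ## e.g. reduceformula(y ~ x + z + x:z, "z") yields y ~ x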
stopifnot(inherits(fmla, "formula")) stopifnot(is.character(deletevar) && length(deletevar) == 1) if(!(deletevar %in% all.vars(as.expression(fmla)))) { if(verbose) message(paste("The formula does not involve", dQuote(deletevar), "and is therefore unchanged")) return(fmla) } lhs <- if(length(fmla) < 3) NULL else fmla[[2]] ## create terms object tt <- attributes(terms(fmla)) ## formula.has.intercept <- (tt$intercept == 1) ## extract all variables appearing in the model vars <- as.list(tt$variables)[-1] nvars <- length(vars) varexprs <- lapply(vars, as.expression) varstrings <- sapply(varexprs, paste) ## identify any offsets offs <- tt$offset v.is.offset <- if(!is.null(offs)) (1:nvars) %in% offs else rep(FALSE, nvars) ## remove the response repo <- tt$response if(repo != 0) { vars <- vars[-repo] varstrings <- varstrings[-repo] varexprs <- varexprs[-repo] v.is.offset <- v.is.offset[-repo] } ## a term may be a variable name ## v.is.name <- sapply(vars, is.name) ## a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(varexprs, all.vars) matches.delete <- lapply(v.args, "==", deletevar) v.has.delete <- sapply(matches.delete, any) v.has.other <- !sapply(matches.delete, all) v.is.mixed <- v.has.delete & v.has.other ## we can't handle mixed terms like sin(x-d), poly(x,d) ## where d is to be deleted. Handling these would require ## knowledge about the functions sin and poly. if(any(v.is.mixed)) { nmixed <- sum(v.is.mixed) if(verbose) message(paste("Don't know how to reduce the", ngettext(nmixed, "term", "terms"), paste(dQuote(varstrings[v.is.mixed]), collapse=","))) return(NULL) } ## OK. We have identified all first order terms to be deleted. condemned.names <- varstrings[v.has.delete] ## Determine the terms of all orders that include these first order terms ## (1) terms with model coefficients fax <- tt$factors if(prod(dim(fax)) == 0) retained.terms <- character(0) else { ## Rows are first order terms condemned.row <- rownames(fax) %in% condemned.names ## Columns are the terms of all orders allterms <- colnames(fax) ## Find all columns containing a 1 in a row that is to be deleted if(any(condemned.row)) { condemned.column <- matcolany(fax[condemned.row, , drop=FALSE] != 0) retained.terms <- allterms[!condemned.column] } else retained.terms <- allterms } ## (2) offsets if any if(any(v.is.offset)) retained.terms <- c(retained.terms, varstrings[v.is.offset & !v.has.delete]) ## (3) intercept forced? if(length(retained.terms) == 0) retained.terms <- "1" ## OK. Cut-and-paste f <- paste(lhs, "~", paste(retained.terms, collapse=" + ")) return(as.formula(f)) } spatstat.model/R/profilepl.R0000644000176200001440000002766414331173073015555 0ustar liggesusers# # profilepl.R # # $Revision: 1.47 $ $Date: 2020/11/17 03:47:24 $ # # computes profile log pseudolikelihood # profilepl <- local({ ## Determine edge correction ## with partial matching, avoiding collisions with ## other arguments to ppm that have similar names. 
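  ## Typical use of profilepl(), for orientation (a sketch, assuming
  ## the 'cells' data):
  ##   s <- data.frame(r=seq(0.05, 0.15, by=0.01))
  ##   ps <- profilepl(s, Strauss, cells)
  ##   plot(ps)
  ##   as.ppm(ps)   # the model fitted at the optimal r
  ##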
getppmcorrection <- function(..., correction = "border", covariates = NULL, covfunargs = NULL, control = NULL) { return(correction) } isSingleNA <- function(x) { length(x) == 1 && is.na(x) } profilepl <- function(s, f, ..., aic=FALSE, rbord=NULL, verbose=TRUE, fast=TRUE) { callenv <- parent.frame() s <- as.data.frame(s) n <- nrow(s) stopifnot(is.function(f)) ## validate 's' parms <- names(s) fargs <- names(formals(f)) if(!all(fargs %in% parms)) { bad <- !(fargs %in% parms) forgiven <- sapply(formals(f)[bad], isSingleNA) if(!all(forgiven)) { slecht <- fargs[bad[!forgiven]] nsl <- length(slecht) stop(paste(ngettext(nsl, "Argument", "Arguments"), commasep(sQuote(slecht)), ngettext(nsl, "is", "are"), "not provided in the data frame s")) } } ## extra columns in 's' are assumed to be parameters of covariate functions is.farg <- parms %in% fargs pass.cfa <- any(!is.farg) got.cfa <- "covfunargs" %in% names(list(...)) if(pass.cfa && got.cfa) stop("Some columns in s are superfluous") ## criterion <- numeric(n) ## make a fake call pseudocall <- match.call() pseudocall[[1]] <- as.symbol("ppm") namcal <- names(pseudocall) ## remove arguments 's' and 'verbose' retain <- !(namcal %in% c("s", "verbose")) pseudocall <- pseudocall[retain] namcal <- namcal[retain] ## place 'f' argument third np <- length(pseudocall) fpos <- (1:np)[namcal == "f"] indices <- (1:np)[-fpos] if(length(indices) < 3) { indices <- c(indices, fpos) } else { indices <- c(indices[1:3], fpos, indices[-(1:3)]) } pseudocall <- pseudocall[indices] namcal <- names(pseudocall) namcal[namcal=="f"] <- "interaction" names(pseudocall) <- namcal ## get correction correction <- getppmcorrection(...) if(correction == "border") { ## determine border correction distance if(is.null(rbord)) { ## compute rbord = max reach of interactions if(verbose) message("(computing rbord)") for(i in 1:n) { fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) re <- reach(fi) if(is.null(rbord)) rbord <- re else if(rbord < re) rbord <- re } } } ## determine whether computations can be saved if(pass.cfa || got.cfa) { savecomp <- FALSE } else { Q <- do.call(ppm, append(list(...), list(rbord=rbord, justQ=TRUE)), envir=callenv) savecomp <- !oversize.quad(Q) } ## go gc() if(verbose) { message(paste("comparing", n, "models...")) pstate <- list() } for(i in 1:n) { if(verbose) pstate <- progressreport(i, n, state=pstate) fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) if(pass.cfa) cfai <- list(covfunargs=as.list(s[i, !is.farg, drop=FALSE])) ## fit model if(i == 1) { ## fit from scratch arg1 <- list(..., interaction=fi, rbord=rbord, savecomputed=savecomp, warn.illegal=FALSE, callstring="", skip.border=TRUE, clip.interaction=!fast) if(pass.cfa) arg1 <- append(arg1, cfai) fiti <- do.call(ppm, arg1, envir=callenv) ## save intermediate computations (pairwise distances, etc) precomp <- fiti$internal$computed savedargs <- list(..., rbord=rbord, precomputed=precomp, warn.illegal=FALSE, callstring="", skip.border=TRUE, clip.interaction=!fast) } else { ## use precomputed data argi <- append(savedargs, list(interaction=fi)) if(pass.cfa) argi <- append(argi, cfai) fiti <- do.call(ppm, argi, envir=callenv) } ## save log pl for each fit criterion[i] <- if(aic) -AIC(fiti) else as.numeric(logLik(fiti, warn=FALSE)) ## save fitted coefficients for each fit co <- coef(fiti) if(i == 1) { allcoef 
<- data.frame(matrix(co, nrow=1)) names(allcoef) <- names(co) } else allcoef <- rbind(allcoef, co) } if(verbose) message("fitting optimal model...") opti <- which.max(criterion) gc() optint <- do.call(f, as.list(s[opti, is.farg, drop=FALSE])) optarg <- list(..., interaction=optint, rbord=rbord) if(pass.cfa) { optcfa <- as.list(s[opti, !is.farg, drop=FALSE]) attr(optcfa, "fitter") <- "profilepl" optarg <- append(optarg, list(covfunargs=optcfa)) } optfit <- do.call(ppm, optarg, envir=callenv) if(verbose) message("done.") critname <- if(aic) "-AIC" else if(is.poisson(optfit)) "log l" else if(optfit$method == "logi") "log CL" else "log PL" result <- list(param=s, prof=criterion, critname=critname, iopt=opti, fit=optfit, rbord=rbord, fname=as.interact(optfit)$name, allcoef=allcoef, otherstuff=list(...), pseudocall=pseudocall) class(result) <- c("profilepl", class(result)) return(result) } profilepl }) ## ## print method ## print.profilepl <- function(x, ...) { head1 <- "profile log pseudolikelihood" head2 <- "for model: " psc <- paste(unlist(strsplitretain(format(x$pseudocall))), collapse=" ") if(nchar(psc) + nchar(head2) + 1 <= getOption('width')) { splat(head1) splat(head2, psc) } else { splat(head1, head2) splat(psc) } nparm <- ncol(x$param) if(waxlyrical('extras')) { corx <- x$fit$correction if(identical(corx, "border") && !is.null(x$rbord)) splat("fitted with rbord =", x$rbord) splat("interaction:", x$fname) splat("irregular", ngettext(nparm, "parameter:", "parameters:\n"), paste(names(x$param), "in", unlist(lapply(lapply(as.list(x$param), range), prange)), collapse="\n")) } popt <- x$param[x$iopt,, drop=FALSE] splat("optimum", ngettext(nparm, "value", "values"), "of irregular", ngettext(nparm, "parameter: ", "parameters:\n"), commasep(paste(names(popt), "=", popt))) invisible(NULL) } ## ## summary method ## summary.profilepl <- function(object, ...) { print(object) cat("\n\noptimal model:\n") print(object$fit) } as.ppm.profilepl <- function(object) { object$fit } fitin.profilepl <- function(object) { fitin(as.ppm(object)) } predict.profilepl <- function(object, ...) { predict(as.ppm(object), ...) 
} ## ## plot method ## plot.profilepl <- local({ plot.profilepl <- function(x, ..., add=FALSE, main=NULL, tag=TRUE, coeff=NULL, xvariable=NULL, col=1, lty=1, lwd=1, col.opt="green", lty.opt=3, lwd.opt=1) { para <- x$param ## graphics arguments may be expressions involving parameters if(ncol(para) > 1) { col <- eval(substitute(col), para) lwd <- eval(substitute(lwd), para) lty <- eval(substitute(lty), para) px <- cbind(para, col, lwd, lty, stringsAsFactors=FALSE) col <- px$col lwd <- px$lwd lty <- px$lty } ## strip any column that is entirely na if(any(nacol <- sapply(para, none.finite))) { warning(paste("Deleted the irregular", ngettext(sum(nacol), "parameter", "parameters"), commasep(sQuote(names(para)[nacol])), "because all values were NA"), call.=FALSE) para <- para[, !nacol, drop=FALSE] } ## npara <- ncol(para) ## main header if(is.null(main)) main <- short.deparse(x$pseudocall) ## x variable for plot if(is.null(xvariable)) { xvalues <- para[,1L] xname <- names(para)[1L] } else { stopifnot(is.character(xvariable)) if(!(xvariable %in% names(para))) stop("there is no irregular parameter named", sQuote(xvariable)) xvalues <- para[[xvariable]] xname <- xvariable } ## y variable for plot if(is.null(coeff)) { yvalues <- x$prof ylab <- x$critname %orifnull% "log pl" } else { stopifnot(is.character(coeff)) allcoef <- x$allcoef if(!(coeff %in% names(allcoef))) stop(paste("there is no coefficient named", sQuote(coeff), "in the fitted model")) yvalues <- allcoef[[coeff]] ylab <- paste("coefficient:", coeff) } ## start plot if(!add) do.call.matched(plot.default, resolve.defaults(list(x=range(xvalues), y=range(yvalues)), list(type="n", main=main), list(...), list(ylab=ylab, xlab=xname)), extrargs=graphicsPars("plot")) linepars <- graphicsPars("lines") if(npara == 1) { ## single curve do.call.matched(lines.default, resolve.defaults(list(x=quote(xvalues), y=quote(yvalues), ...), spatstat.options("par.fv")), extrargs=linepars) } else { ## multiple curves xvarindex <- match(xname, names(para)) other <- para[, -xvarindex, drop=FALSE] tapply(1:nrow(para), as.list(other), plotslice, xvalues=xvalues, yvalues=yvalues, other=other, tag=tag, ..., col=col, lwd=lwd, lty=lty, lineargs=linepars) } ## show optimal value do.call.matched(abline, resolve.defaults(list(v = xvalues[x$iopt]), list(...), list(lty=lty.opt, lwd=lwd.opt, col=col.opt)), extrargs=linepars) return(invisible(NULL)) } plotslice <- function(z, xvalues, yvalues, other, tag=TRUE, ..., lty=1, col=1, lwd=1, lineargs) { fz <- xvalues[z] pz <- yvalues[z] n <- length(xvalues) if(length(lty) == n) lty <- unique(lty[z])[1] if(length(col) == n) col <- unique(col[z])[1] if(length(lwd) == n) lwd <- unique(lwd[z])[1] do.call.matched(lines.default, resolve.defaults(list(x=quote(fz), y=quote(pz), col=col, lwd=lwd, lty=lty), list(...)), extrargs=lineargs) if(tag) { oz <- other[z, , drop=FALSE] uniques <- apply(oz, 2, unique) labels <- paste(names(uniques), "=", uniques, sep="") label <- paste(labels, sep=",") ii <- which.max(pz) do.call.matched(text.default, list(x=fz[ii], y=pz[ii], labels=label, col=col, ...), funargs=graphicsPars("text")) } return(NULL) } none.finite <- function(x) all(!is.finite(x)) plot.profilepl }) simulate.profilepl <- function(object, ...) { simulate(as.ppm(object), ...) } parameters.profilepl <- function(model, ...) 
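## extract all parameters (regular and irregular) from the optimal fit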
{ parameters(as.ppm(model)) } spatstat.model/R/qqplotppm.R0000644000176200001440000002751714331173073015613 0ustar liggesusers# # QQ plot of smoothed residual field against model # # qqplot.ppm() QQ plot (including simulation) # # $Revision: 1.33 $ $Date: 2022/05/23 02:33:06 $ # qqplot.ppm <- local({ ## How to refit the model refit <- function(fit, pattern) { update.ppm(fit, Q=pattern, use.internal=(fit$method != "mppm")) } ## how to compute the residual field residualfield <- function(fit, ..., addtype=FALSE) { d <- diagnose.ppm(fit, which="smooth", plot.it=FALSE, compute.cts=FALSE, compute.sd=FALSE, check=FALSE, ...) result <- d$smooth$Z$v if(addtype) { attr(result, "type") <- d$type attr(result, "typename") <- d$typename } return(result) } qqplot.ppm <- function(fit, nsim=100, expr=NULL, ..., type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) { verifyclass(fit, "ppm") if(check && damaged.ppm(fit)) { if(!repair) stop("object format corrupted; try update(fit, use.internal=TRUE)") message("object format corrupted; repairing it.") fit <- update(fit, use.internal=TRUE) } if(fast) { oldnpixel <- spatstat.options("npixel") if(is.null(dimyx)) dimyx <- pmin(40, rev(oldnpixel)) spatstat.options(npixel=rev(dimyx)) } ################ How to evaluate residuals ########################## ## Quantiles of the residual field will be computed. ## Data values dat <- residualfield(fit, type=type, ..., dimyx=dimyx, addtype=TRUE) ## type of residuals (partially matched and validated by diagnose.ppm) type <- attr(dat, "type") typename <- attr(dat, "typename") ################## How to perform simulations? 
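## Illustrative usage sketch (hedged: nsim is kept small purely for speed;
## 'cells' is the standard example pattern):
##   fit <- ppm(cells ~ 1, Strauss(0.07))
##   qq <- qqplot.ppm(fit, nsim=39, type="raw")
##   plot(qq)   # dispatches to plot.qqppm, drawing the 2.5%/97.5% limits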
####################### ## envir.call <- sys.parent() envir.here <- sys.frame(sys.nframe()) ## extract.from.list <- FALSE inext <- 0 # to placate package checker dont.complain.about(inext) if(is.null(expr)) { ## We will simulate from the fitted model 'nsim' times ## and refit the model to these simulations simsource <- "fit" how.simulating <- "simulating from fitted model" ## prepare rmh arguments rcontrol <- rmhcontrol(control) rmodel <- rmhmodel(fit, control=rcontrol, project=FALSE, verbose=verbose) rstart <- rmhstart(n.start=data.ppm(fit)$n) ## pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontrol, preponly=TRUE, verbose=FALSE) ## expression to be evaluated each time expr <- expression( refit(fit, rmhEngine(rmhinfolist, verbose=FALSE))) envir.expr <- envir.here ## pacify code checkers dont.complain.about(rmhinfolist) } else if(is.expression(expr)) { simsource <- "expr" how.simulating <- paste("evaluating", sQuote("expr")) if(missing(envir.expr) || is.null(envir.expr)) envir.expr <- parent.frame() } else if(inherits(expr, "envelope")) { simpat <- attr(expr, "simpatterns") if(!is.null(simpat) && all(sapply(simpat, is.ppp))) { expr <- expression(simpat[[inext]]) envir.expr <- envir.here dont.complain.about(simpat) simsource <- "list" how.simulating <- "extracting point pattern from list" } else stop(paste("Argument", sQuote("expr"), "is an envelope object,", "but does not contain point patterns"), call.=FALSE) } else if(is.list(expr) && all(sapply(expr, is.ppp))) { simpat <- expr expr <- expression(simpat[[inext]]) envir.expr <- envir.here dont.complain.about(simpat) simsource <- "list" how.simulating <- "extracting point pattern from list" } else stop(paste(sQuote("expr"), "should be an expression, or an envelope object,", "or a list of point patterns"), call.=FALSE) exprstring <- if(simsource == "expr") deparse(expr) else NULL ###### Perform simulations if(verbose) { cat(paste("Simulating", nsim, "realisations... 
")) pstate <- list() } simul.sizes <- numeric(nsim) isim <- 0 ierr <- 0 repeat { inext <- isim + 1 ## protect from randomly-generated crashes in gam ## ei <- try(eval(expr, envir=envir.expr), silent=!verbose) ei <- eval(expr, envir=envir.expr) if(inherits(ei, "try-error")) { ## error encountered in evaluating 'expr' ierr <- ierr + 1 if(ierr > maxerr) stop(paste("Exceeded maximum of", maxerr, "failures in", how.simulating, "after generating only", isim, "realisations")) else break } else { ## simulation successful isim <- isim + 1 fiti <- if(simsource == "fit") ei else if(is.ppm(ei)) ei else if(is.ppp(ei)) refit(fit, ei) else stop("result of eval(expr) is not a ppm or ppp object") ## diagnostic info simul.sizes[isim] <- data.ppm(fiti)$n ## compute residual field resi <- residualfield(fiti, type=type, ..., dimyx=dimyx) if(isim == 1) sim <- array(, dim=c(dim(resi), nsim)) sim[,,isim] <- resi if(verbose) pstate <- progressreport(isim, nsim, state=pstate) if(isim >= nsim) break } } ###### Report diagnostics if(ierr > 0) cat(paste("\n\n**Alert:", ierr, "failures occurred in", how.simulating, "\n\n")) nempty <- sum(simul.sizes == 0) if(nempty > 0) cat(paste("\n\n**Alert:", nempty, "out of", nsim, "simulated patterns were empty.\n\n")) else cat(paste("\nDiagnostic info:\n", "simulated patterns contained an average of", mean(simul.sizes), "points.\n")) if(nempty == nsim) warning("All simulated patterns were empty") ############ Plot them switch(style, classical = { rr <- range(c(dat,sim)) result <- qqplot(sim, dat, xlim=rr, ylim=rr, asp=1.0, xlab="Quantiles of simulation", ylab="Quantiles of data",plot.it=plot.it) title(sub=typename) abline(0,1, lty=2) result <- append(result, list(data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Quantiles of simulation", ylab="Quantiles of data", rtype=type, typename=typename, nsim=nsim, fit=fit, expr=exprstring, simsource = simsource ) ) }, mean = { ## compute quantiles corresponding to probabilities p[i] ## separately in each realisation. 
if(verbose) cat("Calculating quantiles...") if(fast) { p <- ppoints(min(100,length(dat)), 3/8) qsim <- apply(sim, 3, quantile, probs=p, na.rm=TRUE) } else { qsim <- apply(sim, 3, sort, na.last=TRUE) } if(verbose) cat("averaging...") ## sample mean of each quantile meanq <- apply(qsim, 1, mean, na.rm=TRUE) ## et cetera varq <- apply(qsim, 1, var, na.rm=TRUE) sdq <- sqrt(varq) q.025 <- apply(qsim, 1, quantile, probs=0.025, na.rm=TRUE) q.975 <- apply(qsim, 1, quantile, probs=0.975, na.rm=TRUE) rr <- range(c(meanq,dat), na.rm=TRUE) dats <- if(fast) quantile(dat, probs=p, na.rm=TRUE) else sort(dat, na.last=TRUE) if(verbose) cat("..Done.\n") if(plot.it) { plot(meanq, dats, xlab="Mean quantile of simulations", ylab="data quantile", xlim=rr, ylim=rr, asp=1.0) abline(0,1) lines(meanq, q.025, lty=2, col=limcol) lines(meanq, q.975, lty=2, col=limcol) title(sub=typename) } result <- list(x=meanq, y=dats, sdq=sdq, q.025=q.025, q.975=q.975, data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Mean quantile of simulations", ylab="data quantile", rtype=type, typename=typename, nsim=nsim, fit=fit, expr=exprstring, simsource=simsource) }, stop(paste("Unrecognised option for", sQuote("style"))) ) ## Throw out baggage if not wanted if(!saveall) { result$fit <- summary(fit, quick=TRUE) result$sim <- NULL } ## reset npixel if(fast) spatstat.options(npixel=oldnpixel) ## class(result) <- c("qqppm", class(result)) return(invisible(result)) } qqplot.ppm }) plot.qqppm <- local({ plot.qqppm <- function(x, ..., limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") { stopifnot(inherits(x, "qqppm")) default.type <- if(length(x$x) > 150) "l" else "p" do.call(myplot, resolve.defaults(list(quote(x), ..., type=default.type, limits=limits, limcol=limcol))) return(invisible(x)) } myplot <- function(object, xlab = object$xlab, ylab = object$ylab, xlim = object$xlim, ylim = object$ylim, asp = 1, type = default.type, ..., limits=TRUE, limcol="red") { plot(object$x, object$y, xlab = xlab, ylab = ylab, xlim = xlim, ylim = ylim, asp = asp, type = type, ...) abline(0, 1) if(limits) { if(!is.null(object$q.025)) lines(object$x, object$q.025, lty = 2, col=limcol) if(!is.null(object$q.975)) lines(object$x, object$q.975, lty = 2, col=limcol) } typename <- object$typename %orifnull% paste(object$rtype, "residuals") title(sub=typename) } plot.qqppm }) print.qqppm <- function(x, ...) { stopifnot(inherits(x, "qqppm")) splat("Q-Q plot of point process residuals", "of type", sQuote(x$rtype), "\n", "based on", x$nsim, "simulations") simsource <- x$simsource if(is.null(simsource)) # old version simsource <- if(x$simulate.from.fit) "fit" else "expr" switch(simsource, fit = { fit <- x$fit sumfit <- if(is.ppm(fit)) summary(fit, quick=TRUE) else if(inherits(fit, "summary.ppm")) fit else list(name="(unrecognised format)") splat("\nSimulations from fitted model:", sumfit$name) }, expr = { splat("Simulations obtained by evaluating the following expression:") print(x$expr) }, list = { splat("Simulated point patterns were provided in a list") }) invisible(NULL) } spatstat.model/R/traj.R0000644000176200001440000000310214334115572014501 0ustar liggesusers#' #' traj.R #' #' Trajectories of function evaluations #' #' An object of class 'traj' is a data frame containing the sequence of #' function evaluations (input and output) performed by 'optim'. #' #' If kppm is called with pspace$save=TRUE, #' the resulting kppm object has an attribute 'h' containing the trajectory. 
#' This is extracted by attr(, "h") or traj() #' #' There are methods for print, plot and lines #' #' $Revision: 1.3 $ $Date: 2022/11/10 07:03:33 $ #' #' Copyright (c) Adrian Baddeley 2022 #' GNU Public Licence >= 2.0 #' traj <- function(object) { h <- attr(object, "h") if(inherits(h, "traj")) return(h) return(NULL) } print.traj <- function(x, ...) { cat("Trajectory of function evaluations\n") cat(paste0("Variables: ", commasep(sQuote(colnames(x))), "\n")) invisible(NULL) } plot.traj <- function(x, ..., show.ends=TRUE, add=FALSE, xlab=NULL, ylab=NULL) { if(add) { ## add to existing plot (handles 'type') points(x[,1], x[,2], ...) } else { ## new plot nama <- colnames(x) plot(x[,1], x[,2], xlab=xlab %orifnull% nama[1], ylab=ylab %orifnull% nama[2], ...) } ## indicate first and last states if(show.ends) { n <- nrow(x) points(x[1,1], x[1,2], pch=1, col="blue", cex=3) points(x[n,1], x[n,2], pch=3, col="red", cex=2) } return(invisible(NULL)) } lines.traj <- function(x, ..., directed=FALSE) { xx <- x[,1] yy <- x[,2] nn <- length(xx) if(directed) { arrows(xx[-nn], yy[-nn], xx[-1], yy[-1], ...) } else { lines(x=xx, y=yy, ...) } return(invisible(NULL)) } spatstat.model/R/residppm.R0000644000176200001440000000717114366621174015403 0ustar liggesusers# # residppm.R # # computes residuals for fitted point process model # # # $Revision: 1.27 $ $Date: 2023/02/02 02:37:29 $ # residuals.ppm <- function(object, type="raw", ..., check=TRUE, drop=FALSE, fittedvalues = NULL, new.coef=NULL, dropcoef=FALSE, quad=NULL) { verifyclass(object, "ppm") trap.extra.arguments(..., .Context="In residuals.ppm") type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson", score="score")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals", score="score residuals") typename <- typenames[[type]] given.fitted <- !missing(fittedvalues) && !is.null(fittedvalues) # ................. determine fitted values ................. NewCoef <- NULL if(is.null(new.coef) && is.null(quad)) { # use 'object' without modification # validate 'object' if(check && !given.fitted && damaged.ppm(object)) stop("object format corrupted; try update(object, use.internal=TRUE)") } else { # determine a new set of model coefficients if(!is.null(new.coef)) { # use specified model parameters NewCoef <- new.coef } else { # estimate model parameters using a (presumably) denser set of dummy pts # Determine new quadrature scheme if(is.quad(quad)) hi.res.quad <- quad else if(is.ppp(quad)) hi.res.quad <- quadscheme(data=data.ppm(object), dummy=quad) else { # assume 'quad' is a list of arguments to 'quadscheme' hi.res.quad <- do.call(quadscheme, append(list(data.ppm(object)), quad)) } ## refit the model with new quadscheme e <- list2env(list(hi.res.quad=hi.res.quad), parent=object$callframe) hi.res.fit <- update(object, hi.res.quad, envir=e) NewCoef <- coef(hi.res.fit) } } #' now compute fitted values using new coefficients if(!given.fitted) fittedvalues <- fitted(object, drop=drop, check=check, new.coef=NewCoef, dropcoef=dropcoef) # ..................... compute residuals ..................... 
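  # The 'discrete' masses and 'density' computed below implement the
  # residual measures (lambda = fitted conditional intensity):
  #   raw:     atom 1 at each data point,        density -lambda(u)
  #   inverse: atom 1/lambda(x) at data points,  density -1{lambda(u) > 0}
  #   pearson: atom 1/sqrt(lambda(x)),           density -sqrt(lambda(u))
  #   score:   atom = model matrix row at x,     density -lambda(u) * model matrix
  # e.g. residuals(fit, type="pearson") returns an object of class 'msr'.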
# Extract quadrature points and weights Q <- quad.ppm(object, drop=drop, clip=drop) # U <- union.quad(Q) # quadrature points Z <- is.data(Q) # indicator data/dummy # W <- w.quad(Q) # quadrature weights # Compute fitted conditional intensity at quadrature points lambda <- fittedvalues # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) if(type == "score") { # need the covariates X <- model.matrix(object) if(drop) { gs <- getglmsubset(object) ok <- !is.na(gs) & gs X <- X[ok, , drop=FALSE] } } # Evaluate residual measure components discrete <- switch(type, raw = rep.int(1, sum(Z)), inverse = 1/lambda[Z], pearson = 1/sqrt(lambda[Z]), score = X[Z, , drop=FALSE] ) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda), score = -lambda * X) # Residual measure (return value) res <- msr(Q, discrete, density) # name the residuals attr(res, "type") <- type attr(res, "typename") <- typename return(res) } spatstat.model/R/summary.mppm.R0000644000176200001440000002176214331173074016220 0ustar liggesusers# # summary.mppm.R # # $Revision: 1.19 $ $Date: 2022/01/04 05:30:06 $ # summary.mppm <- function(object, ..., brief=FALSE) { # y will be the summary y <- object[c("Call", "Info", "Inter", "trend", "iformula", "random", "npat", "maxlogpl")] y$brief <- brief Info <- object$Info Inter <- object$Inter FIT <- object$Fit$FIT moadf <- object$Fit$moadf y$Fit <- object$Fit[c("fitter", "use.gam", "fmla", "Vnamelist", "Isoffsetlist")] y$Fit$FIT <- summary(FIT) y$Fit$moadf <- list(nrow=nrow(moadf), colnames=colnames(moadf)) ninteract <- Inter$ninteract interaction <- Inter$interaction iused <- Inter$iused itags <- Inter$itags processnames <- Inter$processes constant <- Inter$constant trivial <- Inter$trivial npat <- y$npat iformula <- y$iformula random <- y$random Vnamelist <- y$Fit$Vnamelist allVnames <- unlist(Vnamelist) Isoffsetlist <- y$Fit$Isoffsetlist poistags <- itags[trivial] intertags <- c(allVnames, poistags) ## does the model depend on covariates? y$depends.covar <- Info$has.covar && (length(Info$used.cov.names) > 0) # rownames <- y$Info$rownames switch(y$Fit$fitter, glmmPQL={ y$coef <- co <- fixed.effects(FIT) y$coef.rand <- random.effects(FIT) }, gam=, glm={ y$coef <- co <- coef(FIT) }) ## identify model terms which involve interpoint interaction md <- model.depends(FIT) is.interpoint <- colnames(md) %in% intertags involves.interpoint <- apply(md[ , is.interpoint, drop=FALSE], 1, any) y$coef.inter <- co[involves.interpoint] ## identify trend and design coefficients systematic <- !involves.interpoint y$coef.syst <- co[systematic] # random effects y$ranef <- if(Info$has.random) summary(FIT$modelStruct) else NULL ### Interpoint interactions # model is Poisson ? y$poisson <- ispois <- all(trivial[iused]) # Determine how complicated the interactions are: # (0) are there random effects involving the interactions randominteractions <- !is.null(random) && any(variablesinformula(random) %in% itags) # (1) is the interaction formula of the form ~ tag + tag + ... 
+ tag isimple <- identical(sort(variablesinformula(iformula)), sort(termsinformula(iformula))) # (2) is it of the form ~tag trivialformula <- (isimple && ninteract == 1) # (3) is it of the form ~tag where the interaction is the same in each row fixedinteraction <- (trivialformula && constant && !randominteractions) ### Determine printing of interactions, accordingly ### iprint <- list() if(randominteractions) { toohard <- TRUE printeachrow <- FALSE } else if(fixedinteraction || ispois) { # exactly the same interaction for all patterns interaction <- interaction[1L,1L,drop=TRUE] fi.all <- fii(interaction, co, Vnamelist[[1L]], Isoffsetlist[[1L]]) iprint <- list("Interaction for all patterns"=fi.all) printeachrow <- FALSE toohard <- FALSE } else if(trivialformula) { ## same interaction structure for all patterns; ## irregular parameters may be different on each row; ## regular parameters of interaction do not depend on design pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE } else if(sum(iused) == 1) { ## same interaction structure for all patterns; ## irregular parameters may be different on each row; ## regular parameters of interaction may depend on design pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE ## look for design : interaction terms mm <- md[involves.interpoint, !is.interpoint, drop=FALSE] tangled <- apply(mm, 2, any) if(any(tangled)) { tanglednames <- colnames(mm)[tangled] textra <- list(commasep(sQuote(tanglednames))) names(textra) <- paste("Interaction depends on design", ngettext(length(tanglednames), "covariate", "covariates")) iprint <- append(iprint, textra) } } else if(isimple && all(constant)) { # several interactions involved, each of which is the same for all patterns iprint <- list("Interaction formula"=iformula, "Interactions defined for each pattern"=NULL) for(j in (1:ninteract)[iused]) { name.j <- paste("Interaction", sQuote(itags[j])) int.j <- Inter$interaction[1L,j,drop=TRUE] Vnames.j <- Vnamelist[[j]] Isoffset.j <- Isoffsetlist[[j]] fii.j <- fii(int.j, co, Vnames.j, Isoffset.j) extra.j <- list(fii.j) names(extra.j) <- name.j iprint <- append(iprint, extra.j) } printeachrow <- FALSE toohard <- FALSE } else { # general case # determine which interaction(s) are active on each row active <- active.interactions(object) if(ninteract > 1 || !all(active)) iprint <- list("Active interactions"=active) printeachrow <- TRUE toohard <- any(rowSums(active) > 1) } y$ikind <- list( randominteractions=randominteractions, isimple =isimple, trivialformula =trivialformula, fixedinteraction =fixedinteraction, toohard =toohard, printeachrow =printeachrow) y$depends.on.row <- ("id" %in% variablesinformula(y$trend)) || !fixedinteraction if(toohard) iprint <- append(iprint, list("(Sorry, cannot interpret fitted interactions)")) else if(printeachrow) { subs <- subfits(object, what="interactions") um <- uniquemap(subs) uniq <- (um == seq_along(um)) if(mean(uniq) <= 0.5) { icode <- cumsum(uniq)[um] inames <- if(max(icode) <= 26) LETTERS[icode] else as.character(icode) itable <- data.frame(row=seq_along(um), interaction=inames) usubs <- subs[um[uniq]] names(usubs) <- inames[uniq] iprint <- append(iprint, list("Summary table of interactions"=itable, "key to interaction table"=usubs, "=== Interactions on each row ===="=NULL)) } names(subs) <- paste("Interaction on row", 1:npat) iprint <- append(iprint, subs) } y$iprint <- iprint 
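  ## At this point 'y' contains everything print.summary.mppm needs.
  ## Illustrative usage sketch (hedged; 'waterstriders' is the standard
  ## example dataset, a list of three point patterns):
  ##   H <- hyperframe(Points = waterstriders)
  ##   fit <- mppm(Points ~ 1, data = H)
  ##   summary(fit)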
class(y) <- c("summary.mppm", class(list)) return(y) } print.summary.mppm <- function(x, ..., brief=x$brief) { # NB: x is an object of class "summary.mppm" npat <- x$npat # Inter <- x$Inter # ninteract <- Inter$ninteract # interaction <- Inter$interaction # iused <- Inter$iused # constant <- Inter$constant # iformula <- x$iformula # processnames <- Inter$processes # itags <- Inter$itags # trivial <- Inter$trivial # random <- x$random FIT <- x$Fit$FIT # Vnamelist <- x$Fit$Vnamelist # allVnames <- unlist(Vnamelist) # poistags <- itags[trivial] terselevel <- spatstat.options("terse") # rownames <- x$Info$rownames splat("Point process model fitted to", npat, "point patterns") if(waxlyrical('gory', terselevel)) splat("Call:", x$Call$callstring) splat("Log trend formula:", pasteFormula(x$trend)) switch(x$Fit$fitter, glmmPQL={ cat("\nFixed effects:\n") print(x$coef.syst) cat("Random effects:\n") print(x$coef.rand) co <- fixed.effects(FIT) }, gam=, glm={ cat("\nFitted trend coefficients:\n") print(x$coef.syst) co <- coef(FIT) }) if(length(x$coef.inter)) { cat("\nFitted coefficients of interpoint interaction:\n") print(x$coef.inter) } if(!brief && waxlyrical('extras', terselevel)) { cat("All fitted coefficients:\n") print(co) } parbreak(terselevel) if(!is.null(x$ranef)) { splat("Random effects summary:") print(x$ranef) parbreak(terselevel) } ### Print interaction information ### if(waxlyrical('extras', terselevel)) { iprint <- x$iprint nama <- names(iprint) %orifnull% rep("", length(iprint)) for(i in seq_along(iprint)) { nami <- nama[i] vali <- iprint[[i]] if(brief && is.matrix(vali)) vali <- paren(paste(nrow(vali), "x", ncol(vali), "matrix")) if(nami != "") { inline <- inherits(vali, "formula") || is.character(vali) || (brief && inherits(vali, "fii")) if(inline) cat(paste0(nami, ":\t")) else splat(paste0(nami, ":")) } if(!is.null(vali)) { if(inherits(vali, "fii")) { print(vali, tiny=brief) } else if(is.character(vali)) { splat(vali) } else { print(vali) } } parbreak(terselevel) } } if(!brief && waxlyrical('gory', terselevel)) { splat("--- Gory details: ---") splat("Combined data frame has", x$Fit$moadf$nrow, "rows") print(FIT) } invisible(NULL) } spatstat.model/R/dppmclass.R0000644000176200001440000000205714331173072015533 0ustar liggesusers#' Methods and support for class dppm #' #' $Revision: 1.5 $ $Date: 2019/10/18 03:41:43 $ is.dppm <- function(x) { inherits(x, "dppm") } plot.dppm <- function (x, ..., what = c("intensity", "statistic")){ objectname <- short.deparse(substitute(x)) if(missing(what) && is.stationary(x)) what <- "statistic" plot.kppm(x, ..., xname = objectname, what = what) } Kmodel.dppm <- function (model, ...){ Kmodel(model$fitted, W=model$window) } pcfmodel.dppm <- function (model, ...){ pcfmodel(model$fitted, W=model$window) } intensity.dppm <- function (X, ...){ return(intensity(X$fitted)) } reach.dppm <- function(x, ...){ reach(x$fitted, ...) } repul <- function(model, ...) { UseMethod("repul") } repul.dppm <- function(model, ...) 
{ g <- pcfmodel(model) f <- function(x) { 2 * pi * x * (1 - g(x)) } rmax <- reach(model) h <- integrate(f, 0, rmax)$value lam <- intensity(model) ans <- h * lam return(ans) } #' print.dppm is identical to print.kppm and defined in kppm.R #' summary.dppm is defined in summary.dppm.R spatstat.model/R/penttinen.R0000644000176200001440000000413714243551505015555 0ustar liggesusers# # # penttinen.R # # $Revision: 1.3 $ $Date: 2018/03/15 07:37:41 $ # # Penttinen pairwise interaction # # # ------------------------------------------------------------------- # Penttinen <- local({ # create blank template object without family and pars BlankAntti <- list( name = "Penttinen process", creator = "Penttinen", family = "pairwise.family", # evaluated later pot = function(d, par) { ans <- numeric(length(d)) dim(ans) <- dim(d) zz <- d/(2 * par$r) ok <- (zz < 1) z <- zz[ok] ans[ok] <- (2/pi) * (acos(z) - z * sqrt(1-z^2)) return(ans) }, par = list(r = NULL), # to be filled in parnames = "circle radius", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) gamma <- exp(theta) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { theta <- as.numeric(coeffs[1]) return(is.finite(theta) && (theta <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(2 * r) theta <- coeffs[1] if(abs(theta) <= epsilon) return(0) else return(2 * r) }, version=NULL # to be filled in ) class(BlankAntti) <- "interact" # Finally define main function Penttinen <- function(r) { instantiate.interact(BlankAntti, list(r=r)) } Penttinen <- intermaker(Penttinen, BlankAntti) Penttinen }) spatstat.model/R/detPPF-class.R0000644000176200001440000002056514243551505016001 0ustar liggesusers## support for class 'detpointprocfamily' print.detpointprocfamily <- function(x, ...){ splat(x$name, "determinantal point process model", ifelse(is.numeric(x$dim), paste("in dimension", x$dim), "")) #' Not used: #' parnames <- names(x$par) anyfixed <- length(x$fixedpar)>0 if(anyfixed){ fixedlambda <- NULL if(!is.null(x$intensity) && is.element(x$intensity, names(x$fixedpar))){ lambda <- signif(x$fixedpar[[x$intensity]], 4) x$fixedpar <- x$fixedpar[names(x$fixedpar)!=x$intensity] fixedlambda <- paste(x$intensity, ifelse(is.null(x$thin), paste("=", lambda), "= an image")) } if(length(x$fixedpar)>0){ fixedparstring <- paste(names(x$fixedpar), signif(unlist(x$fixed),4), sep = " = ", collapse = ", ") fixedparstring <- paste(fixedlambda, fixedparstring, sep=", ") } else{ fixedparstring <- fixedlambda } } ## Partially specified model: if(length(x$freepar)>0){ splat("The model is only partially specified.") splat("The following parameters are free (e.g. 
to be estimated by dppm):") cat(x$freepar, sep = ", ") cat("\n") if(anyfixed){ cat("The fixed parameters are: ") cat(fixedparstring, sep = ", ") } else{ splat("There are no fixed parameters.") } } else{ cat("The parameters are: ") cat(fixedparstring, sep = ", ") } cat("\n") if(!is.null(x$intensity)){ splat("The parameter", x$intensity, "specifies the intensity of the process.") } if(is.character(x$dim)){ splat("The parameter", x$dim, "specifies the dimension of the state space.") } invisible(NULL) } reach.detpointprocfamily <- function(x, ...){ model <- x fun <- model$range nam <- names(formals(fun)) do.call(model$range, c(model$fixedpar[is.element(names(model$fixedpar),nam)], list(...))) } dppparbounds <- function(model, name, ...){ if(inherits(model, "dppm")) model <- model$fitted if(!inherits(model, "detpointprocfamily")) stop("input model must be of class detpointprocfamily or dppm") fun <- model$parbounds nam <- names(formals(fun)) if(missing(name)) name <- nam[!is.element(nam, c("name", model$dim))] rslt <- matrix(0,length(name), 2, dimnames = list(name, c("lower", "upper"))) for(nn in name){ tmp <- try(do.call(fun, c(model$fixedpar[is.element(names(model$fixedpar),nam)], list(...), list(name=nn))), silent=TRUE) if(inherits(tmp, "try-error")) { rslt[nn,] <- c(NA, NA) }else{ rslt[nn,] <- tmp } } rslt } valid.detpointprocfamily <- function(object, ...){ if(length(object$freepar)>0) return(NA) ## If there is no function for checking validity we always return TRUE: if(is.null(object$valid)) return(TRUE) do.call(object$valid, object$fixedpar) } dppspecdenrange <- function(model){ ## If there is no function for checking finite range of spectral density we always return Inf: fun <- model$specdenrange if(is.null(fun)) return(Inf) xx <- try(fun(model), silent = TRUE) if(inherits(xx, "try-error")) Inf else xx } dppspecden <- function(model){ fun <- model$specden if(is.null(fun)) stop("Spectral density unknown for this model!") if(length(model$freepar)>0) stop("Cannot extract the spectral density of a partially specified model. Please supply all parameters.") specden <- function(x, ...){ allargs <- c(list(x), model$fixedpar, list(...)) do.call(fun, allargs) } return(specden) } dppkernel <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted fun <- model$kernel if(is.null(fun)) return(dppapproxkernel(model, ...)) if(length(model$freepar)>0) stop("Cannot extract the kernel of a partially specified model. 
Please supply all parameters.") firstarg <- names(formals(fun))[1L] kernel <- function(x){ allargs <- c(structure(list(x), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } return(kernel) } dppapproxkernel <- function(model, trunc = .99, W = NULL){ if(inherits(model, "dppm")){ W <- model$window model <- model$fitted } ####### BACKDOOR TO SPHERICAL CASE ######## if(!is.null(spherefun <- model$approxkernelfun)){ spherefun <- get(spherefun) rslt <- spherefun(model, trunc) return(rslt) } ########################################### d <- dim(model) if(is.null(W)) W <- boxx(replicate(d, c(-.5,.5), simplify=FALSE)) W <- as.boxx(W) if(d!=ncol(W$ranges)) stop(paste("The dimension of the window:", ncol(W$ranges), "is inconsistent with the dimension of the model:", d)) Wscale <- as.numeric(W$ranges[2L,]-W$ranges[1L,]) tmp <- dppeigen(model, trunc, Wscale, stationary=FALSE) index <- tmp$index eig <- tmp$eig prec <- tmp$prec trunc <- tmp$trunc rm(tmp) f <- function(r){ x <- matrix(0, nrow=length(r), ncol=d) x[,1L] <- r basis <- fourierbasis(x, index, win = W) approx <- matrix(eig, nrow=length(eig), ncol=length(r)) * basis return(Re(colSums(approx))) } attr(f, "dpp") <- list(prec = prec, trunc = trunc) return(f) } dppDpcf <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted fun <- model$Dpcf if(is.null(fun)) stop("Gradient of the pcf is not available for this model.") if(length(model$freepar)>0) stop("Cannot extract the gradient of the pcf of a partially specified model. Please supply all parameters.") firstarg <- names(formals(fun))[1L] Dg <- function(x){ allargs <- c(structure(list(x), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } return(Dg) } pcfmodel.detpointprocfamily <- function(model, ...){ kernel <- dppkernel(model, ...) f <- function(x){ 1 - (kernel(x)/kernel(0))^2 } return(f) } dppapproxpcf <- function(model, trunc = .99, W = NULL){ kernel <- dppapproxkernel(model, trunc = trunc, W = W) f <- function(x){ 1 - (kernel(x)/kernel(0))^2 } attr(f, "dpp") <- attr(kernel, "dpp") return(f) } Kmodel.detpointprocfamily <- function(model, ...){ if(length(model$freepar)>0) stop("Cannot extract the K function of a partially specified model. Please supply all parameters.") fun <- model$Kfun if(!is.null(fun)){ firstarg <- names(formals(fun))[1L] Kfun <- function(r){ allargs <- c(structure(list(r), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } } else{ pcf <- pcfmodel(model, ...) intfun <- function(xx){ 2*pi*xx*pcf(xx) } Kfun <- function(r){ r <- sort(r) if(r[1L]<0) stop("Negative values not allowed in K function!") r <- c(0,r) int <- unlist(lapply(2:length(r), function(i) integrate(intfun, r[i-1L], r[i], subdivisions=10)$value)) return(cumsum(int)) } } return(Kfun) } update.detpointprocfamily <- function(object, ...){ newpar <- list(...) if(length(newpar)==1L && is.list(newpar[[1L]]) && !is.im(newpar[[1L]])) newpar <- newpar[[1L]] nam <- names(newpar) if(length(newpar)>0&&is.null(nam)) stop(paste("Named arguments are required. 
Please supply parameter values in a", sQuote("tag=value"), "form")) oldpar <- object$fixedpar[!is.element(names(object$fixedpar), nam)] thin <- object$thin object <- do.call(object$caller, c(newpar,oldpar)) if(is.null(object$thin)) object$thin <- thin return(object) } is.stationary.detpointprocfamily <- function(x){ if(is.null(x$intensity)) return(FALSE) lambda <- getElement(x$fixedpar, x$intensity) if(!is.null(lambda)&&is.numeric(lambda)&&is.null(x$thin)) return(TRUE) return(FALSE) } intensity.detpointprocfamily <- function(X, ...){ lambda <- NULL if(!is.null(X$intensity)) lambda <- getElement(X$fixedpar, X$intensity) if(!is.null(lambda)){ if(!is.null(X$thin)) lambda <- lambda*X$thin return(lambda) } return(NA) } parameters.dppm <- parameters.detpointprocfamily <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted c(model$fixed, structure(rep(NA,length(model$freepar)), .Names = model$freepar)) } dim.detpointprocfamily <- function(x){ if(is.numeric(d <- x$dim)){ return(d) } else{ return(getElement(x$fixedpar, d)) } } spatstat.model/R/auc.R0000644000176200001440000000336614476233602014327 0ustar liggesusers## ## auc.R ## ## Calculate ROC curve or area under it ## ## $Revision: 1.17 $ $Date: 2023/08/15 07:44:11 $ ## Code for roc() and roc.ppp() and internals is moved to spatstat.explore roc.ppm <- function(X, ...) { stopifnot(is.ppm(X)) model <- X lambda <- predict(model, ...) Y <- data.ppm(model) nullmodel <- ppm(Y) result <- rocModel(lambda, nullmodel, ...) return(result) } roc.kppm <- function(X, ...) { stopifnot(is.kppm(X)) model <- as.ppm(X) lambda <- predict(model, ...) Y <- data.ppm(model) nullmodel <- ppm(Y) result <- rocModel(lambda, nullmodel, ...) return(result) } roc.slrm <- function(X, ...) { stopifnot(is.slrm(X)) model <- X lambda <- predict(model, ..., type="probabilities") Y <- response(model) nullmodel <- slrm(Y ~ 1) dont.complain.about(Y) result <- rocModel(lambda, nullmodel, ..., lambdatype="probabilities") return(result) } # ...................................................... ## Code for auc(), auc.ppp() is moved to spatstat.explore auc.kppm <- function(X, ...) { auc(as.ppm(X), ...) } auc.ppm <- function(X, ...) { model <- X if(is.multitype(model)) { # cheat ro <- roc(model, ...) aobs <- with(ro, mean(fobs)) atheo <- with(ro, mean(ftheo)) } else if(is.stationary(model)) { aobs <- atheo <- 1/2 } else { lambda <- intensity(model) Fl <- ecdf(lambda[]) lambda <- as.im(lambda, Window(model)) X <- data.ppm(model) lamX <- lambda[X] aobs <- mean(Fl(lamX)) atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda) } result <- c(aobs, atheo) names(result) <- c("obs", "theo") return(result) } auc.slrm <- function(X, ...) { ro <- roc(X, ...) result <- with(ro, list(obs=mean(fobs), theo=mean(ftheo))) return(unlist(result)) } spatstat.model/R/psst.R0000644000176200001440000001427714366621174014556 0ustar liggesusers# # psst.R # # Computes the GNZ contrast of delta-f for any function f # # $Revision: 1.12 $ $Date: 2023/02/02 02:39:37 $ # ################################################################################ # psst <- function(object, fun, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL, funargs=list(correction="best"), verbose=TRUE) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) 
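  # Illustrative usage sketch (hedged; 'nndcumfun', defined at the end of
  # this file, is the cumulative nearest-neighbour count summary):
  #   fit <- ppm(cells ~ 1, Strauss(0.1))
  #   P <- psst(fit, fun=nndcumfun)
  #   plot(P)   # pseudoresidual of the GNZ contrast, with +/- 2 sigma bands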
if(!is.null(model)) { stopifnot(is.ppm(model)) ## update 'model' using new point pattern 'object' e <- list2env(list(object=object), parent=model$callframe) fit <- update(model, Q=object, forcefit=TRUE, envir=e) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED) && spatstat.options("eroded.intensity")) { # XUSED <- USED[Z] # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # XUSED <- rep.int(TRUE, npts) # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) rescts <- with(resid, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~S(r), NULL), "theo", . 
~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~S") # evaluate fun(X) for data fX <- do.call(fun, append(list(X, r=rvals), funargs)) fXunits <- unitname(fX) # Extract 'best' estimate only fX <- with(fX, .y) zero <- numeric(length(fX)) # sum over all quadrature points iused <- seq(U$n)[USED] nused <- length(iused) if(verbose) cat(paste("\nProcessing", nused, "quadrature points...")) # running sums & integrals sumX <- zero integ <- integ2 <- zero # template for X \cup {u} uX <- superimpose(U[1], X, W=Win, check=FALSE) Ux <- U$x Uy <- U$y # if(verbose) pstate <- list() # for(j in seq(nused)) { i <- iused[j] wi <- wc[i] if(Z[i]) { # data point fXi <- do.call(fun, append(list(X[-i], r=rvals), funargs)) fXi <- with(fXi, .y) deltaf <- fX - fXi sumX <- sumX + deltaf } else { # dummy point uX$x[1] <- Ux[i] uX$y[1] <- Uy[i] fuX <- do.call(fun, append(list(uX, r=rvals), funargs)) fuX <- with(fuX, .y) deltaf <- fuX - fX } integ <- integ + wi * deltaf integ2 <- integ2 + wi * deltaf^2 # if(j %% 500 == 0) { cat("[garbage ") gc() cat("collected]") } if(verbose) pstate <- progressreport(j, nused, state=pstate) } sdv <- sqrt(integ2) res <- sumX - integ ans <- bind.fv(ans, data.frame(dat=sumX, com=integ, var=integ2, sd=sdv, hi=2*sdv, lo=-2*sdv, res=res, stdres=res/sdv), c("Sigma~Delta~S(r)", "bold(C)~Delta~S(r)", "bold(C)^2~Delta~S(r)", "sqrt(bold(C)^2~Delta~S(r))", "%s[hi](r)", "%s[lo](r)", "bold(R)~Delta~S(r)", "bold(T)~Delta~S(r)"), c("data pseudosum (contribution to %s)", "model compensator (contribution to %s)", "pseudovariance of %s", "sqrt(pseudovariance) of %s", "upper 2 sigma critical band for %s", "lower 2 sigma critical band for %s", "pseudoresidual function %s", "standardised pseudoresidual function %s"), "res") fvnames(ans,".") <- c("res", "hi", "lo", "theo") unitname(ans) <- fXunits # return(ans) } npfun <- function(X, ..., r) { npts <- npoints(X) # initialise fv object df <- data.frame(r=r, theo=0, npoint=npts) desc <- c("distance argument r", "value 0", "value equal to number of points") ans <- fv(df, "r", substitute(npoints(r), NULL), "npoint", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="npoints") unitname(ans) <- unitname(X) return(ans) } nndcumfun <- function(X, ..., r) { nn <- nndist(X) bk <- breakpts.from.r(r) # nn <- nn[nn <= bdist.points(X)] h <- whist(nn, bk$val) # initialise fv object df <- data.frame(r=r, theo=0, obs=h) desc <- c("distance argument r", "value 0", "observed count") ans <- fv(df, "r", substitute(nndcount(r), NULL), "obs", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="nndcount") unitname(ans) <- unitname(X) return(ans) } spatstat.model/R/parameters.R0000644000176200001440000000121314331173073015702 0ustar liggesusers## ## parameters.R ## ## $Revision: 1.2 $ $Date: 2015/05/08 04:27:15 $ ## parameters <- function(model, ...) { UseMethod("parameters") } parameters.ppm <- function(model, ...) { ss <- summary(model, quick="no variances") out <- c(list(trend=ss$trend$value), ss$covfunargs, ss$interaction$interaction$par, ss$interaction$sensible$param) return(out) } parameters.kppm <- function(model, ...) 
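## Illustrative usage sketch (hedged):
##   fit <- kppm(redwood ~ x, "Thomas")
##   parameters(fit)   # trend coefficients, cluster parameters and mu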
{ ss <- summary(model, quick="no variances") out <- c(list(trend=ss$trend$trend$value), ss$covfunargs, ss$clustpar, ss$clustargs, list(mu=ss$mu)) return(out) } spatstat.model/R/logistic.R0000644000176200001440000002313114331173073015357 0ustar liggesusers## ## logistic.R ## ## $Revision: 1.28 $ $Date: 2020/11/27 03:04:30 $ ## ## Logistic composite likelihood method ## ## (quadscheme construction is now in 'logiquad.R') logi.engine <- function(Q, trend = ~1, interaction, ..., covariates=NULL, subsetexpr=NULL, clipwin=NULL, correction="border", rbord=reach(interaction), covfunargs=list(), allcovar=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, justQ = FALSE, savecomputed = FALSE, precomputed = NULL, VB=FALSE ){ if(is.null(trend)) trend <- ~1 if(is.null(interaction)) interaction <- Poisson() want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.poisson(interaction) want.subset <- !is.null(subsetexpr) # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # rbord applies only to border correction if(correction == "border") { check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") } else rbord <- 0 # backdoor stuff if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } # create dummy points if(inherits(Q, "ppp")){ Xplus <- Q Q <- quadscheme.logi(Xplus, ...) D <- Q$dummy Dinfo <- Q$param } else if(checkfields(Q, c("data", "dummy"))) { Xplus <- Q$data D <- Q$dummy Dinfo <- Q$param if(is.null(Dinfo)){ Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D))) } Q <- quadscheme.logi(Xplus, D) } else stop("Format of object Q is not understood") ## clip to subset? 
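  ## Data-frame covariates are indexed by quadrature point, so they must be
  ## subsetted by the same inside/outside indicator before the quadrature
  ## scheme itself is clipped to 'clipwin'.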
if(!is.null(clipwin)) { if(is.data.frame(covariates)) { ok <- inside.owin(union.quad(Q), w=clipwin) covariates <- covariates[ok, , drop=FALSE] } Q <- Q[clipwin] Xplus <- Q$data D <- Q$dummy } if (justQ) return(Q) ### Dirty way of recording arguments so that the model can be refitted later (should probably be done using call, eval, envir, etc.): extraargs <- list(covfunargs = covfunargs, allcovar = allcovar, vnamebase = vnamebase, vnameprefix = vnameprefix) extraargs <- append(extraargs, list(...)) ## Dummy intensity if(correction == "border" && Dinfo$how=="grid"){ Dbord <- D[bdist.points(D)>=rbord] Dinfo$rho <- npoints(Dbord)/(eroded.areas(as.owin(Dbord), rbord)*markspace.integral(Dbord)) } rho <- Dinfo$rho ##Setting the B from Barker dynamics (relative to dummy intensity) B <- list(...)$Barker if(is.null(B)) B <- 1 B <- B*rho Dinfo <- append(Dinfo, list(B=B)) Dinfo <- append(Dinfo, list(extraargs=extraargs)) # Wplus <- as.owin(Xplus) nXplus <- npoints(Xplus) U <- superimpose(Xplus, D, W=Wplus, check=FALSE) # E <- equalpairs(U, Xplus, marked = is.marked(Xplus)) E <- cbind(1:nXplus, 1:nXplus) # computed <- if (savecomputed) list(X = Xplus, Q = Q, U = U) else list() # assemble covariate data frame if(want.trend || want.subset) { tvars <- variablesinformula(trend) if(want.subset) tvars <- union(tvars, all.vars(subsetexpr)) if(!is.data.frame(covariates)) { ## resolve 'external' covariates externalvars <- setdiff(tvars, c("x", "y", "marks")) tenv <- environment(trend) covariates <- getdataobjects(externalvars, tenv, covariates, fatal=TRUE) } wantxy <- c("x", "y") %in% tvars wantxy <- wantxy | rep.int(allcovar, 2) cvdf <- data.frame(x=U$x, y=U$y)[, wantxy, drop=FALSE] if(!is.null(covariates)) { df <- mpl.get.covariates(covariates, U, "quadrature points", covfunargs) cvdf <- cbind(cvdf, df) } wantmarks <- "marks" %in% tvars if(wantmarks) cvdf <- cbind(cvdf, marks = marks(U)) } else cvdf <- NULL # evaluate interaction sufficient statistics if (!is.null(ss <- interaction$selfstart)) interaction <- ss(Xplus, interaction) V <- evalInteraction(Xplus, U, E, interaction, correction, precomputed = precomputed, savecomputed = savecomputed) if(!is.matrix(V)) stop("evalInteraction did not return a matrix") if (savecomputed) computed <- append(computed, attr(V, "computed")) IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, ncol(V)) # determine names if(ncol(V) > 0) { Vnames <- colnames(V) if(is.null(Vnames)) { nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1L] else paste(vnamebase[2L], 1:nc, sep="") colnames(V) <- Vnames } else if(!is.null(vnameprefix)) { Vnames <- paste(vnameprefix, Vnames, sep="") colnames(V) <- Vnames } } else Vnames <- character(0) # combine all data glmdata <- as.data.frame(V) if(!is.null(cvdf)) glmdata <- cbind(glmdata, cvdf) # construct response and weights ok <- if(correction == "border") (bdist.points(U) >= rbord) else rep.int(TRUE, npoints(U)) # Keep only those quadrature points for which the # conditional intensity is nonzero. KEEP <- if(ncol(V)>0) matrowall(V != -Inf) else rep.int(TRUE, npoints(U)) ok <- ok & KEEP wei <- c(rep.int(1,npoints(Xplus)),rep.int(B/rho,npoints(D))) resp <- c(rep.int(1,npoints(Xplus)),rep.int(0,npoints(D))) ## User-defined subset: if(!is.null(subsetexpr)) { USERSUBSET <- eval(subsetexpr, glmdata, environment(trend)) ok <- ok & USERSUBSET } # add offset, subset and weights to data frame # using reserved names beginning with ".logi." 
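  # The logistic device: each data point carries a response Y=1 and each
  # dummy point Y=0.  Under the model, at a location u,
  #     P(Y=1 | u) = lambda(u) / (lambda(u) + B(u))
  # so logistic regression of Y on the covariates, with offset -log(B),
  # has linear predictor log(lambda(u)/B(u)) and recovers log lambda(u).
  # (Users reach this code via ppm(..., method="logi").)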
glmdata <- cbind(glmdata, .logi.Y = resp, .logi.B = B, .logi.w = wei, .logi.ok =ok) # build glm formula # (reserved names begin with ".logi.") trendpart <- paste(as.character(trend), collapse=" ") fmla <- paste(".logi.Y ", trendpart) # Interaction terms if(want.inter) { VN <- Vnames # enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") fmla <- paste(c(fmla, VN), collapse="+") } # add offset intrinsic to logistic technique fmla <- paste(fmla, "offset(-log(.logi.B))", sep="+") fmla <- as.formula(fmla) # to satisfy package checker: .logi.B <- B .logi.w <- wei .logi.ok <- ok .logi.Y <- resp # suppress warnings from code checkers dont.complain.about(.logi.B, .logi.w, .logi.ok, .logi.Y) # go ##fit <- glm(fmla, data=glmdata, ## family=binomial(), subset = .logi.ok, weights = .logi.w) fit <- if(VB) vblogit.fmla(fmla, data = glmdata, subset = .logi.ok, weights = .logi.w, ...) else glm(fmla, data = glmdata, family = binomial(), subset = .logi.ok, weights = .logi.w) environment(fit$terms) <- sys.frame(sys.nframe()) ## Fitted coeffs co <- coef(fit) fitin <- fii(interaction, co, Vnames, IsOffset) ## Saturated log-likelihood: satlogpl <- sum(ok*resp*log(B)) ## Max. value of log-likelihood: maxlogpl <- logLik(fit) + satlogpl # Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2020/11/27 03:04:30 $") ## Compile results fit <- list(method = "logi", fitter = "glm", projected = FALSE, coef = co, trend = trend, interaction = interaction, fitin = fitin, Q = Q, maxlogpl = maxlogpl, satlogpl = satlogpl, internal = list(Vnames = Vnames, IsOffset=IsOffset, glmdata = glmdata, glmfit = fit, logistic = Dinfo, computed = computed, vnamebase=vnamebase, vnameprefix=vnameprefix, VB = if(VB) TRUE else NULL, priors = if(VB) fit$priors else NULL ), covariates = mpl.usable(covariates), covfunargs= covfunargs, subsetexpr = subsetexpr, correction = correction, rbord = rbord, fisher = NULL, varcov = NULL, # if(VB) fit$S else NULL, terms = terms(trend), version = the.version, problems = list() ) class(fit) <- "ppm" return(fit) } forbid.logi <- function(object) { if(object$method == "logi") stop("Sorry, this is not implemented for method=\'logi\'") return(invisible(NULL)) } spatstat.model/R/hybrid.R0000644000176200001440000003275414331173073015036 0ustar liggesusers# # # hybrid.R # # $Revision: 1.12 $ $Date: 2022/03/07 03:35:48 $ # # Hybrid of several interactions # # Hybrid() create a hybrid of several interactions # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hybrid <- local({ Hybrid <- function(...) { interlist <- list(...) 
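    #' Illustrative usage sketch (hedged; the component names are arbitrary
    #' tags used to label the fitted coefficients):
    #'   ppm(redwood ~ 1, Hybrid(H=Hardcore(0.02), S=Strauss(0.05)))
    #' fits a hybrid of a hard core and a Strauss interaction, with
    #' coefficients prefixed "H." and "S." respectively.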
n <- length(interlist) if(n == 0) stop("No arguments given") #' arguments may be interaction objects or ppm objects isinter <- unlist(lapply(interlist, is.interact)) isppm <- unlist(lapply(interlist, is.ppm)) if(any(nbg <- !(isinter | isppm))) stop(paste(ngettext(sum(nbg), "Argument", "Arguments"), paste(which(nbg), collapse=", "), ngettext(sum(nbg), "is not an interaction", "are not interactions"))) #' ensure the list contains only interaction objects if(any(isppm)) interlist[isppm] <- lapply(interlist[isppm], as.interact) #' recursively expand any components that are themselves hybrids while(any(ishybrid <- unlist(lapply(interlist, is.hybrid)))) { i <- min(which(ishybrid)) n <- length(interlist) expandi <- interlist[[i]]$par interlist <- c(if(i > 1) interlist[1:(i-1L)] else NULL, expandi, if(i < n) interlist[(i+1L):n] else NULL) } #' ncomponents <- length(interlist) if(ncomponents == 1) { #' single interaction - return it return(interlist[[1L]]) } #' ensure all components have names names(interlist) <- good.names(names(interlist), "HybridComponent", 1:ncomponents) #' check for infinite potential values haveInf <- lapply(interlist, getElement, name="hasInf") haveInf <- !sapply(haveInf, identical, y=FALSE) hasInf <- any(haveInf) #' determine interaction order maxorder <- max(sapply(interlist, interactionorder)) #' build object out <- list( name = "Hybrid interaction", creator = "Hybrid", family = hybrid.family, order = maxorder, # Overrides family$order pot = NULL, par = interlist, parnames = names(interlist), hasInf = hasInf, selfstart = function(X, self) { ilist <- self$par sslist <- lapply(ilist, getElement, name="selfstart") has.ss <- sapply(sslist, is.function) if(any(has.ss)) { ilist[has.ss] <- lapply(ilist[has.ss], invokeSelfStart, Y=X) self$par <- ilist } return(self) }, init = NULL, update = NULL, # default OK print = function(self, ..., family=FALSE, brief=FALSE) { if(family) print.isf(self$family) ncomponents <- length(self$par) clabs <- self$parnames splat("Hybrid of", ncomponents, "components:", commasep(sQuote(clabs))) for(i in 1:ncomponents) { splat(paste0(clabs[i], ":")) print(self$par[[i]], ..., family=family, brief=brief) } parbreak() return(invisible(NULL)) }, interpret = function(coeffs, self) { interlist <- self$par result <- list(param=list(), inames=character(0), printable=list()) for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
#' extract them if(any(relevant)) { Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) #' invoke the self-interpretation of interI interpretI <- interI$interpret if(is.function(interpretI)) { resultI <- interpretI(Crelevant, interI) paramI <- resultI$param prinI <- resultI$printable inamesI <- resultI$inames inamesI <- paste(nameI, inamesI) if(length(prinI) > 0) { result$param <- append(result$param, paramI) result$printable <- append(result$printable, list(prinI)) result$inames <- c(result$inames, inamesI) } } } } return(result) }, valid = function(coeffs, self) { #' check validity via mechanism used for 'rmhmodel' siminfo <- .Spatstat.Rmhinfo[["Hybrid interaction"]] Z <- siminfo(coeffs, self) cifs <- Z$cif pars <- Z$par ntypes <- Z$ntypes if((Ncif <- length(cifs)) == 1) { #' single cif pars <- append(pars, list(beta=rep.int(1, ntypes))) } else { for(i in 1:Ncif) pars[[i]] <- append(pars[[i]], list(beta=rep.int(1, ntypes[i]))) } RM <- rmhmodel(cif=cifs, par=pars, types=1:max(ntypes), stopinvalid=FALSE) return(RM$integrable) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) #' separate into components spl <- splitHybridInteraction(coeffs, self) interlist <- spl$interlist coeflist <- spl$coeflist #' compute projection for each component interaction Ncif <- length(interlist) projlist <- vector(mode="list", length=Ncif) nproj <- integer(Ncif) for(i in 1:Ncif) { coefsI <- coeflist[[i]] interI <- interlist[[i]] if(!is.interact(interI)) stop("Internal error: interlist entry is not an interaction") projI <- interI$project if(is.null(projI)) stop(paste("Projection is not yet implemented for a", interI$name)) p <- projI(coefsI, interI) #' p can be NULL (indicating no projection required for interI) #' or a single interaction or a list of interactions. if(is.null(p)) { if(Ncif == 1) return(NULL) # no projection required p <- list(NULL) nproj[i] <- 0 } else if(is.interact(p)) { p <- list(p) nproj[i] <- 1L } else if(is.list(p) && all(unlist(lapply(p, is.interact)))) { nproj[i] <- length(p) } else stop("Internal error: result of projection had wrong format") projlist[[i]] <- p } #' for interaction i there are nproj[i] **new** interactions to try. if(all(nproj == 0)) return(NULL) if(spatstat.options("project.fast")) { #' Single interaction required. #' Extract first entry from each list #' (there should be only one entry, but...) qlist <- lapply(projlist, "[[", i=1L) #' replace NULL entries by corresponding original interactions isnul <- unlist(lapply(qlist, is.null)) if(all(isnul)) return(NULL) if(any(isnul)) qlist[isnul] <- interlist[isnul] names(qlist) <- names(interlist) #' build hybrid and return result <- do.call(Hybrid, qlist) return(result) } #' Full case result <- list() for(i in which(nproj > 0)) { ntry <- nproj[i] tries <- projlist[[i]] for(j in 1:ntry) { #' assemble list of component interactions for hybrid qlist <- interlist qlist[[i]] <- tries[[j]] #' eliminate Poisson ispois <- unlist(lapply(qlist, is.poisson)) if(all(ispois)) { #' collapse to single Poisson h <- Poisson() } else { if(any(ispois)) qlist <- qlist[!ispois] h <- do.call(Hybrid, qlist) } result <- append(result, list(h)) } } #' 'result' is a list of interactions, each a hybrid if(length(result) == 1) result <- result[[1L]] return(result) }, irange = function(self, coeffs=NA, epsilon=0, ...) { interlist <- self$par answer <- 0 for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. 
<- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. if(all(is.na(coeffs))) Crelevant <- NA else { Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) #' extract them Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) } #' compute reach reachI <- interI$irange if(is.function(reachI)) { resultI <- reachI(interI, coeffs=Crelevant, epsilon=epsilon, ...) answer <- max(answer, resultI) } } return(answer) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) { interlist <- self$par n <- length(interlist) results <- vector(mode="list", length=n) for(i in seq_len(n)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. if(all(is.na(coeffs))) Crelevant <- NA else { Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) #' extract them Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) } #' compute hard core for component i hardI <- interI$hardcore if(is.function(hardI)) results[[i]] <- hardI(interI, coeffs=Crelevant, epsilon=epsilon, ...) } ## collate answers results <- results[!sapply(results, is.null)] if(length(results) == 0) return(0) values <- Reduce(function(...) pmax(..., na.rm=TRUE), results) dims <- lapply(results, dim) dims <- dims[!sapply(dims, is.null)] if(length(dims) == 0) return(values) dims <- unique(dims) if(length(dims) > 1) stop("Incompatible matrix dimensions of hardcore distance matrices in hybrid") d <- dims[[1]] dn <- unique(lapply(results, dimnames))[[1]] answer <- matrix(values, d[1], d[2], dimnames=dn) return(answer) }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } invokeSelfStart <- function(inte, Y) { ss <- inte$selfstart if(!is.function(ss)) return(inte) return(ss(Y, inte)) } Hybrid }) is.hybrid <- function(x) { UseMethod("is.hybrid") } is.hybrid.interact <- function(x) { return(is.interact(x) && (x$name == "Hybrid interaction")) } is.hybrid.ppm <- function(x) { return(is.hybrid(as.interact(x))) } splitHybridInteraction <- function(coeffs, inte) { # For hybrids, $par is a list of the component interactions, # but coeffs is a numeric vector. # Split the coefficient vector into the relevant coeffs for each interaction interlist <- inte$par N <- length(interlist) coeflist <- vector(mode="list", length=N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("A hybrid-of-hybrid interactions is not implemented") # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
# extract coefficients # (there may be none, if this interaction is Poisson or an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to interaction if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # store coeflist[[i]] <- coeffsI } names(coeflist) <- names(interlist) return(list(coeflist=coeflist, interlist=interlist)) } Hybrid <- intermaker(Hybrid, list(creator="Hybrid", name="general hybrid Gibbs process", par=list("..."=42), parnames=list("any list of interactions"))) spatstat.model/R/subfits.R0000644000176200001440000005457714331173074015244 0ustar liggesusers# # # $Revision: 1.56 $ $Date: 2022/07/08 00:59:33 $ # # subfits.new <- local({ subfits.new <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models", "interactions", "basicmodels")) if(what == "interactions") return(subfits.old(object, what, verbose)) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") fitter <- object$Fit$fitter FIT <- object$Fit$FIT trend <- object$trend random <- object$random info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist Isoffsetlist <- object$Fit$Isoffsetlist has.design <- info$has.design # has.random <- info$has.random announce("done.\n") ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up lists for results models <- rep(list(NULL), npat) interactions <- rep(list(NULL), npat) ## interactions announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names 
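      ## Illustrative usage sketch (hedged; 'waterstriders' is the standard
      ## example dataset):
      ##   H <- hyperframe(Points = waterstriders)
      ##   fit <- mppm(Points ~ x, data = H)
      ##   subfits(fit, what="interactions")  # one fitted interaction per row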
if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) iso <- if(nactive > 0) Isoffsetlist[[tagi]] else logical(0) interactions[[i]] <- fii(interi, coefs.avail, vni, iso) } announce("Done!\n") names(interactions) <- rownames ## if(what=="interactions") return(interactions) ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname moadf <- object$Fit$moadf fmla <- object$Fit$fmla ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Construct template for fake ppm object spv <- package_version(versionstring.spatstat()) fake.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2022/07/08 00:59:33 $") fake.call <- call("cannot.update", Q=NULL, trend=trend, interaction=NULL, covariates=NULL, correction=object$Info$correction, rbord = object$Info$rbord) fakemodel <- list( method = "mpl", fitter = fitter, coef = coef(object), trend = object$trend, interaction = NULL, fitin = NULL, Q = NULL, maxlogpl = NA, internal = list(glmfit = FIT, glmdata = NULL, Vnames = NULL, IsOffset = NULL, fmla = fmla, computed = list()), covariates = NULL, correction = object$Info$correction, rbord = object$Info$rbord, version = fake.version, problems = list(), fisher = fisher, varcov = varcov, call = fake.call, callstring = "cannot.update()", fake = TRUE) class(fakemodel) <- "ppm" ## Loop through point patterns announce("Generating models for each row...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images covariates <- if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.covar && has.design) ## Convert each data frame covariate value to an image covariates[dfvar] <- lapply(covariates[dfvar], as.im, W=Wi) ## Extract relevant interaction finte <- interactions[[i]] inte <- finte$interaction if(is.poisson.interact(inte)) inte <- NULL Vnames <- finte$Vnames if(length(Vnames) == 0) Vnames <- NULL IsOffset <- finte$IsOffset if(length(IsOffset) == 0) IsOffset <- NULL ## Construct fake ppm object fakemodel$interaction <- inte fakemodel$fitin <- finte fakemodel$Q <- Yi fakemodel$covariates <- covariates fakemodel$internal$glmdata <- moadf[moadf$id == i, ] fakemodel$internal$Vnames <- Vnames fakemodel$internal$IsOffset <- IsOffset fake.call$Q <- Yi fake.call$covariates <- covariates fakemodel$call <- fake.call fakemodel$callstring <- short.deparse(fake.call) ## store in list models[[i]] <- fakemodel } announce("done.\n") names(models) <- rownames models <- as.anylist(models) return(models) } Announce <- function(...) cat(...) Ignore <- function(...) 
{ NULL } subfits.new }) ## ///////////////////////////////////////////////////// subfits <- subfits.old <- local({ subfits.old <- function(object, what="models", verbose=FALSE, new.coef=NULL) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models","interactions", "basicmodels")) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") trend <- object$trend random <- object$random use.gam <- object$Fit$use.gam info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist Isoffsetlist <- object$Fit$Isoffsetlist has.design <- info$has.design has.random <- info$has.random moadf <- object$Fit$moadf announce("done.\n") ## levels of any factors levelslist <- lapply(as.list(moadf), levelsAsFactor) isfactor <- !sapply(levelslist, is.null) ## fitted parameters coefs.full <- new.coef %orifnull% coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag, new.coef=new.coef) announce(", ") } announce("done.\n") ## This code is currently not usable because the mapping is wrong reconcile <- FALSE if(reconcile) { ## determine which coefficients of main model are interaction terms announce("Identifying interaction coefficients...") md <- model.depends(object$Fit$FIT) usetags <- unlist(lapply(implcoef, colnames)) isVname <- apply(md[, usetags, drop=FALSE], 1, any) mainVnames <- row.names(md)[isVname] announce("done.\n") } ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null", new.coef=new.coef) ## note vcov.mppm calls subfits(what="basicmodels") to avoid infinite loop varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up list for results results <- rep(list(NULL), npat) if(what == "interactions") { announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- 
implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) iso <- if(nactive > 0) Isoffsetlist[[tagi]] else logical(0) results[[i]] <- fii(interi, coefs.avail, vni, iso) } announce("Done!\n") names(results) <- rownames return(results) } ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Loop through point patterns announce("Looping through rows...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images scrambled <- FALSE if(!has.covar) { covariates <- NULL } else { covariates <- covariates.hf[i, , drop=TRUE, strip=FALSE] if(has.design) { ## Convert each data frame covariate value to an image imrowi <- lapply(covariates[dfvar], as.im, W=Wi) ## Problem: constant covariate leads to singular fit ## --------------- Hack: --------------------------- scrambled <- TRUE ## Construct fake data by resampling from possible values covar.vals <- lapply(as.list(covariates[dfvar, drop=FALSE]), possible) fake.imrowi <- lapply(covar.vals, scramble, W=Wi, Y=Yi$data) ## insert fake data into covariates covariates[dfvar] <- fake.imrowi ## ------------------ end hack ---------------------------- } ## identify factor-valued spatial covariates spatialfactors <- !dfvar & isfactor[names(covariates)] if(any(spatialfactors)) { ## problem: factor levels may be dropped ## more fakery... scrambled <- TRUE spfnames <- names(spatialfactors)[spatialfactors] covariates[spatialfactors] <- lapply(levelslist[spfnames], scramble, W=Wi, Y=Yi$data) } } ## Fit ppm to data for case i only ## using relevant interaction acti <- active[i,] nactive <- sum(acti) if(nactive == 1){ interi <- interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] fiti <- PiPiM(Yi, trend, interi, covariates=covariates, allcovar=has.random, use.gam=use.gam, vnamebase=tagi, vnameprefix=tagi) } else { fiti <- PiPiM(Yi, trend, Poisson(), covariates=covariates, allcovar=has.random, use.gam=use.gam) } fiti$scrambled <- scrambled ## fiti determines which coefficients are required coefi.fitted <- fiti$coef coefnames.wanted <- coefnames.fitted <- names(coefi.fitted) ## reconcile interaction coefficient names if(reconcile) { coefnames.translated <- coefnames.wanted ma <- match(coefnames.fitted, fiti$internal$Vnames) hit <- !is.na(ma) if(any(hit)) coefnames.translated[hit] <- mainVnames[ ma[hit] ] } ## take the required coefficients from the full mppm fit coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. 
coefs.avail[names(coefs.implied)] <- coefs.implied } ## check if(!all(coefnames.wanted %in% names(coefs.avail))) stop("Internal error: some fitted coefficients not accessible") ## hack entries in ppm object fiti$method <- "mppm" ## reset coefficients coefi.new <- coefs.avail[coefnames.wanted] fiti$coef.orig <- coefi.fitted ## (detected by summary.ppm, predict.ppm) fiti$theta <- fiti$coef <- coefi.new ## reset interaction coefficients in 'fii' object coef(fiti$fitin)[] <- coefi.new[names(coef(fiti$fitin))] ## ... and replace fake data by true data if(has.design) { fiti$internal$glmdata.scrambled <- gd <- fiti$internal$glmdata fixnames <- intersect(names(imrowi), colnames(gd)) for(nam in fixnames) { fiti$covariates[[nam]] <- imrowi[[nam]] fiti$internal$glmdata[[nam]] <- data[i, nam, drop=TRUE] } } ## Adjust rank of glm fit object # fiti$internal$glmfit$rank <- FIT$rank fiti$internal$glmfit$rank <- sum(is.finite(fiti$coef)) ## Fisher information and variance-covariance if known ## Extract submatrices for relevant parameters if(reconcile) { #' currently disabled because mapping is wrong if(!is.null(fisher)) { if(!reconcile) { fiti$fisher <- fisher[coefnames.wanted, coefnames.wanted, drop=FALSE] } else { fush <- fisher[coefnames.translated, coefnames.translated, drop=FALSE] dimnames(fush) <- list(coefnames.wanted, coefnames.wanted) fiti$fisher <- fush } } if(!is.null(varcov)) { if(!reconcile) { fiti$varcov <- varcov[coefnames.wanted, coefnames.wanted, drop=FALSE] } else { vc <- varcov[coefnames.translated, coefnames.translated, drop=FALSE] dimnames(vc) <- list(coefnames.wanted, coefnames.wanted) fiti$varcov <- vc } } } ## store in list results[[i]] <- fiti } announce("done.\n") names(results) <- rownames results <- as.anylist(results) return(results) } PiPiM <- function(Y, trend, inter, covariates, ..., allcovar=FALSE, use.gam=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL) { # This ensures that the model is fitted in a unique environment # so that it can be updated later. force(Y) force(trend) force(inter) force(covariates) force(allcovar) force(use.gam) force(vnamebase) force(vnameprefix) feet <- ppm(Y, trend, inter, covariates=covariates, allcovar=allcovar, use.gam=use.gam, forcefit=TRUE, vnamebase=vnamebase, vnameprefix=vnameprefix) return(feet) } possible <- function(z) { if(!is.factor(z)) unique(z) else factor(levels(z), levels=levels(z)) } scramble <- function(vals, W, Y) { W <- as.mask(W) npixels <- prod(W$dim) nvalues <- length(vals) npts <- npoints(Y) ## sample the possible values randomly at the non-data pixels sampled <- sample(vals, npixels, replace=TRUE) Z <- im(sampled, xcol=W$xcol, yrow=W$yrow) ## repeat the possible values cyclically at the data points if(npts >= 1) Z[Y] <- vals[1 + ((1:npts) %% nvalues)] return(Z) } Announce <- function(...) cat(...) Ignore <- function(...) { NULL } subfits.old }) cannot.update <- function(...) { stop("This model cannot be updated") } mapInterVars <- function(object, subs=subfits(object), mom=model.matrix(object)) { #' Map the canonical variables of each 'subs[[i]]' to those of 'object' #' This is only needed for interaction variables. 
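  #'
  #' Illustrative sketch (not run), assuming a small replicated fit:
  #'   H    <- hyperframe(Y = waterstriders)
  #'   fit  <- mppm(Y ~ 1, H, Strauss(5))
  #'   subs <- subfits(fit)          # one fitted model per row
  #'   map  <- mapInterVars(fit, subs)
  #' Then map[[i]] is a named list: each name is an interaction variable
  #' of subs[[i]], and each value is the character vector of names of the
  #' corresponding canonical variables in the full fit.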
#' #' (1) Information about the full model #' Names of interaction variables Vnamelist <- object$Fit$Vnamelist Isoffsetlist <- object$Fit$Isoffsetlist #' Dependence map of canonical variables on the original variables/interactions md <- model.depends(object$Fit$FIT) cnames <- rownames(md) #' (2) Information about the fit on each row #' Identify the (unique) active interaction in each row activeinter <- active.interactions(object) #' Determine which canonical variables of full model are active in each row mats <- split.data.frame(mom, object$Fit$moadf$id) activevars <- sapply(mats, function(df) { apply(df != 0, 2, any, na.rm=TRUE) }) activevars <- if(ncol(mom) > 1) t(activevars) else matrix(activevars, ncol=1) if(ncol(activevars) != ncol(mom)) warning("Internal error: activevars columns do not match canonical variables") if(nrow(activevars) != length(mats)) warning("Internal error: activevars rows do not match hyperframe rows") #' (3) Process each submodel n <- length(subs) result <- rep(list(list()), n) for(i in seq_len(n)) { #' the submodel in this row subi <- subs[[i]] if(!is.poisson(subi)) { cnames.i <- names(coef(subi)) #' the (unique) tag name of the interaction in this model tagi <- colnames(activeinter)[activeinter[i,]] #' the corresponding variable name(s) in glmdata and coef(subi) vni <- Vnamelist[[tagi]] iso <- Isoffsetlist[[tagi]] #' ignore offset variables vni <- vni[!iso] if(length(vni)) { #' retain only interaction variables e <- cnames.i %in% vni cnames.ie <- cnames.i[e] #' which coefficients of the full model are active in this row acti <- activevars[i,] #' for each interaction variable name in the submodel, #' find the coefficient(s) in the main model to which it contributes nie <- length(cnames.ie) cmap <- vector(mode="list", length=nie) names(cmap) <- cnames.ie for(j in seq_len(nie)) { cj <- cnames.ie[j] cmap[[j]] <- cnames[ md[,cj] & acti ] } #' The result 'cmap' is a named list of character vectors #' where each name is an interaction variable in subs[[i]] #' and the corresponding value is the vector of names of #' corresponding canonical variables in the full model result[[i]] <- cmap } } } return(result) } spatstat.model/R/zclustermodel.R0000644000176200001440000000715514510474257016455 0ustar liggesusers#' #' zclustermodel.R #' #' Experimental #' #' $Revision: 1.11 $ $Date: 2023/09/11 04:13:58 $ #' zclustermodel <- function(name="Thomas", ..., mu, kappa, scale) { if(missing(kappa)) stop("The parent intensity kappa must be given") if(missing(mu)) stop("The mean cluster size mu must be given") if(missing(scale)) stop("The cluster scale must be given") rules <- spatstatClusterModelInfo(name) par.std <- c(kappa=kappa, scale=scale) par.std <- rules$checkpar(par.std, native=FALSE) par.idio <- rules$checkpar(par.std, native=TRUE) other <- rules$resolveshape(...) clustargs <- rules$outputshape(other$margs) out <- list(name=name, rules=rules, par.std=par.std, par.idio=par.idio, mu=mu, clustargs=clustargs, other=other) class(out) <- "zclustermodel" return(out) } print.zclustermodel <- local({ print.zclustermodel <- function(x, ...) 
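#' Illustrative sketch (not run): build and interrogate a model,
#' assuming standard Thomas-process parameters:
#'   m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.05)
#'   intensity(m)        # kappa * mu
#'   pcfmodel(m)(0.1)    # pair correlation function at r = 0.1
#'   Kmodel(m)(0.1)      # K-function at r = 0.1
#'   reach(m)            # twice the cluster radius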
{
    with(x, {
      splat(rules$printmodelname(list(clustargs=clustargs)))
      newpar <- rules$checkpar(par.std, native=FALSE)
      splat("Parent intensity kappa =", blurb("kappa", newpar["kappa"]))
      splat("Cluster scale = ", newpar["scale"])
      splat("Mean cluster size mu =", blurb("mu", mu))
      if(length(clustargs) > 0) {
        hdr <- paste("Cluster shape",
                     ngettext(length(clustargs), "parameter:", "parameters:"))
        if(is.list(clustargs) &&
           all(sapply(clustargs, is.numeric)) &&
           all(lengths(clustargs) == 1)) {
          splat(hdr, paste(names(clustargs), as.numeric(clustargs),
                           sep="=", collapse=", "))
        } else {
          splat(hdr)
          print(clustargs)
        }
      }
    })
    return(invisible(NULL))
  }

  blurb <- function(name, value) {
    if(is.numeric(value)) as.character(value) else
    if(is.im(value)) "[image]" else "[unrecognized format]"
  }

  print.zclustermodel
})

pcfmodel.zclustermodel <- function(model, ...) {
  p <- model$rules$pcf
  mpar <- model$par.idio
  other <- model$other
  f <- function(r) {
    as.numeric(do.call(p, c(list(par=mpar, rvals=r), other)))
  }
  return(f)
}

Kmodel.zclustermodel <- function(model, ...) {
  K <- model$rules$K
  mpar <- model$par.idio
  other <- model$other
  f <- function(r) {
    as.numeric(do.call(K, c(list(par=mpar, rvals=r), other)))
  }
  return(f)
}

intensity.zclustermodel <- function(X, ...) {
  ## read the parent intensity from 'par.std', the entry actually
  ## created by zclustermodel(); the object has no 'par' entry,
  ## so 'X$par' would be NULL and the computation would fail
  X$par.std[["kappa"]] * X$mu
}

predict.zclustermodel <- function(object, ..., locations,
                                  type="intensity", ngrid=NULL) {
  ## limited use!!!
  if(!identical(type, "intensity"))
    stop("Sorry, only type='intensity' is implemented")
  lambda <- object$par.std[["kappa"]] * object$mu
  if(is.numeric(lambda)) {
    if(is.ppp(locations))
      return(rep(lambda, npoints(locations)))
    W <- as.owin(locations)
    if(!is.mask(W)) W <- as.mask(W, dimyx=ngrid, ...)
    return(as.im(lambda, W=W))
  }
  return(lambda[locations])
}

clusterradius.zclustermodel <- function(model, ...,
                                        thresh = NULL, precision = FALSE) {
  do.call(clusterradius.character,
          resolve.defaults(
            list(model = model$name, thresh = thresh, precision = precision),
            list(...),
            as.list(model$par.std), # sic
            model$clustargs)
          )
}

reach.zclustermodel <- function(x, ..., epsilon) {
  thresh <- if(missing(epsilon)) NULL else epsilon
  2 * clusterradius(x, ..., thresh=thresh)
}

is.poissonclusterprocess.zclustermodel <- function(model) { TRUE }

spatstat.model/R/fii.R0000644000176200001440000001651114243551505014317 0ustar liggesusers#
# fii.R
#
# Class of fitted interpoint interactions
#
#
fii <- function(interaction=NULL, coefs=numeric(0),
                Vnames=character(0), IsOffset=NULL, vnameprefix="") {
  if(is.null(interaction)) interaction <- Poisson()
  stopifnot(is.interact(interaction))
  if(is.poisson.interact(interaction)) {
    if(length(Vnames) > 0)
      stop("Coefficients inappropriate for Poisson process")
  }
  if(is.null(IsOffset))
    IsOffset <- rep.int(FALSE, length(Vnames))
  else {
    stopifnot(is.logical(IsOffset))
    stopifnot(length(IsOffset) == length(Vnames))
  }
  hasInf <- !identical(interaction$hasInf, FALSE)
  out <- list(interaction=interaction,
              coefs=coefs,
              Vnames=Vnames,
              IsOffset=IsOffset,
              hasInf=hasInf,
              vnameprefix=vnameprefix)
  class(out) <- c("fii", class(out))
  return(out)
}

summary.fii <- function(object, ...)
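## summary.fii interprets the canonical coefficients on their natural
## scale whenever the interaction supplies an 'interpret' function;
## e.g. for a Strauss fit the canonical coefficient is log(gamma) and
## the summary reports gamma itself. Illustrative sketch (not run):
##   fit <- ppm(cells ~ 1, Strauss(0.1))
##   summary(fitin(fit))   # shows the fitted interaction parameter gamma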
{ y <- unclass(object) INTERACT <- object$interaction coefs <- object$coefs Vnames <- object$Vnames IsOffset <- object$IsOffset vnprefix <- object$vnameprefix y$poisson <- is.poisson.interact(INTERACT) thumbnail <- NULL if(y$poisson) { thumbnail <- "Poisson()" } else { if(!is.null(interpret <- INTERACT$interpret)) { ## invoke auto-interpretation feature newstyle <- newstyle.coeff.handling(INTERACT) Icoefs <- if(newstyle) coefs[Vnames[!IsOffset]] else coefs ## strip off vname prefix used by mppm if(npre <- sum(nchar(vnprefix))) names(Icoefs) <- substring(names(Icoefs), npre+1L) ## auto-interpret sensible <- interpret(Icoefs, INTERACT) if(!is.null(sensible) && sum(lengths(sensible))) { header <- paste("Fitted", sensible$inames) printable <- sensible$printable # Try to make a thumbnail description param <- sensible$param ipar <- INTERACT$par if(all(lengths(param) == 1) && all(lengths(ipar) == 1)) { allargs <- append(ipar, param) allargs <- lapply(allargs, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, allargs) } } else { # no fitted interaction parameters (e.g. Hard Core) header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } else { # fallback sensible <- NULL VN <- Vnames[!IsOffset] if(length(VN) > 0) { header <- "Fitted interaction terms" icoef <- coefs[VN] printable <- exp(unlist(icoef)) ricoef <- lapply(icoef, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, ricoef) } else { header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } y <- append(y, list(sensible=sensible, header=header, printable=printable, thumbnail=thumbnail)) } class(y) <- c("summary.fii", class(y)) return(y) } print.fii <- function(x, ...) { sx <- summary(x) do.call(print.summary.fii, resolve.defaults(list(x=sx, brief=TRUE), list(...))) return(invisible(NULL)) } print.summary.fii <- local({ #' hide internal arguments print.summary.fii <- function(x, ...) { PrintIt(x, ...) } PrintIt <- function(x, ..., prefix="Interaction: ", banner=TRUE, family = waxlyrical('extras'), brief = !family, tiny = !waxlyrical('errors')) { if(tiny) { #' use thumbnail if available thumbnail <- x$thumbnail if(!is.null(thumbnail)) { splat(thumbnail) return(invisible(NULL)) } } terselevel <- spatstat.options('terse') if(banner && !brief) cat(prefix) if(x$poisson) { splat("Poisson process") parbreak(terselevel) } else { print(x$interaction, family=family, brief=TRUE, banner=banner) if(!is.null(x$printable)) { nvalues <- length(x$printable) nheader <- length(x$header) if(nvalues == 1 || nvalues == nheader) { for(i in 1:nvalues) { hdi <- x$header[i] xpi <- x$printable[[i]] if(!is.list(xpi) && length(xpi) == 1) { splat(paste0(hdi, ":\t", xpi)) } else { splat(paste0(hdi, ":")) print(xpi) } } } else { splat(x$header) print(x$printable) } } } if(!brief) { co <- x$coefs[x$Vnames[!x$IsOffset]] if(length(co) > 0) { parbreak(terselevel) splat("Relevant coefficients:") print(co) } } return(invisible(NULL)) } print.summary.fii }) parameters.fii <- function(model, ...) { ss <- summary(model) out <- append(ss$interaction$par, ss$sensible$param) return(out) } coef.summary.fii <- function(object, ...) 
{ object$printable } reach.fii <- function(x, ..., epsilon=0) { inte <- x$interaction coeffs <- x$coefs Vnames <- x$Vnames if(is.poisson.interact(inte)) return(0) # get 'irange' function from interaction object irange <- inte$irange if(is.null(irange)) return(Inf) # apply 'irange' function using fitted coefficients if(newstyle.coeff.handling(inte)) ir <- irange(inte, coeffs[Vnames], epsilon=epsilon) else ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } hardcoredist <- function(x, ...) { UseMethod("hardcoredist") } hardcoredist.fii <- function(x, ..., epsilon=0) { inte <- x$interaction coeffs <- x$coefs Vnames <- x$Vnames if(is.poisson.interact(inte)) return(0) # get 'hardcore' function from interaction object hardcore <- inte$hardcore if(is.null(hardcore) || !is.function(hardcore)) return(0) # apply 'hardcore' function using fitted coefficients h <- hardcore(inte, coeffs[Vnames], epsilon=epsilon) if(is.na(h) || is.null(h)) h <- 0 return(as.numeric(h)) } plot.fii <- function(x, ...) { inte <- x$interaction if(is.poisson.interact(inte)) { message("Poisson interaction; nothing plotted") return(invisible(NULL)) } plfun <- inte$plot %orifnull% inte$family$plot if(is.null(plfun)) stop("Plotting not implemented for this type of interaction") plfun(x, ...) } fitin <- function(object) { UseMethod("fitin") } fitin.ppm <- function(object) { f <- object$fitin if(!is.null(f)) return(f) ## For compatibility with older versions inte <- object$interaction if(is.null(inte)) { f <- fii() # Poisson } else { coefs <- coef(object) Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset if(npre <- sum(nchar(object$internal$vnameprefix))) names(coefs) <- substring(names(coefs), npre+1L) # Internal names of regressor variables f <- fii(inte, coefs, Vnames, IsOffset) } unitname(f) <- unitname(data.ppm(object)) return(f) } as.interact.fii <- function(object) { verifyclass(object, "fii") return(object$interaction) } coef.fii <- function(object, ...) 
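## Illustrative sketch (not run): interrogating a fitted interaction.
## Assuming 'fit' is a ppm fitted with a StraussHard interaction:
##   f <- fitin(fit)
##   reach(f)          # interaction range implied by fitted coefficients
##   hardcoredist(f)   # fitted hard core distance (0 if there is none)
##   coef(f)           # raw canonical interaction coefficients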
{ verifyclass(object, "fii") return(object$coefs) } "coef<-.fii" <- function(object, ..., value) { verifyclass(object, "fii") check.nvector(value, length(object$coefs), things="coefficients", naok=TRUE, vname="value") object$coefs <- value %orifnull% numeric(0) return(object) } spatstat.model/R/hackglmm.R0000644000176200001440000000726414331173074015337 0ustar liggesusers# hackglmm.R # $Revision: 1.12 $ $Date: 2022/04/26 07:19:58 $ hackglmmPQL <- function (fixed, random, family, data, correlation, weights, control, niter = 10, verbose = TRUE, subset, ..., reltol=1e-3) { if (is.character(family)) family <- get(family) if (is.function(family)) family <- family() if (is.null(family$family)) { print(family) stop("'family' not recognized") } m <- mcall <- Call <- match.call() nm <- names(m)[-1L] keep <- is.element(nm, c("weights", "data", "subset", "na.action")) for (i in nm[!keep]) m[[i]] <- NULL allvars <- if (is.list(random)) allvars <- c(all.vars(fixed), names(random), unlist(lapply(random, function(x) all.vars(formula(x))))) else c(all.vars(fixed), all.vars(random)) Terms <- if (missing(data)) terms(fixed) else terms(fixed, data = data) off <- attr(Terms, "offset") if (length(off <- attr(Terms, "offset"))) allvars <- c(allvars, as.character(attr(Terms, "variables"))[off + 1]) Call$fixed <- eval(fixed) Call$random <- eval(random) m$formula <- as.formula(paste("~", paste(allvars, collapse = "+"))) environment(m$formula) <- environment(fixed) m$drop.unused.levels <- TRUE m[[1L]] <- as.name("model.frame") mf <- eval.parent(m) off <- model.offset(mf) if (is.null(off)) off <- 0 w <- model.weights(mf) if (is.null(w)) w <- rep(1, nrow(mf)) wts <- mf$wts <- w if(missing(subset)) fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, ...) else { # hack to get around peculiar problem with `subset' argument glmmsubset <- eval(expression(subset), data) if(length(glmmsubset) != nrow(mf)) { if(sum(glmmsubset) != nrow(mf)) stop("Internal error: subset vector is wrong length") message("(Fixing subset index..)") glmmsubset <- glmmsubset[glmmsubset] } mf$glmmsubset <- glmmsubset fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, subset=glmmsubset, ...) } w <- fit0$prior.weights eta <- fit0$linear.predictor zz <- eta + fit0$residuals - off wz <- fit0$weights fam <- family nm <- names(mcall)[-1L] keep <- is.element(nm, c("fixed", "random", "data", "subset", "na.action", "control")) for (i in nm[!keep]) mcall[[i]] <- NULL fixed[[2L]] <- quote(zz) mcall[["fixed"]] <- fixed mcall[[1L]] <- as.name("lme") mcall$random <- random mcall$method <- "ML" if (!missing(correlation)) mcall$correlation <- correlation mcall$weights <- quote(varFixed(~invwt)) mf$zz <- zz mf$invwt <- 1/wz mcall$data <- mf for (i in 1:niter) { if (verbose) cat("iteration", i, "\n") fit <- eval(mcall) etaold <- eta eta <- fitted(fit) + off if (sum((eta - etaold)^2) < (reltol^2) * sum(eta^2)) break mu <- fam$linkinv(eta) mu.eta.val <- fam$mu.eta(eta) mf$zz <- eta + (fit0$y - mu)/mu.eta.val - off wz <- w * mu.eta.val^2/fam$variance(mu) mf$invwt <- 1/wz mcall$data <- mf } fit$call <- Call fit$family <- family # if(!spatstat.options("developer")) { # attributes(fit$logLik) <- NULL # fit$logLik <- as.numeric(NA) # } oldClass(fit) <- c("hackglmmPQL", "glmmPQL", oldClass(fit)) fit } family.hackglmmPQL <- function(object, ...) { object$family } formula.hackglmmPQL <- function(x, ...) 
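# hackglmmPQL above follows the usual PQL iteration (cf. MASS::glmmPQL):
# given the current linear predictor eta, the working response is
#     zz = eta + (y - mu)/mu.eta(eta) - offset
# with working weights  w * mu.eta(eta)^2 / V(mu),
# and the linear mixed model is refitted to zz (via lme with weights
# varFixed(~invwt)) until eta stabilises to relative tolerance 'reltol'.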
{ x$call$fixed } stripGLMM <- function(object) { oldClass(object) <- setdiff(oldClass(object), c("hackglmmPQL", "glmmPQL")) return(object) } spatstat.model/R/panysib.R0000644000176200001440000000156014334115572015214 0ustar liggesusers#' panysib.R #' Probability that a point has any siblings #' #' $Revision: 1.3 $ $Date: 2022/11/13 03:13:18 $ #' #' Copyright (c) Adrian Baddeley 2022 #' GNU Public Licence >= 2.0 panysib <- function(object) { stopifnot(is.poissonclusterprocess(object)) if(is.stationary(object)) return(1-exp(-object$mu)) R <- reach(object) W <- Window(object) if(R > 5 * diameter(Frame(W))) { ## treat as stationary, but return image value <- 1 - exp(-mean(object$mu)) result <- as.im(value, W=W) } else { par <- parameters(object) lam <- predict(object, window=grow.rectangle(Frame(W), R)) mu <- lam/par[["kappa"]] h <- clusterkernel(object) EM <- blur(mu, kernel=h) EM <- eval.im(pmax(0, EM)) P <- blur(exp(-EM), kernel=h) P <- eval.im(pmax(0, P)) result <- 1 - P[W, drop=FALSE, tight=TRUE] } return(result) } spatstat.model/R/percy.R0000644000176200001440000000545414331173073014674 0ustar liggesusers## percus.R ## ## Percus-Yevick style approximations to pcf and K ## ## $Revision: 1.6 $ $Date: 2022/01/20 00:47:44 $ pcfmodel.ppm <- local({ pcfmodel.ppm <- function(model, ...) { if(is.multitype(model)) stop("Not yet implemented for multitype models") if(is.poisson(model)) return(function(r) rep(1, length(r))) if(!is.stationary(model)) stop("Not implemented for non-stationary Gibbs models") inte <- as.interact(model) if(inte$family$name != "pairwise") stop("Only implemented for pairwise-interaction models") lambda <- intensity(model) beta <- exp(coef(model)[1]) par <- inte$par pot <- inte$pot f <- fitin(model) Vcoefs <- f$coefs[f$Vnames] Mayer <- inte$Mayer G <- Mayer(Vcoefs, inte) irange <- reach(inte, epsilon=1e-6) G2fun <- inte$Percy testit <- resolve.1.default(list(testit=FALSE), list(...)) if(testit || is.null(G2fun)) G2fun <- pairwisePercy fun <- function(r) { pcfapprox(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) } return(fun) } pcfapprox <- function(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) { as.numeric((beta/lambda)^2 * exp(logpairpot(r, pot, par, Vcoefs) - lambda * G2fun(r, Vcoefs, par, pot=pot, irange=irange, G=G))) } logpairpot <- function(r, pot, par, Vcoefs) { as.numeric(pot(matrix(r, ncol=1), par) %*% Vcoefs) } negpair <- function(x,y, pot, par, Vcoefs) { ## evaluate 1 - g(x,y) ## where g(x,y) is pair interaction between (0,0) and (x,y) 1 - exp(logpairpot(sqrt(x^2+y^2), pot, par, Vcoefs)) } pairwisePercy <- function(r, Vcoefs, par, ..., G, pot, irange, dimyx=256) { S <- max(max(r), irange) ng <- as.im(negpair, square(c(-S,S)), pot=pot, par=par, Vcoefs=Vcoefs, dimyx=dimyx) ng2 <- convolve.im(ng) rr <- seq(min(r), max(r), length=dimyx[1]) yy <- ng2[list(x=rr, y=rep.int(0, dimyx[1]))] zz <- 2 * G - yy z <- approx(rr, zz, r)$y return(z) } pcfmodel.ppm }) Kmodel.ppm <- local({ Kmodel.ppm <- function(model, ...) { if(is.poisson(model)) return(function(r) { pi * r^2 }) pc <- pcfmodel(model, ...) 
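    ## The K-function is recovered from the pair correlation function by
    ##     K(r) = 2 * pi * integral from 0 to r of  s * g(s) ds
    ## evaluated numerically by pcf2K below.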
K <- function(r) pcf2K(r, pc) return(K) } pcf2K <- function(r, pc) { ## integrate the pair correlation function to obtain the K-function if(length(r) == 1) { ## definite integral spcfs <- function(s) { s * pc(s) } y <- 2 * pi * integrate(spcfs, lower=0, upper=r)$value } else { ## indefinite integral rr <- seq(0, max(r), length=1025) dr <- max(r)/(length(rr) - 1) ff <- 2 * pi * rr * pc(rr) yy <- dr * cumsum(ff) y <- approx(rr, yy, r)$y } return(y) } Kmodel.ppm }) spatstat.model/R/hierhard.R0000644000176200001440000001537214331173073015340 0ustar liggesusers## ## hierhard.R ## ## $Revision: 1.5 $ $Date: 2022/03/07 02:06:42 $ ## ## The hierarchical hard core process ## ## ------------------------------------------------------------------- ## HierHard <- local({ # ......... define interaction potential HHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(h) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] ## corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
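    # For each ordered pair of types permitted by the hierarchy, any
    # pair of points closer than the hard core distance h[tx, tu] is
    # assigned potential value -Inf, i.e. the configuration is forbidden;
    # the whole term is flagged as an offset (IsOffset) because a hard
    # core carries no free parameter to estimate.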
if(length(z) > 0) { # apply relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- TRUE return(z) } #### end of 'pot' function #### # Set up basic object except for family and parameters BlankHHobject <- list( name = "Hierarchical hard core process", creator = "HierHard", family = "hierpair.family", # evaluated later pot = HHpotential, par = list(types=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of hardcore distances", "hierarchical order"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1L, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) h[!(archy$relation)] <- NA } HierHard(types=types,hradii=hradii,archy=archy) }, init = function(self) { types <- self$par$types hradii <- self$par$hradii ## hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE) } }, update = NULL, # default OK print = function(self) { hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(hradii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") if(!is.null(hradii)) { splat("Hardcore radii:") print(hiermat(dround(hradii), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { h <- self$par$hradii active <- !is.na(h) & self$par$archy$relation return(max(0, h[active])) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) 
{ h <- self$par$hradii active <- !is.na(h) & self$par$archy$relation h[!active] <- 0 return(h) }, version=NULL # to be added ) class(BlankHHobject) <- "interact" # finally create main function HierHard <- function(hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } out <- instantiate.interact(BlankHHobject, list(types=types, hradii=hradii, archy=archy)) if(!is.null(types) && !is.null(out$par$hradii)) dimnames(out$par$hradii) <- list(types,types) return(out) } HierHard <- intermaker(HierHard, BlankHHobject) HierHard }) spatstat.model/R/pairwise.R0000644000176200001440000000505714331173073015374 0ustar liggesusers# # # pairwise.S # # $Revision: 1.12 $ $Date: 2019/02/20 03:32:22 $ # # Pairwise() create a user-defined pairwise interaction process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Pairwise <- function(pot, name = "user-defined pairwise interaction process", par = NULL, parnames=NULL, printfun) { fop <- names(formals(pot)) if(!isTRUE(all.equal(fop, c("d", "par"))) && !isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) if(!is.null(parnames)) { stopifnot(is.character(parnames)) if(is.null(par) || length(par) != length(parnames)) stop("par does not match parnames") } if(missing(printfun)) printfun <- function(self) { cat("Potential function:\n") print(self$pot) if(!is.null(parnames <- self$parnames)) { for(i in 1:length(parnames)) { cat(paste(parnames[i], ":\t")) pari <- self$par[[i]] if(is.numeric(pari) && length(pari) == 1) cat(pari, "\n") else print(pari) } } } out <- list( name = name, creator = "Pairwise", family = pairwise.family, pot = pot, par = par, parnames = parnames, hasInf = NA, init = NULL, update = function(self, ...){ do.call(Pairwise, resolve.defaults(list(...), list(pot=self$pot, name=self$name, par=self$par, parnames=self$parnames, printfun=self$print))) } , print = printfun, version = versionstring.spatstat() ) class(out) <- "interact" return(out) } Pairwise <- intermaker(Pairwise, list(creator="Pairwise", name="user-defined pairwise interaction process", par=formals(Pairwise), parnames=list("the potential", "the name of the interaction", "the list of parameters", "a description of each parameter", "an optional print function"))) spatstat.model/R/eem.R0000644000176200001440000000132214331173072014305 0ustar liggesusers# eem.R # # Computes the Stoyan-Grabarnik "exponential energy weights" # # $Revision: 1.6 $ $Date: 2021/10/30 05:19:06 $ # eem <- function(fit, ...) { UseMethod("eem") } eem.ppm <- function(fit, check=TRUE, ...) { verifyclass(fit, "ppm") lambda <- fitted.ppm(fit, dataonly=TRUE, check=check) eemarks <- 1/lambda attr(eemarks, "type") <- "eem" attr(eemarks, "typename") <- "exponential energy marks" return(eemarks) } eem.slrm <- function(fit, check=TRUE, ...) 
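# The Stoyan-Grabarnik weights are 1/lambda(x_i), the reciprocal of the
# fitted (conditional) intensity at each data point. If the model is
# correct, their sum over any subregion estimates the area of that region.
# Illustrative sketch (not run):
#   fit <- ppm(cells ~ 1, Strauss(0.1))
#   w <- eem(fit)
#   sum(w)    # should be close to area(Window(cells))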
{ verifyclass(fit, "slrm") Y <- response(fit) lambdaY <- predict(fit, type="intensity")[Y, drop=FALSE] eemarks <- 1/lambdaY attr(eemarks, "type") <- "eem" attr(eemarks, "typename") <- "exponential energy marks" return(eemarks) } spatstat.model/R/summary.dppm.R0000644000176200001440000000532714331173073016205 0ustar liggesusers#' #' summary.dppm.R #' #' $Revision: 1.5 $ $Date: 2021/07/05 08:48:33 $ summary.dppm <- function(object, ..., quick=FALSE) { nama <- names(object) result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))] ## Fitting information result$has.subset <- "subset" %in% names(object$call) ## Summarise trend component result$trend <- summary(as.ppm(object), ..., quick=quick) ## repulsion strength result$repul <- mean(repul(object)) #' pack up class(result) <- "summary.dppm" return(result) } print.summary.dppm <- function(x, ...) { terselevel <- spatstat.options('terse') digits <- getOption('digits') splat(if(x$stationary) "Stationary" else "Inhomogeneous", "determinantal point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) Fit <- x$Fit if(waxlyrical('gory', terselevel)) { switch(Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", Fit$StatName) print(Fit$mcfit) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(optimStatus(Fit$clfit)) }, adapcl = { splat("Fitted by adaptive second order composite likelihood") splat("\tepsilon =", x$Fit$epsilon) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(optimStatus(Fit$clfit)) }, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) } # ............... trend ......................... parbreak() splat("----------- TREND -----") print(x$trend, ...) # ..................... determinantal part ................ 
parbreak() splat("---------- DETERMINANTAL STRUCTURE -----------------") print(x$fitted) parbreak() splat(if(x$stationary) "Strength" else "(Average) strength", "of repulsion:", signif(x$repul, digits)) return(invisible(NULL)) } spatstat.model/R/fitted.ppm.R0000644000176200001440000001275014331173073015621 0ustar liggesusers# # fitted.ppm.R # # method for 'fitted' for ppm objects # # $Revision: 1.19 $ $Date: 2022/01/20 02:05:37 $ # fitted.ppm <- function(object, ..., type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, ignore.hardcore=FALSE, dropcoef=FALSE) { verifyclass(object, "ppm") if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(leaveoneout) { ## Leave-one-out calculation for data points only if(missing(dataonly)) dataonly <- TRUE if(!dataonly) stop("Leave-one-out calculation requires dataonly=TRUE") if(!is.null(new.coef)) stop("Leave-one-out calculation requires new.coef=NULL") if(length(coef(object)) == 0) warning("Model has no fitted coefficients; using leaveoneout=FALSE") } coeffs <- adaptcoef(new.coef, coef(object), drop=dropcoef) uniform <- is.poisson.ppm(object) && no.trend.ppm(object) typelist <- c("lambda", "cif", "trend", "link") typevalu <- c("lambda", "lambda", "trend", "link") if(is.na(m <- pmatch(type, typelist))) stop(paste("Unrecognised choice of ", sQuote("type"), ": ", sQuote(type), sep="")) type <- typevalu[m] if(uniform) { lambda <- exp(coeffs[[1L]]) Q <- quad.ppm(object, drop=drop) lambda <- rep.int(lambda, n.quad(Q)) } else { glmdata <- getglmdata(object, drop=drop) glmfit <- getglmfit(object) Vnames <- object$internal$Vnames interacting <- (length(Vnames) != 0) # Modification of `glmdata' may be required if(interacting) switch(type, trend={ ## zero the interaction statistics glmdata[ , Vnames] <- 0 }, link=, lambda={ if(!ignore.hardcore) { ## Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) ## Exclude these locations from predict.glm glmdata <- glmdata[!forbid, ] } else { ## Compute positive part of cif Q <- quad.ppm(object, drop=drop) X <- Q[["data"]] U <- union.quad(Q) E <- equalpairs.quad(Q) eva <- evalInteraction(X, U, E, object$interaction, object$correction, splitInf=TRUE) forbid <- attr(eva, "-Inf") %orifnull% logical(npoints(U)) ## Use positive part of interaction if(ncol(eva) != length(Vnames)) stop(paste("Internal error: evalInteraction yielded", ncol(eva), "variables instead of", length(Vnames)), call.=FALSE) glmdata[,Vnames] <- as.data.frame(eva) } }) # Compute predicted [conditional] intensity values changecoef <- !is.null(new.coef) || (object$method != "mpl") lambda <- GLMpredict(glmfit, glmdata, coeffs, changecoef=changecoef, type = ifelse(type == "link", "link", "response")) # Note: the `newdata' argument is necessary in order to obtain # predictions at all quadrature points. If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. Assuming drop=FALSE. 
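  # Illustrative sketch (not run) of the main options:
  #   fitted(fit)                  # conditional intensity at quadrature points
  #   fitted(fit, type="trend")    # first-order trend, interaction terms zeroed
  #   fitted(fit, dataonly=TRUE)   # values at the data points only
  #   fitted(fit, dataonly=TRUE, leaveoneout=TRUE)  # leave-one-out version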
if(interacting && type=="lambda" && !ignore.hardcore) { # reinsert zeroes lam <- numeric(length(forbid)) lam[forbid] <- 0 lam[!forbid] <- lambda lambda <- lam } } if(dataonly) lambda <- lambda[is.data(quad.ppm(object))] if(leaveoneout) { ## Perform leverage calculation dfb <- dfbetas(object, multitypeOK=TRUE) delta <- with(dfb, 'discrete')[with(dfb, 'is.atom'),,drop=FALSE] ## adjust fitted value mom <- model.matrix(object)[is.data(quad.ppm(object)),,drop=FALSE] if(type == "trend" && !uniform && interacting) mom[, Vnames] <- 0 lambda <- lambda * exp(- rowSums(delta * mom)) } lambda <- unname(as.vector(lambda)) return(lambda) } adaptcoef <- function(new.coef, fitcoef, drop=FALSE) { ## a replacement for 'fitcoef' will be extracted from 'new.coef' if(is.null(new.coef)) { coeffs <- fitcoef } else if(length(new.coef) == length(fitcoef)) { coeffs <- new.coef } else { fitnames <- names(fitcoef) newnames <- names(new.coef) if(is.null(newnames) || is.null(fitnames)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef)), call.=FALSE) absentnames <- setdiff(fitnames, newnames) excessnames <- setdiff(newnames, fitnames) if((nab <- length(absentnames)) > 0) stop(paste(ngettext(nab, "Coefficient", "Coefficients"), commasep(sQuote(absentnames)), ngettext(nab, "is", "are"), "missing from new.coef"), call.=FALSE) if(!drop && ((nex <- length(excessnames)) > 0)) stop(paste(ngettext(nex, "Coefficient", "Coefficients"), commasep(sQuote(excessnames)), ngettext(nab, "is", "are"), "present in new.coef but not in coef(object)"), call.=FALSE) #' extract only the relevant coefficients coeffs <- new.coef[fitnames] } return(coeffs) } spatstat.model/R/summary.ppm.R0000644000176200001440000004273314366605615016055 0ustar liggesusers# # summary.ppm.R # # summary() method for class "ppm" # # $Revision: 1.81 $ $Date: 2023/02/02 00:07:00 $ # # summary.ppm() # print.summary.ppm() # summary.ppm <- local({ covtype <- function(x) { if(is.im(x)) "im" else if(is.function(x)) "function" else if(is.owin(x)) "owin" else if(is.numeric(x) && length(x) == 1) "number" else if(is.factor(x)) "factor" else if(is.integer(x)) "integer" else if(is.numeric(x)) "numeric" else storage.mode(x) } xargs <- function(f) { ar <- names(formals(f))[-(1:2)] return(ar[ar != "..."]) } summary.ppm <- function(object, ..., quick=FALSE, fine=FALSE) { verifyclass(object, "ppm") x <- object y <- list() class(y) <- "summary.ppm" ####### Extract main data components ######################### QUAD <- object$Q DATA <- QUAD$data TREND <- x$trend INTERACT <- x$interaction if(is.null(INTERACT)) INTERACT <- Poisson() ####### Check version ######################### mpl.ver <- versionstring.ppm(object) int.ver <- versionstring.interact(INTERACT) current <- versionstring.spatstat() virgin <- min(package_version(c(mpl.ver, int.ver))) y$antiquated <- antiquated <- (virgin <= package_version("1.5")) y$old <- (virgin < majorminorversion(current)) y$version <- as.character(virgin) ####### Determine type of model ############################ y$entries <- list() y$no.trend <- identical.formulae(TREND, NULL) || identical.formulae(TREND, ~1) y$trendvar <- trendvar <- variablesinformula(TREND) y$stationary <- y$no.trend || all(trendvar == "marks") y$poisson <- is.poisson.interact(INTERACT) y$marked <- is.marked.ppp(DATA) y$multitype <- is.multitype.ppp(DATA) y$marktype <- if(y$multitype) "multitype" else if(y$marked) "marked" else "unmarked" if(y$marked) y$entries$marks <- marks(DATA) y$name <- paste(if(y$stationary) "Stationary " else 
"Nonstationary ", if(y$poisson) { if(y$multitype) "multitype " else if(y$marked) "marked " else "" }, INTERACT$name, sep="") ###### Fitting algorithm ######################################## y$method <- x$method y$improve.type <- x$improve.type %orifnull% "none" y$VB <- x$internal$VB y$problems <- x$problems y$fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" if(y$fitter %in% c("glm", "gam")) y$converged <- x$internal$glmfit$converged ###### Coefficients were changed after fit? ##################### y$projected <- yproj <- identical(x$projected, TRUE) y$changedcoef <- yproj || !is.null(x$coef.orig) y$valid <- valid.ppm(x, warn=FALSE) ###### Extract fitted model coefficients ######################### y$entries$coef <- COEFS <- x$coef y$coef.orig <- x$coef.orig y$entries$Vnames <- Vnames <- x$internal$Vnames y$entries$IsOffset <- x$internal$IsOffset ###### Extract fitted interaction and summarise ################# FITIN <- fitin(x) y$interaction <- summary(FITIN) # Exit here if quick=TRUE if(identical(quick, TRUE)) return(y) ###### Does it have external covariates? #################### # defaults y <- append(y, list(has.covars = FALSE, covnames = character(0), covars.used = character(0), uses.covars = FALSE, covars.are.df = FALSE, expandable = TRUE, covar.type = character(0), covar.descrip = character(0), has.funcs = FALSE, covfunargs = NULL, has.xargs = FALSE, xargmap = NULL)) class(y) <- "summary.ppm" if(!antiquated) { covars <- x$covariates y$has.covars <- hc <- !is.null(covars) && (length(covars) > 0) if(hc) { y$covnames <- names(covars) used <- (y$trendvar %in% names(covars)) y$covars.used <- y$trendvar[used] y$uses.covars <- any(used) y$covars.are.df <- is.data.frame(covars) # describe covariates ctype <- unlist(lapply(covars, covtype)) y$expandable <- all(ctype[used] %in%c("function", "number")) names(ctype) <- names(covars) y$covar.type <- ctype y$covar.descrip <- ctype # are there any functions? y$has.funcs <- any(isfun <- (ctype == "function")) # do covariates depend on additional arguments? 
if(y$has.funcs) { y$covfunargs <- x$covfunargs y$cfafitter <- attr(x$covfunargs, "fitter") funs <- covars[isfun] fdescrip <- function(f) { if(inherits(f, "distfun")) return("distfun") alist <- paste(names(formals(f)), collapse=", ") paste("function(", alist, ")", sep="") } y$covar.descrip[isfun] <- unlist(lapply(funs, fdescrip)) # find any extra arguments (after args 1 & 2) explicitly named fargs <- lapply(funs, xargs) nxargs <- lengths(fargs) y$has.xargs <- any(nxargs > 0) if(y$has.xargs) { # identify which function arguments are fixed in the call fmap <- data.frame(Covariate=rep.int(names(funs), nxargs), Argument=unlist(fargs)) fmap$Given <- (fmap$Argument %in% names(y$covfunargs)) y$xargmap <- fmap } } } } else { # Antiquated format # Interpret the function call instead callexpr <- parse(text=x$call) callargs <- names(as.list(callexpr[[1]])) # Data frame of covariates was called 'data' in versions up to 1.4-x y$has.covars <- !is.null(callargs) && !is.na(pmatch("data", callargs)) # conservative guess y$uses.covars <- y$has.covars y$covfunargs <- NULL } ###### Arguments in call #################################### y$args <- x[c("call", "correction", "rbord")] y$dataname <- x$dataname %orifnull% x$Qname ####### Main data components ######################### y$entries <- append(list(quad=QUAD, data=DATA, interaction=INTERACT), y$entries) if(is.character(quick) && (quick == "entries")) return(y) ####### Summarise data ############################ y$data <- summary(DATA, checkdup=FALSE) y$quad <- summary(QUAD, checkdup=FALSE) if(is.character(quick) && (quick == "no prediction")) return(y) ###### Trend component ######################### y$trend <- list() y$trend$name <- if(y$poisson) "Intensity" else "Trend" y$trend$formula <- if(y$no.trend) NULL else TREND if(y$poisson && y$no.trend) { # uniform Poisson process y$trend$value <- exp(COEFS[[1]]) y$trend$label <- switch(y$marktype, unmarked="Uniform intensity", multitype="Uniform intensity for each mark level", marked="Uniform intensity in product space", "") } else if(y$stationary) { # stationary switch(y$marktype, unmarked={ # stationary non-poisson non-marked y$trend$label <- "First order term" y$trend$value <- c(beta=exp(COEFS[[1]])) }, multitype={ # stationary, multitype mrk <- marks(DATA) y$trend$label <- if(y$poisson) "Intensities" else "First order terms" # Use predict.ppm to evaluate the fitted intensities lev <- factor(levels(mrk), levels=levels(mrk)) nlev <- length(lev) marx <- list(x=rep.int(0, nlev), y=rep.int(0, nlev), marks=lev) betas <- predict(x, locations=marx, type="trend") names(betas) <- paste("beta_", as.character(lev), sep="") y$trend$value <- betas }, marked={ # stationary, marked y$trend$label <- "Fitted intensity coefficients" y$trend$value <- blankcoefnames(COEFS) }) } else { # not stationary # extract trend terms without trying to understand them much if(is.null(Vnames)) trendbits <- COEFS else { agree <- outer(names(COEFS), Vnames, "==") whichbits <- matrowall(!agree) trendbits <- COEFS[whichbits] } y$trend$label <- ngettext(length(trendbits), "Fitted trend coefficient", "Fitted trend coefficients") y$trend$value <- blankcoefnames(trendbits) } # ----- parameters with SE -------------------------- if(is.character(quick) && (quick == "no variances")) return(y) # Exit before SE for variational Bayes if(!is.null(x$internal$VB)) return(y) if(length(COEFS) > 0) { # compute standard errors se <- x$internal$se if(is.null(se)) { vc <- vcov(x, fine=fine, matrix.action="warn", invalid.action="silent") if(!is.null(vc)) { se <- 
if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } } if(!is.null(se)) { two <- qnorm(0.975) lo <- COEFS - two * se hi <- COEFS + two * se zval <- COEFS/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) # table of coefficient estimates with SE and 95% CI y$coefs.SE.CI <- data.frame(Estimate=COEFS, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } return(y) } summary.ppm }) coef.summary.ppm <- function(object, ...) { object$coefs.SE.CI } print.summary.ppm <- function(x, ...) { if(x$old) warning("Model was fitted by an older version of spatstat") if(is.null(x$args)) { # this is the quick version splat(x$name) return(invisible(NULL)) } ## otherwise - full details splat("Point process model") if(!is.null(dataname <- x$dataname)) splat("Fitted to data:", dataname) fitter <- x$fitter %orifnull% "unknown" methodchosen <- if(is.null(x$method)) "unspecified method" else if(fitter == "exact") "maximum likelihood" else switch(x$method, mpl={ if(x$poisson) { # Poisson process "maximum likelihood (Berman-Turner approximation)" } else { "maximum pseudolikelihood (Berman-Turner approximation)" } }, logi={ if(is.null(x$VB)){ if(x$poisson) { # Poisson process "maximum likelihood (logistic regression approximation)" } else { "maximum pseudolikelihood (logistic regression approximation)" } } else { "maximum posterior density (variational Bayes approximation)" } }, paste("unrecognised method", sQuote(x$method))) improv <- x$improve.type switch(improv, none = { }, ho = { methodchosen <- paste("Huang-Ogata approximate maximum likelihood,", "starting from", methodchosen, "fit") }, enet = { methodchosen <- paste("Regularized", methodchosen) }) splat("Fitting method:", methodchosen) howfitted <- switch(fitter, exact= "analytically", gam = "using gam()", glm = "using glm()", ho = NULL, # legacy paste("using unrecognised fitter", sQuote(fitter))) if(!is.null(howfitted)) splat("Model was fitted", howfitted) if(fitter %in% c("glm", "gam")) { if(x$converged) splat("Algorithm converged") else splat("*** Algorithm did not converge ***") } if(x$projected) splat("Fit was projected to obtain a valid point process model") cat("Call:\n") print(x$args$call) if(x$old) splat("** Executed by old spatstat version", x$version, " **") splat("Edge correction:", dQuote(x$args$correction)) if(x$args$correction == "border") splat("\t[border correction distance r =", x$args$rbord,"]") # print summary of quadrature scheme if(is.null(x$quad)) return(invisible(NULL)) ruletextline() print(x$quad) ## start printing trend information if(is.null(x$no.trend)) return(invisible(NULL)) ruletextline() splat("FITTED :") parbreak() # This bit is currently identical to print.ppm() # except for a bit more fanfare # and the inclusion of the 'gory details' bit notrend <- x$no.trend # stationary <- x$stationary poisson <- x$poisson markeddata <- x$marked multitype <- x$multitype # markedpoisson <- poisson && markeddata # ----------- Print model type ------------------- cat(x$name) cat("\n") if(markeddata) mrk <- x$entries$marks if(multitype) { splat("Possible marks:") cat(paste(levels(mrk))) } # ----- trend -------------------------- if(length(x$trend) == 0) return(invisible(NULL)) parbreak() splat(paste0("---- ", x$trend$name, ": ----")) parbreak() if(!notrend) { splat("Log", if(poisson) "intensity:" else "trend:", pasteFormula(x$trend$formula)) if(x$uses.covars) splat("Model depends on external", 
ngettext(length(x$covars.used), "covariate", "covariates"), commasep(sQuote(x$covars.used))) } if(x$has.covars) { if(notrend || !x$uses.covars) splat("Model object contains external covariates") isdf <- identical(x$covars.are.df, TRUE) if(!is.null(cd <- x$covar.descrip)) { # print description of each covariate splat(paste0("Covariates provided", if(isdf) " (in data frame)" else NULL, ":")) namescd <- names(cd) for(i in seq_along(cd)) splat(paste0("\t", namescd[i], ": ", cd[i])) } if(!is.null(cfa <- x$covfunargs) && length(cfa) > 0) { splat("Covariate function arguments (covfunargs) provided:") namescfa <- names(cfa) for(i in seq_along(cfa)) { cat(paste(namescfa[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cat(paste(cfai, "\n")) } else print(cfa[[i]]) } } } parbreak() splat(paste0(x$trend$label, ":")) tv <- x$trend$value if(!is.list(tv)) print(tv) else for(i in seq_along(tv)) print(tv[[i]]) # table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { cat("\n") print(cose) } # ---- Interaction ---------------------------- if(!poisson) { parbreak() splat(" ---- Interaction: -----") parbreak() print(x$interaction) } ####### Gory details ################################### parbreak() splat("----------- gory details -----") parbreak() COEFS <- x$entries$coef splat("Fitted regular parameters (theta):") print(COEFS) parbreak() splat("Fitted exp(theta):") print(exp(unlist(COEFS))) ##### Warnings issued ####### probs <- x$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(a) { if(is.list(a) && !is.null(p <- a$print)) cat(paste("Problem:\n", p, "\n\n")) }) vali <- x$valid if(identical(vali, FALSE) && waxlyrical("errors")) { parbreak() splat("*** Model is not valid ***") if(!all(is.finite(x$entries$coef))) { splat("*** Some coefficients are NA or Inf ***") } else { splat("*** Interaction parameters are outside valid range ***") } } else if(is.na(vali) && waxlyrical("extras")) { parbreak() splat("[Validity of model could not be checked]") } return(invisible(NULL)) } no.trend.ppm <- function(x) { summary.ppm(x, quick=TRUE)$no.trend } is.stationary.ppm <- function(x) { TREND <- x$trend if(is.null(TREND) || identical.formulae(TREND, ~1)) return(TRUE) if(all(variablesinformula(TREND) == "marks")) return(TRUE) return(FALSE) } is.poisson.ppm <- function(x) { stopifnot(is.ppm(x)) y <- x$interaction if(is.null(y)) y <- Poisson() is.poisson.interact(y) } is.marked.ppm <- function(X, ...) { summary.ppm(X, quick=TRUE)$marked } is.multitype.ppm <- function(X, ...) 
{ summary.ppm(X, quick=TRUE)$multitype } is.expandable.ppm <- function(x) { return(identical(summary(x, quick="entries")$expandable, TRUE)) } blankcoefnames <- function(x) { # remove name labels from ppm coefficients # First decide whether there are 'labels within labels' unlabelled <- unlist(lapply(x, function(z) { is.null(names(z)) } )) if(all(unlabelled)) value <- unlist(x) else { value <- list() for(i in seq_along(x)) value[[i]] <- if(unlabelled[i]) unlist(x[i]) else x[[i]] } return(value) } spatstat.model/R/ho.R0000644000176200001440000000426614331173073014160 0ustar liggesusers# # ho.R # # Huang-Ogata method # # $Revision: 1.18 $ $Date: 2022/11/03 11:08:33 $ # ho.engine <- function(model, ..., nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE) { verifyclass(model, "ppm") if(is.null(start)) start <- list(n.start=data.ppm(model)$n) # check that the model can be simulated if(!valid.ppm(model)) { warning("Fitted model is invalid - cannot be simulated") return(NULL) } # compute the observed value of the sufficient statistic X <- data.ppm(model) sobs <- suffstat(model, X) # generate 'nsim' realisations of the fitted model # and compute the sufficient statistics of the model rmhinfolist <- rmh(model, start, control, preponly=TRUE, verbose=FALSE) if(verb) { cat("Simulating... ") state <- list() } ndone <- 0 while(ndone < nsim) { Xi <- rmhEngine(rmhinfolist, verbose=FALSE) v <- try(suffstat(model,Xi)) if(!inherits(v, "try-error")) { if(ndone == 0) svalues <- matrix(, nrow=nsim, ncol=length(v)) ndone <- ndone + 1 svalues[ndone, ] <- v } if(verb) state <- progressreport(ndone, nsim, state=state) } if(verb) cat("Done.\n\n") # calculate the sample mean and variance of the # sufficient statistic for the simulations smean <- apply(svalues, 2, mean, na.rm=TRUE) svar <- var(svalues, na.rm=TRUE) # value of canonical parameter from MPL fit theta0 <- coef(model) # Newton-Raphson update Vinverse <- solve(svar) theta <- theta0 + as.vector(Vinverse %*% (sobs - smean)) ## appropriate names nama <- names(theta0) if(!is.null(nama)) { names(theta) <- nama dimnames(svar) <- dimnames(Vinverse) <- list(nama, nama) } ## update model newmodel <- model newmodel$coef <- theta newmodel$coef.orig <- theta0 newmodel$improve.type <- "ho" newmodel$fisher <- svar newmodel$varcov <- Vinverse # recompute fitted interaction newmodel$fitin <- NULL newmodel$fitin <- fitin(newmodel) ## update pseudolikelihood value using code in logLik.ppm newmodel$maxlogpl.orig <- model$maxlogpl newmodel$maxlogpl <- logLik(newmodel, new.coef=theta, warn=FALSE) ## return(newmodel) } spatstat.model/R/vcov.mppm.R0000644000176200001440000002347414331173074015502 0ustar liggesusers# Variance-covariance matrix for mppm objects # # $Revision: 1.23 $ $Date: 2021/12/29 07:50:32 $ # # vcov.mppm <- local({ errhandler <- function(whinge, err) { switch(err, fatal=stop(whinge), warn={ warning(whinge) return(NA) }, null= return(NULL), stop(paste("Unrecognised option: err=", dQuote(err)))) } vcov.mppm <- function(object, ..., what="vcov", err="fatal") { what <- match.arg(what, c("vcov", "corr", "fisher", "Fisher", "internals", "all")) if(what == "Fisher") what <- "fisher" if(is.poisson.mppm(object) && object$Fit$fitter == "glm") return(vcmPois(object, ..., what=what, err=err)) return(vcmGibbs(object, ..., what=what, err=err)) } vcmPois <- function(object, ..., what, err, nacoef.action=c("warn", "fatal", "silent"), new.coef=NULL ) { #' legacy algorithm for Poisson case #' detect NA coefficients if(missing(nacoef.action) && !missing(err) && 
     !is.null(err)) {
      nacoef.action <- err
    } else {
      nacoef.action <- match.arg(nacoef.action)
    }
    if(!all(is.finite(coef(object)))) {
      gripe <- "Cannot compute variance; some coefficients are NA, NaN or Inf"
      switch(nacoef.action,
             fatal = stop(gripe, call.=FALSE),
             warn = warning(gripe, call.=FALSE),
             silent = {})
      return(NULL)
    }
    #' get to work
    gf <- object$Fit$FIT
    gd <- object$Fit$moadf
    wt <- gd$.mpl.W
    fo <- object$trend
    if(is.null(fo)) fo <- (~1)
    mof <- model.frame(fo, gd)
    mom <- model.matrix(fo, mof)
    momnames <- dimnames(mom)[[2]]
    ## fitted intensity
    if(!is.null(new.coef) && inherits(gf, c("gam", "lme"))) {
      warn.once("vcovGAMnew",
                "'new.coef' is not supported by vcov.mppm for GAM or LME models; ignored")
      new.coef <- NULL
    }
    fi <- if(is.null(new.coef)) fitted(gf) else
          GLMpredict(gf, gd, new.coef, changecoef=TRUE, type="response")
    fisher <- sumouter(mom, fi * wt)
    dimnames(fisher) <- list(momnames, momnames)
    switch(what,
           fisher = { return(fisher) },
           vcov = {
             vc <- try(solve(fisher), silent=(err == "null"))
             if(inherits(vc, "try-error"))
               return(errhandler("Fisher information is singular", err))
             else
               return(vc)
           },
           corr = {
             co <- try(solve(fisher), silent=(err == "null"))
             if(inherits(co, "try-error"))
               return(errhandler("Fisher information is singular", err))
             sd <- sqrt(diag(co))
             return(co / outer(sd, sd, "*"))
           })
  }
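  ## The Poisson branch above computes the Fisher information as a
  ## weighted sum of outer products of rows of the model matrix,
  ##     I(theta) = sum_u lambda(u) w_u x(u) x(u)^T ,
  ## taken over quadrature points u, and the variance is its inverse.
  ## Minimal sketch of the 'sumouter' step (illustrative, never executed;
  ## M and w are hypothetical stand-ins for 'mom' and 'fi * wt'):
  if(FALSE) {
    M <- matrix(rnorm(12), 4, 3)   # model matrix, one row per quadrature point
    w <- runif(4)                  # weights lambda(u) * w_u
    stopifnot(isTRUE(all.equal(sumouter(M, w), t(M) %*% (w * M))))
  }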
  vcmGibbs <- function(object, ..., what, err,
                       matrix.action=c("warn", "fatal", "silent"),
                       gam.action=c("warn", "fatal", "silent"),
                       logi.action=c("warn", "fatal", "silent"),
                       nacoef.action=c("warn", "fatal", "silent"),
                       new.coef=NULL
                       ) {
    if(!missing(err)) {
      if(err == "null") err <- "silent"
      matrix.action <- if(missing(matrix.action)) err else match.arg(matrix.action)
      gam.action    <- if(missing(gam.action)) err else match.arg(gam.action)
      logi.action   <- if(missing(logi.action)) err else match.arg(logi.action)
      nacoef.action <- if(missing(nacoef.action)) err else match.arg(nacoef.action)
    } else {
      matrix.action <- match.arg(matrix.action)
      gam.action    <- match.arg(gam.action)
      logi.action   <- match.arg(logi.action)
      nacoef.action <- match.arg(nacoef.action)
    }
    #' detect NA coefficients
    if(!all(is.finite(as.matrix(coef(object))))) {
      gripe <- "Cannot compute variance; some coefficients are NA, NaN or Inf"
      switch(nacoef.action,
             fatal = stop(gripe, call.=FALSE),
             warn = warning(gripe, call.=FALSE),
             silent = {})
      return(NULL)
    }
    #' extract stuff from fitted model
    Inter <- object$Inter
    interaction <- Inter$interaction
    itags <- Inter$itags
    Vnamelist <- object$Fit$Vnamelist
    Isoffsetlist <- object$Fit$Isoffsetlist
    glmdata <- object$Fit$moadf
    fitter <- object$Fit$fitter
    fitobj <- object$Fit$FIT
    #' compute fitted intensity
    if(is.null(new.coef)) {
      fi <- fitted(fitobj)
    } else if(fitter != "glm") {
      warn.once("vcovMppmGAMnew",
                "'new.coef' is not supported by vcov.mppm for GAM or LME models; ignored")
      new.coef <- NULL
      fi <- fitted(fitobj)
    } else {
      fi <- GLMpredict(fitobj, glmdata, new.coef, changecoef=TRUE,
                       type="response")
    }
    #' initialise
    cnames <- names(fixed.effects(object))
    nc <- length(cnames)
    A2 <- A3 <- matrix(0, nc, nc, dimnames=list(cnames, cnames))
    #' (1) Compute matrix A1 directly
    glmsub <- glmdata$.mpl.SUBSET
    wt <- glmdata$.mpl.W
    mom <- model.matrix(object)
    lam <- unlist(fitted(object, new.coef=new.coef))
    A1 <- sumouter(mom, lam * wt * glmsub)
    #' (2) compute matrices A2 and A3 for submodels
    #' compute submodels
    subs <- subfits(object, what="basicmodels", new.coef=new.coef)
    n <- length(subs)
    #' identify the (unique) active interaction in each row
    activeinter <- active.interactions(object)
    ## compute A2 and A3 for each submodel
    guts <- lapply(subs, vcov,
                   what="internals",
                   matrix.action=matrix.action,
                   gam.action=gam.action,
                   logi.action=logi.action,
                   dropcoef=TRUE,
                   ...)
    a2 <- lapply(guts, getElement, name="A2")
    a3 <- lapply(guts, getElement, name="A3")
    #' (3) Determine map from interaction variables of subfits
    #'     to canonical variables of 'object'
    maps <- mapInterVars(object, subs, mom)
    #' (4) Process each row, summing A2 and A3
    for(i in seq_len(n)) {
      subi <- subs[[i]]
      cmap <- maps[[i]]
      #' contributes to second order terms only if non-Poisson
      if(!is.poisson(subi)) {
        cnames.i <- names(coef(subi))
        a2i <- a2[[i]]
        a3i <- a3[[i]]
        #' the (unique) tag name of the interaction in this model
        tagi <- colnames(activeinter)[activeinter[i,]]
        #' the corresponding canonical variable name(s) for this interaction
        vni <- Vnamelist[[tagi]]
        #' ignore offset variables
        iso <- Isoffsetlist[[tagi]]
        vni <- vni[!iso]
        if(length(vni)) {
          #' retain only interaction rows & columns (the rest are zero anyway)
          e <- cnames.i %in% vni
          a2ie <- a2i[e, e, drop=FALSE]
          a3ie <- a3i[e, e, drop=FALSE]
          #' all possible mappings
          mappings <- do.call(expand.grid,
                              append(cmap, list(stringsAsFactors=FALSE)))
          nmappings <- nrow(mappings)
          if(nmappings == 0) {
            warning("Internal error: Unable to map submodel to full model")
          } else {
            for(irow in 1:nmappings) {
              for(jcol in 1:nmappings) {
                cmi <- as.character(mappings[irow,])
                cmj <- as.character(mappings[jcol,])
                if(anyDuplicated(cmi) || anyDuplicated(cmj)) {
                  warning("Internal error: duplicated labels in submodel map")
                } else if(!is.null(a2ie)) {
                  A2[cmi,cmj] <- A2[cmi,cmj] + a2ie
                  A3[cmi,cmj] <- A3[cmi,cmj] + a3ie
                }
              }
            }
          }
        }
      }
    }
    #' (5) pack up
    internals <- list(A1=A1, A2=A2, A3=A3)
    if(what %in% c("internals", "all"))
      internals <- c(internals, list(suff=mom))
    if(what %in% c("vcov", "corr", "all")) {
      #' variance-covariance matrix required
      U <- checksolve(A1, matrix.action, , "variance")
      vc <- if(is.null(U)) NULL else (U %*% (A1 + A2 + A3) %*% U)
    }
    out <- switch(what,
                  fisher = A1 + A2 + A3,
                  vcov = vc,
                  corr = {
                    if(is.null(vc)) return(NULL)
                    sd <- sqrt(diag(vc))
                    vc / outer(sd, sd, "*")
                  },
                  internals = internals,
                  all = list(internals=internals,
                             fisher=A1+A2+A3,
                             varcov=vc,
                             invgrad=A1)
                  )
    return(out)
  }

  addsubmatrix <- function(A, B, guessnames) {
    if(is.null(B)) return(A)
    if(is.null(colnames(B)) && !missing(guessnames)) {
      if(is.character(guessnames))
        guessnames <- list(guessnames, guessnames)
      if(all(lengths(guessnames) == dim(B)))
        dimnames(B) <- guessnames
    }
    if(is.null(colnames(B))) {
      #' unusual
      if(!all(dim(A) == dim(B)))
        stop("Internal error: no column names, and matrices non-conformable")
      A <- A + B
      return(A)
    }
    j <- match(colnames(B), colnames(A))
    if(anyNA(j))
      stop("Internal error: unmatched column name(s)")
    A[j,j] <- A[j,j] + B
    return(A)
  }

  bindsubmatrix <- function(A, B) {
    if(is.null(B)) return(A)
    if(is.null(colnames(B))) {
      if(ncol(A) != ncol(B))
        stop("Internal error: no column names, and matrices non-conformable")
      A <- rbind(A, B)
      return(A)
    }
    j <- match(colnames(B), colnames(A))
    if(anyNA(j))
      stop("Internal error: unmatched column name(s)")
    BB <- matrix(0, nrow(B), ncol(A))
    BB[,j] <- B
    A <- rbind(A, BB)
    return(A)
  }

  mergeAlternatives <- function(A, B) {
    okA <- !sapply(A, is.null)
    okB <- !sapply(B, is.null)
    if(any(override <- !okA & okB))
      A[override] <- B[override]
    return(A)
  }

  ## notallzero <- function(df) { apply(df != 0, 2, any) }

  vcov.mppm
})
spatstat.model/R/hierstrhard.R
##
## hierstrhard.R
##
## $Revision: 1.6 $ $Date: 2022/03/07 02:07:02 $
##
## The
hierarchical Strauss-hard core process ## ## ------------------------------------------------------------------- ## HierStraussHard <- local({ # ......... define interaction potential HSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
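  ## Usage sketch for this interaction (illustrative, never executed;
  ## 'X' stands for any multitype point pattern with two mark levels,
  ## and the radii below are hypothetical):
  if(FALSE) {
    r <- matrix(0.1,  2, 2)   # interaction radii r[i,j]
    h <- matrix(0.02, 2, 2)   # hard core radii h[i,j], with h < r
    fit <- ppm(X ~ marks, HierStraussHard(iradii=r, hradii=h))
  }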
  delHSH <- function(which, types, iradii, hradii, archy, ihc) {
    iradii[which] <- NA
    if(any(!is.na(iradii))) {
      # some gamma interactions left
      # return modified HierStraussHard with fewer gamma parameters
      return(HierStraussHard(types=types, iradii=iradii, hradii=hradii,
                             archy=archy))
    } else if(any(!ihc)) {
      # ihc = inactive hard cores
      # no gamma interactions left, but some active hard cores
      return(HierHard(types=types, hradii=hradii, archy=archy))
    } else return(Poisson())
  }

  # Set up basic object except for family and parameters
  BlankHSHobject <- list(
    name     = "Hierarchical Strauss-hard core process",
    creator  = "HierStraussHard",
    family   = "hierpair.family", # evaluated later
    pot      = HSHpotential,
    par      = list(types=NULL, iradii=NULL, hradii=NULL, archy=NULL),
    parnames = c("possible types",
                 "interaction distances",
                 "hardcore distances",
                 "hierarchical order"),
    pardesc  = c("vector of possible types",
                 "matrix of interaction distances",
                 "matrix of hardcore distances",
                 "hierarchical order"),
    hasInf = TRUE,
    selfstart = function(X, self) {
      types <- self$par$types
      hradii <- self$par$hradii
      archy <- self$par$archy
      if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self)
      if(is.null(types)) types <- levels(marks(X))
      if(is.null(archy)) archy <- seq_len(length(types))
      if(!inherits(archy, "hierarchicalordering"))
        archy <- hierarchicalordering(archy, types)
      if(is.null(hradii)) {
        marx <- marks(X)
        d <- nndist(X, by=marx)
        h <- aggregate(d, by=list(from=marx), min)
        h <- as.matrix(h[, -1L, drop=FALSE])
        m <- table(marx)
        mm <- outer(m, m, pmin)
        hradii <- h * mm/(mm+1)
        dimnames(hradii) <- list(types, types)
        hradii[!(archy$relation)] <- NA
      }
      HierStraussHard(types=types, hradii=hradii,
                      iradii=self$par$iradii, archy=archy)
    },
    init = function(self) {
      types <- self$par$types
      iradii <- self$par$iradii
      hradii <- self$par$hradii
      ## hradii could be NULL
      if(!is.null(types)) {
        if(!is.null(dim(types)))
          stop(paste("The", sQuote("types"), "argument should be a vector"))
        if(length(types) == 0)
          stop(paste("The", sQuote("types"), "argument should be",
                     "either NULL or a vector of all possible types"))
        if(anyNA(types))
          stop("NA's not allowed in types")
        if(is.factor(types)) {
          types <- levels(types)
        } else {
          types <- levels(factor(types, levels=types))
        }
        nt <- length(types)
        MultiPair.checkmatrix(iradii, nt, sQuote("iradii"), asymmok=TRUE)
        if(!is.null(hradii))
          MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE)
      }
      ina <- is.na(iradii)
      if(all(ina))
        stop(paste("All entries of", sQuote("iradii"), "are NA"))
      if(!is.null(hradii)) {
        hna <- is.na(hradii)
        both <- !ina & !hna
        if(any(iradii[both] <= hradii[both]))
          stop("iradii must be larger than hradii")
      }
    },
    update = NULL, # default OK
    print = function(self) {
      iradii <- self$par$iradii
      hradii <- self$par$hradii
      types <- self$par$types
      archy <- self$par$archy
      if(waxlyrical('gory'))
        splat(nrow(iradii), "types of points")
      if(!is.null(types) && !is.null(archy)) {
        if(waxlyrical('space')) {
          splat("Possible types and ordering:")
        } else cat("Hierarchy: ")
        print(archy)
      } else if(!is.null(types)) {
        (if(waxlyrical('space')) splat else cat)("Possible types: ")
        print(types)
      } else if(waxlyrical('gory'))
        splat("Possible types:\t not yet determined")
      splat("Interaction radii:")
      dig <- getOption("digits")
      print(hiermat(signif(iradii, dig), archy))
      if(!is.null(hradii)) {
        splat("Hardcore radii:")
        print(hiermat(signif(hradii, dig), archy))
      } else splat("Hardcore radii: not yet determined")
      invisible(NULL)
    },
    interpret = function(coeffs, self) {
      # get possible types
      typ <- self$par$types
      ntypes <-
length(typ) ## get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii ## list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(dround(gammas), self$par$archy))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # parameters to estimate required <- !is.na(iradii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interactions must be non-explosive d <- diag(rep(TRUE, nrow(iradii))) activehard <- !is.na(hradii) & (hradii > 0) return(all(gamma[required & d & !activehard] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems? uptri <- archy$relation required <- !is.na(iradii) & uptri offdiag <- !diag(nrow(iradii)) gammavalid <- is.finite(gamma) & (activehard | offdiag | (gamma <= 1)) naughty <- required & !gammavalid # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHSH(naughty, types, iradii, hradii, archy, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHSH, types=types, iradii=iradii, hradii=hradii, archy=archy, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) & self$par$archy$relation hactive <- !is.na(h) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, hardcore = function(self, coeffs=NA, epsilon=0, ...) 
{ h <- self$par$hradii active <- !is.na(h) & self$par$archy$relation h[!active] <- 0 return(h) }, version=NULL # to be added ) class(BlankHSHobject) <- "interact" # finally create main function HierStraussHard <- function(iradii, hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } iradii[iradii == 0] <- NA out <- instantiate.interact(BlankHSHobject, list(types=types, iradii=iradii, hradii=hradii, archy=archy)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } HierStraussHard <- intermaker(HierStraussHard, BlankHSHobject) HierStraussHard }) spatstat.model/R/vcov.ppm.R0000644000176200001440000017451614331173074015331 0ustar liggesusers## ## Asymptotic covariance & correlation matrices ## and Fisher information matrix ## for ppm objects ## ## $Revision: 1.138 $ $Date: 2022/06/21 02:19:56 $ ## vcov.ppm <- local({ vcov.ppm <- function(object, ..., what="vcov", verbose=TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), hessian=FALSE) { verifyclass(object, "ppm") argh <- list(...) gam.action <- match.arg(gam.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) nacoef.action <- match.arg(nacoef.action) if(!all(is.finite(coef(object)))) { gripe <- "Cannot compute variance; model is not valid" switch(nacoef.action, fatal = stop(gripe, call.=FALSE), warn = warning(gripe, call.=FALSE), silent = {}) return(NULL) } if(missing(fine) && ("A1dummy" %in% names(argh))) { message("Argument 'A1dummy' has been replaced by 'fine'") fine <- as.logical(argh$A1dummy) } else fine <- as.logical(fine) stopifnot(length(what) == 1 && is.character(what)) what.options <- c("vcov", "corr", "fisher", "Fisher", "internals", "all") what.map <- c("vcov", "corr", "fisher", "fisher", "internals", "all") if(is.na(m <- pmatch(what, what.options))) stop(paste("Unrecognised option: what=", sQuote(what))) what <- what.map[m] ## No vcov for Variational Bayes if(!is.null(object$internal$VB)) stop("Variance calculations currently not possible for variational Bayes fit.") if(identical(object$improve.type, "enet")) { switch(gam.action, warn = warning(paste("Variance calculations ignore the fact that", "the model was updated using elastic net")), fatal = stop(paste("Variance calculations are", "currently not possible for", "models updated using elastic net")), silent = { }) } ## no parameters, no variance if(length(coef(object)) == 0) { result <- switch(what, vcov=, corr=, fisher= { matrix(, 0, 0) }, internals=, all={ list() }) return(result) } ## nonstandard calculations (hack) generic.triggers <- c("A1", "new.coef", "modmat", "matwt", "saveterms", "sparseOK") nonstandard <- any(generic.triggers %in% names(argh)) || fine # saveterms <- identical(resolve.1.default("saveterms", argh), TRUE) ## Fisher information *may* be contained in object fisher <- object$fisher varcov <- object$varcov ## Do we need to go into the guts? 
needguts <- nonstandard || (is.null(fisher) && what=="fisher") || (is.null(varcov) && what %in% c("vcov", "corr")) || (what %in% c("internals", "all")) ## In general it is not true that varcov = solve(fisher) ## because we might use different estimators, ## or the parameters might be a subset of the canonical parameter if(needguts) { ## warn if fitted model was obtained using GAM if(identical(object$fitter, "gam")) { switch(gam.action, fatal={ stop(paste("model was fitted by gam();", "execution halted because fatal=TRUE"), call.=FALSE) }, warn={ warning(paste("model was fitted by gam();", "asymptotic variance calculation ignores this"), call.=FALSE) }, silent={}) } ## ++++ perform main calculation ++++ if((is.poisson(object) || (hessian && what!="internals")) && object$method != "logi") { ## Poisson model, or Hessian of Gibbs model without internals results <- vcalcPois(object, ..., what=what, matrix.action=matrix.action, verbose=verbose, fisher=fisher) } else { ## Gibbs model results <- vcalcGibbs(object, ..., what=what, fine=fine, matrix.action=matrix.action, hessian = hessian) } varcov <- results$varcov fisher <- results$fisher internals <- results$internals } if(what %in% c("vcov", "corr") && is.null(varcov)) { ## Need variance-covariance matrix. if(!is.null(fisher) && is.poisson(object)) ## Derive from Fisher information varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") } out <- switch(what, fisher = fisher, vcov = varcov, corr = { if(is.null(varcov)) return(NULL) sd <- sqrt(diag(varcov)) varcov / outer(sd, sd, "*") }, internals = internals, all = results ) return(out) } ## ................ variance calculation for Poisson models ............. vcalcPois <- function(object, ..., what = c("vcov", "corr", "fisher", "internals", "all"), matrix.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), method=c("C", "interpreted"), verbose=TRUE, fisher=NULL, modmat=model.matrix(object), matwt=NULL, # weights on rows of model matrix new.coef=NULL, dropcoef=FALSE, saveterms=FALSE) { ## variance-covariance matrix of Poisson model, ## or Hessian of Gibbs model what <- match.arg(what) method <- match.arg(method) matrix.action <- match.arg(matrix.action) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) internals <- NULL nonstandard <- reweighting || !is.null(new.coef) || saveterms ## detect invalid model if(!all(is.finite(coef(object)))) { gripe<-"Cannot compute variance; some coefficients are NA, NaN or infinite" switch(nacoef.action, fatal=stop(gripe, call.=FALSE), warn=warning(gripe, call.=FALSE), silent={}) return(NULL) } ## compute Fisher information if not known if(is.null(fisher) || nonstandard) { gf <- getglmfit(object) ## we need a glm or gam if(is.null(gf)) { if(verbose) warning("Refitting the model using GLM/GAM") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("Internal error - refitting did not yield a glm object") } ## compute fitted intensity and sufficient statistic ltype <- if(is.poisson(object)) "trend" else "lambda" lambda <- fitted(object, type=ltype, new.coef=new.coef, dropcoef=dropcoef, check=FALSE) mom <- modmat nmom <- nrow(mom) Q <- quad.ppm(object) wt <- w.quad(Q) ok <- getglmsubset(object) Z <- is.data(Q) ## save them if(what == "internals") { internals <- if(!saveterms) list(suff=mom) else list(suff=mom, mom=mom, lambda=lambda, Z=Z, ok=ok) } ## Now restrict all terms to the domain of the pseudolikelihood lambda <- lambda[ok] mom <- mom[ok, , 
drop=FALSE] wt <- wt[ok] Z <- Z[ok] ## apply weights to rows of model matrix - temporary hack if(reweighting) { nwt <- length(matwt) if(nwt == nmom) { ## matwt matches original quadrature scheme - trim it matwt <- matwt[ok] } else if(nwt != sum(ok)) stop("Hack argument matwt has incompatible length") mom.orig <- mom mom <- matwt * mom } ## compute Fisher information switch(method, C = { fisher <- sumouter(mom, lambda * wt) if(reweighting) { gradient <- sumouter(mom.orig, matwt * lambda * wt) } }, interpreted = { if(!reweighting) { fisher <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] v <- outer(ro, ro, "*") * lambda[i] * wt[i] if(!anyNA(v)) fisher <- fisher + v } momnames <- dimnames(mom)[[2]] dimnames(fisher) <- list(momnames, momnames) } else { fisher <- gradient <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] ro0 <- mom.orig[i,] ldu <- lambda[i] * wt[i] v <- outer(ro, ro, "*") * ldu v0 <- outer(ro0, ro0, "*") * matwt[i] * ldu if(!anyNA(v)) fisher <- fisher + v if(!anyNA(v0)) gradient <- gradient + v0 } momnames <- dimnames(mom)[[2]] dn <- list(momnames, momnames) dimnames(fisher) <- dimnames(gradient) <- dn } }) } if(what %in% c("all", "internals")) { ## Internals needed if(is.null(internals)) internals <- list(suff = modmat) internals$fisher <- fisher if(reweighting) internals$gradient <- gradient ilist <- list(internals=internals) } if(what %in% c("all", "vcov", "corr")) { ## Variance-covariance matrix needed if(!reweighting) { ## Derive variance-covariance from Fisher info varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") vcovlist <- list(fisher=fisher, varcov=varcov) } else { invgrad <- checksolve(gradient, matrix.action, "gradient matrix", "variance") varcov <- if(is.null(invgrad)) NULL else invgrad %*% fisher %*% invgrad vcovlist <- list(fisher=fisher, varcov=varcov, invgrad=invgrad) } } result <- switch(what, fisher = list(fisher=fisher), vcov = vcovlist, corr = vcovlist, internals = ilist, all = append(ilist, vcovlist)) return(result) } ## ...................... vcov calculation for Gibbs models .................... 
  vcalcGibbs <- function(fit, ..., fine=FALSE,
                         what = c("vcov", "corr", "fisher",
                                  "internals", "all"),
                         generic=FALSE) {
    what <- match.arg(what)
    if(missing(generic)) {
      ## Change default to TRUE in certain cases
      ## For logistic fits, use generic method by default
      if(fit$method == "logi")
        generic <- TRUE
      ## For 'difficult' interactions, use generic method by default
      fasterbygeneric <- c("AreaInter")
      if(as.interact(fit)$creator %in% fasterbygeneric)
        generic <- TRUE
    }
    ## decide whether to use the generic algorithm
    generic.triggers <- c("A1", "hessian", "new.coef",
                          "matwt", "saveterms", "sparseOK")
    use.generic <- generic ||
      fine ||
      !is.stationary(fit) ||
      (fit$method == "logi" && ("marks" %in% variablesinformula(fit$trend))) ||
      (fit$method != "logi" && has.offset(fit)) ||
      (fit$method == "logi" && has.offset.term(fit)) ||
      !(fit$correction == "border" && fit$rbord == reach(fit)) ||
      any(generic.triggers %in% names(list(...))) ||
      !identical(options("contrasts")[[1]],
                 c(unordered="contr.treatment", ordered="contr.poly"))
    ## compute
    spill <- (what %in% c("all", "internals", "fisher"))
    spill.vc <- (what == "all")
    out <- if(use.generic) {
             vcalcGibbsGeneral(fit, ..., fine=fine,
                               spill=spill, spill.vc=spill.vc)
           } else {
             vcalcGibbsSpecial(fit, ..., spill=spill, spill.vc=spill.vc)
           }
    switch(what,
           vcov = ,
           corr = {
             ## out is the variance-covariance matrix; return it
             return(list(varcov=out))
           },
           fisher = {
             ## out is a list of internal data: extract the Fisher info
             Fmat <- with(out,
                          if(fit$method != "logi") Sigma else
                          Sigma1log+Sigma2log)
             return(list(fisher=Fmat))
           },
           internals = {
             ## out is a list of internal data: return it
             ## (ensure model matrix is included)
             if(is.null(out$mom)) out$mom <- model.matrix(fit)
             return(list(internals=out))
           },
           all = {
             ## out is a list(internals, vc): return it
             ## (ensure model matrix is included)
             if(is.null(out$internals$mom))
               out$internals$mom <- model.matrix(fit)
             ## ensure Fisher info is included
             if(is.null(out$internals$fisher)) {
               Fmat <- with(out$internals,
                            if(fit$method != "logi") Sigma else
                            Sigma1log+Sigma2log)
               out$internals$fisher <- Fmat
             }
             return(out)
           },
           )
    return(NULL)
  }

  ## ...................... general algorithm ...........................
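  ## The general algorithm below (like the special one further down)
  ## assembles the sandwich estimate
  ##     vcov(theta.hat) = A1^{-1} (A1 + A2 + A3) A1^{-1}
  ## where A1 is the sensitivity matrix and A2, A3 carry the second-order
  ## interaction terms; the areaW normalisations applied in the code cancel
  ## in this product.  Minimal sketch of the final step (illustrative,
  ## never executed; A1, A2, A3 stand for the unnormalised p x p matrices):
  if(FALSE) {
    U <- solve(A1)
    vc <- U %*% (A1 + A2 + A3) %*% U
  }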
vcalcGibbsGeneral <- function(model, ..., spill = FALSE, spill.vc = FALSE, na.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), algorithm=c("vectorclip", "vector", "basic"), A1 = NULL, fine = FALSE, hessian = FALSE, modmat = model.matrix(model), matwt = NULL, new.coef = NULL, dropcoef=FALSE, saveterms = FALSE, parallel = TRUE, sparseOK = TRUE ) { modmat.given <- !missing(modmat) na.action <- match.arg(na.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) algorithm <- match.arg(algorithm) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) spill <- spill || spill.vc saveterms <- spill && saveterms logi <- model$method=="logi" asked.parallel <- !missing(parallel) old.coef <- coef(model) use.coef <- adaptcoef(new.coef, old.coef, drop=dropcoef) if(modmat.given) { p <- ncol(modmat) pnames <- colnames(modmat) } else { p <- length(old.coef) pnames <- names(old.coef) } if(p == 0) { ## this probably can't happen if(!spill) return(matrix(, 0, 0)) else return(list()) } dnames <- list(pnames, pnames) # (may be revised later) internals <- list() ## sumobj <- summary(model, quick="entries") correction <- model$correction rbord <- model$rbord R <- reach(model, epsilon=1e-2) Q <- quad.ppm(model) D <- dummy.ppm(model) rho <- model$internal$logistic$rho #### If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(model) Z <- is.data(Q) W <- as.owin(model) areaW <- if(correction == "border") eroded.areas(W, rbord) else area(W) ## ## determine which quadrature points contributed to the ## sum/integral in the pseudolikelihood ## (e.g. some points may be excluded by the border correction) okall <- getglmsubset(model) ## conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) ## data and dummy: lamall <- fitted(model, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) if(anyNA(lamall)) { whinge <- "Some values of the fitted conditional intensity are NA" switch(na.action, fatal = { stop(whinge, call.=FALSE) }, warn = { warning(whinge, call.=FALSE) okall <- okall & !is.na(lamall) }, silent = { okall <- okall & !is.na(lamall) }) } ## data only: lam <- lamall[Z] ok <- okall[Z] nX <- npoints(X) ## sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) ## data and dummy: mall <- modmat if(ncol(mall) != length(pnames)) { if(!dropcoef) stop(paste("Internal error: dimension of sufficient statistic = ", ncol(mall), "does not match length of coefficient vector =", length(pnames)), call.=FALSE) p <- length(pnames) pnames <- colnames(mall) dnames <- list(pnames, pnames) } ## save if(saveterms) internals <- append(internals, list(mom=mall, lambda=lamall, Z=Z, ok=okall, matwt=matwt)) if(reweighting) { ## each column of the model matrix is multiplied by 'matwt' check.nvector(matwt, nrow(mall), things="quadrature points", vname="matwt") mall.orig <- mall mall <- mall * matwt } ## subsets of model matrix mokall <- mall[okall, , drop=FALSE] ## data only: m <- mall[Z, , drop=FALSE] mok <- m[ok, , drop=FALSE] ## if(reweighting) { ## save unweighted versions mokall.orig <- mall.orig[okall, , drop=FALSE] m.orig <- mall.orig[Z, , drop=FALSE] mok.orig <- m.orig[ok, , drop=FALSE] ## matwtX <- matwt[Z] } ## ^^^^^^^^^^^^^^^^ First order (sensitivity) matrices A1, S ## logistic if(logi){ ## Sensitivity matrix S for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) dimnames(Slog) <- dnames 
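      ## Under the logistic method a quadrature point is a data point with
      ## probability p = lambda/(lambda + rho), so the weight
      ## lambda*rho/(lambda+rho)^2 used for Slog above is the Bernoulli
      ## variance p*(1-p), and the weight lambda*rho^2/(lambda+rho)^3 used
      ## for A1log below is p*(1-p)^2.  Quick check of the algebra
      ## (illustrative, never executed; lam and rho are hypothetical scalars):
      if(FALSE) {
        lam <- 2; rho <- 3; p <- lam/(lam + rho)
        stopifnot(isTRUE(all.equal(lam*rho/(lam+rho)^2,   p*(1-p))),
                  isTRUE(all.equal(lam*rho^2/(lam+rho)^3, p*(1-p)^2)))
      }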
## A1 matrix for logistic case A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) dimnames(A1log) <- dnames } ## Sensitivity matrix for MPLE case (= A1) if(is.null(A1) || reweighting) { if(fine){ A1 <- sumouter(mokall, w = (lamall * w.quad(Q))[okall]) if(reweighting) gradient <- sumouter(mokall.orig, w=(matwt * lamall * w.quad(Q))[okall]) } else{ A1 <- sumouter(mok) if(reweighting) gradient <- sumouter(mok.orig, w=matwtX) } } else { stopifnot(is.matrix(A1)) if(!all(dim(A1) == p)) stop(paste("Matrix A1 has wrong dimensions:", prange(dim(A1)), "!=", prange(c(p, p)))) } dimnames(A1) <- dnames ## ^^^^^^^^^^ Second order interaction effects A2, A3 if(hessian) { ## interaction terms suppressed A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) } else { ## ^^^^^^^^^^^^^^^^^^^^ `parallel' evaluation need.loop <- TRUE if(parallel) { ## compute second order difference ## ddS[i,j,] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- deltasuffstat(model, restrict="pairs", force=FALSE, sparseOK=sparseOK) sparse <- inherits(ddS, "sparse3Darray") if(is.null(ddS)) { if(asked.parallel) warning("parallel option not available - reverting to loop") } else { need.loop <- FALSE ## rearrange so that ## ddS[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- aperm(ddS, c(3,2,1)) ## now compute sum_{i,j} for i != j ## outer(ddS[,i,j], ddS[,j,i]) ddSok <- ddS[ , ok, ok, drop=FALSE] A3 <- sumsymouter(ddSok) ## compute pairweight and other arrays if(sparse) { ## Entries are only required for pairs i,j which interact. ## mom.array[ ,i,j] = h(X[i] | X) mom.array <- mapSparseEntries(ddS, margin=2, values=m, conform=TRUE, across=1) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## lamdel[i,j] = lambda(X[i] | X[-j]) is not sparse; avoid computing it lamdel <- NULL ## pairweight[i,j] = lambda(X[i] | X[-j] )/lambda( X[i] | X ) - 1 pairweight <- expm1(tensor1x1(-use.coef, ddS)) } else { ## mom.array[ ,i,j] = h(X[i] | X) mom.array <- array(t(m), dim=c(p, nX, nX)) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## lamdel[i,j] = lambda(X[i] | X[-j]) lamdel <- matrix(lam, nX, nX) * exp(tensor::tensor(-use.coef, ddS, 1, 1)) ## pairweight[i,j] = lamdel[i,j]/lambda[i] - 1 pairweight <- lamdel / lam - 1 } ## now compute sum_{i,j} for i != j ## pairweight[i,j] * outer(momdel[,i,j], momdel[,j,i]) ## for data points that contributed to the pseudolikelihood momdelok <- momdel[ , ok, ok, drop=FALSE] pwok <- pairweight[ok, ok] if(anyNA(momdelok) || anyNA(pwok)) stop("Unable to compute variance: NA values present", call.=FALSE) A2 <- sumsymouter(momdelok, w=pwok) dimnames(A2) <- dimnames(A3) <- dnames if(logi){ if(!sparse) { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- array(lam, c(nX,nX,p)) lam.array <- aperm(lam.array, c(3,1,2)) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) lamdel.array <- array(lamdel, c(nX,nX,p)) lamdel.array <- aperm(lamdel.array, c(3,1,2)) momdellogi <- rho/(lamdel.array+rho)*momdel ddSlogi <- rho/(lam.array+rho)*mom.array - momdellogi } else { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- mapSparseEntries(ddS, margin=2, lam, conform=TRUE, across=1) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) pairweight.array <- aperm(as.sparse3Darray(pairweight), c(3,1,2)) lamdel.array <- pairweight.array * lam.array + lam.array lamdel.logi <- applySparseEntries(lamdel.array, function(y,rho) { rho/(rho+y) }, rho=rho) lam.logi <- applySparseEntries(lam.array, function(y,rho) { rho/(rho+y) }, rho=rho) momdellogi <- momdel * lamdel.logi 
ddSlogi <- mom.array * lam.logi - momdellogi } momdellogiok <- momdellogi[ , ok, ok, drop=FALSE] A2log <- sumsymouter(momdellogiok, w=pwok) ddSlogiok <- ddSlogi[ , ok, ok, drop=FALSE] A3log <- sumsymouter(ddSlogiok) dimnames(A2log) <- dimnames(A3log) <- dnames } } } ## ^^^^^^^^^^^^^^^^^^^^ loop evaluation if(need.loop) { A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) if(saveterms) { ## *initialise* matrices ## lamdel[i,j] = lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)]) lamdel <- matrix(lam, nX, nX) ## momdel[ ,i,j] = h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]) momdel <- array(t(m), dim=c(p, nX, nX)) } ## identify close pairs if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j if(algorithm == "vectorclip") { cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................ ## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## make an empty 'equalpairs' matrix nonE <- matrix(, nrow=0, ncol=2) ## Run through pairs switch(algorithm, basic={ for(i in unique(I)) { Xi <- X[i] Ji <- unique(J[I==i]) if((nJi <- length(Ji)) > 0) { for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] plamj.i <- predict(model, type="cif", locations=X[j], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] pmj.i <- partialModelMatrix(X.ij, X[j], model)[nX-1, ] ## conditional intensity and sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] plami.j <- predict(model, type="cif", locations=X[i], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi.j <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] ## if(reweighting) { pmj.i <- pmj.i * matwtX[j] pmi.j <- pmi.j * matwtX[i] } if(saveterms) { lamdel[i,j] <- plami.j momdel[ , i, j] <- pmi.j lamdel[j,i] <- plamj.i momdel[ , j, i] <- pmj.i } ## increment A2, A3 wt <- plami.j / lam[i] - 1 A2 <- A2 + wt * outer(pmi.j, pmj.i) if(logi) A2log <- A2log + wt * rho/(plami.j+rho) * rho/(plamj.i+rho) * outer(pmi.j, pmj.i) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj.i deltajSi <- m[i, ] - pmi.j A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/ (lam[j]+rho) - pmj.i/(plamj.i+rho)) deltajSilog <- rho*(m[i, ]/ (lam[i]+rho) - pmi.j/(plami.j+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vector={ ## --------- faster algorithm using vector functions -------- for(i in unique(I)) { Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## all points other than X[i] X.i <- X[-i] ## index of XJi in X.i J.i <- Ji - (Ji > i) ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- 
predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj=sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vectorclip={ ## --------- faster version of 'vector' algorithm ## -------- by removing non-interacting points of X for(i in unique(I)) { ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] 
| X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }) } } ## ......... end of loop computation ............... } #### Matrix Sigma Sigma <- A1+A2+A3 if(spill) { ## save internal data (with matrices unnormalised) Hessian <- if(reweighting) gradient else if(logi) Slog else A1 internals <- c(internals, list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW, fisher=Sigma, hessian = Hessian), if(logi) list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog) else NULL, if(reweighting) list(gradient=gradient) else NULL, if(saveterms) c(list(lamdel=lamdel, momdel=momdel), if(logi) list(ddSlogi=ddSlogi) else list(ddS=ddS))) ## return internal data if no further calculation needed if(!spill.vc && !logi) return(internals) } ## ........... calculate variance/covariance matrix for MPL ......... if(!reweighting) { ## Normalise A1 <- A1/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry A1 <- (A1 + t(A1))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(A1, matrix.action, , "variance") } else { ## Normalise gradient <- gradient/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry gradient <- (gradient + t(gradient))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(gradient, matrix.action, , "variance") } ## compute variance-covariance vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW dimnames(vc.mpl) <- dnames ## return variance-covariance matrix, if model was fitted by MPL if(!logi) { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ###### Everything below is only computed for logistic fits ####### ## Matrix Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- model$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ## Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## Changed by ER 2013/06/14 to use the new quadscheme.logi ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = model$internal$logistic$nd) D2 <- Q2$dummy Q2$dummy$Dinfo <- D2$Dinfo Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=model$trend, interaction = model$interaction, method = model$method, correction = model$correction, rbord = model$rbord, covariates = model$covariates) arglist <- append(arglist, model$internal$logistic$extraargs) model2 <- 
do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(model2, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) ## New model matrix mall2 <- model.matrix(model2) okall2 <- getglmsubset(model2) ## index vectors of stratrand cell indices of dummy points inD <- model$internal$logistic$inD inD2 <- model2$internal$logistic$inD ## Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- (bdist.points(D) >= R) ii2 <- (bdist.points(D2) >= R) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } ## OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) ## cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,,drop=FALSE][ok1,] mdum2 <- mall2[!Z2,,drop=FALSE][ok2,] ## finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) ## Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) Sigma2log <- crossprod(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) ## Attaching to Sigma2log calculated above dimnames(Sigma2log) <- dnames if(spill) { ## return internal data only (with matrices unnormalised) internals <- c(internals, list(Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } ## .. Calculate variance-covariance matrix for logistic fit ........... ## normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## evaluate Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW dimnames(vc.logi) <- dnames ## if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } ## vcalcGibbs from Ege Rubak and J-F Coeurjolly ## 2013/06/14, modified by Ege to handle logistic case as well vcalcGibbsSpecial <- function(fit, ..., spill=FALSE, spill.vc=FALSE, special.alg = TRUE, matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent")) { matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) spill <- spill || spill.vc ## Interaction name: iname <- fit$interaction$name ## Does the model have marks which are in the trend? marx <- is.marked(fit) && ("marks" %in% variablesinformula(fit$trend)) ## The full data and window: Xplus <- data.ppm(fit) Wplus <- as.owin(Xplus) ## Fitted parameters and the parameter dimension p (later consiting of p1 trend param. 
and p2 interaction param.): theta <- coef(fit) p <- length(theta) ## Number of points: n <- npoints(Xplus) ## Using the faster algorithms for special cases if(special.alg && fit$method != "logi"){ param <- coef(fit) switch(iname, "Strauss process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, reach(fit$interaction), exp(coef(fit)[2]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Piecewise constant pairwise interaction process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, fit$interaction$par$r, exp(coef(fit)[-1]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Multitype Strauss process"={ matR <- fit$interaction$par$radii R <- c(matR[1,1], matR[1,2], matR[2,2]) ## Only implemented for 2 types with equal interaction range: if(ncol(matR)==2 && marx){ n <- length(theta) res <- vcovMultiStrauss(Xplus, R, exp(theta[c(n-2,n-1,n)]), matrix.action,spill=spill,spill.vc=spill.vc) if(!spill) { res <- contrastmatrix(res, 2) dimnames(res) <- list(names(theta), names(theta)) } return(res) } } ) } ## Matrix specifying equal points in the two patterns in the call to eval below: E <- matrix(rep.int(1:n, 2), ncol = 2) ## Eval. the interaction potential difference at all points (internal spatstat function): # V1 <- fit$interaction$family$eval(Xplus, Xplus, E, fit$interaction$pot, fit$interaction$par, fit$correction) oldopt <- NULL if(fit$interaction$family$name=="pairwise"){ oldopt <- spatstat.options(fasteval = "off") } V1 <- evalInteraction(Xplus, Xplus, E, as.interact(fit), fit$correction) spatstat.options(oldopt) ## Calculate parameter dimensions and correct the contrast type parameters: p2 <- ncol(V1) p1 <- p-p2 if(p1>1) theta[2:p1] <- theta[2:p1] + theta[1] ## V1 <- evalInteraction(Q, Xplus, union.quad(Q), fit$interaction, fit$correction) POT <- attr(V1, "POT") attr(V1, "POT") <- NULL ## Adding the constant potential as first column (one column per type for multitype): if(!marx){ V1 <- cbind(1, V1) colnames(V1) <- names(theta) } else{ lev <- levels(marks(Xplus)) ## Indicator matrix for mark type attached to V1: tmp <- matrix(marks(Xplus), nrow(V1), p1)==matrix(lev, nrow(V1), p-ncol(V1), byrow=TRUE) colnames(tmp) <- lev V1 <- cbind(tmp,V1) } ## Matrices for differences of potentials: E <- matrix(rep.int(1:(n-1), 2), ncol = 2) dV <- V2 <- array(0,dim=c(n,n,p)) for(k in 1:p1){ V2[,,k] <- matrix(V1[,k], n, n, byrow = FALSE) } for(k in (p1+1):p){ diag(V2[,,k]) <- V1[,k] } for(j in 1:n){ ## Fast evaluation for pairwise interaction processes: if(fit$interaction$family$name=="pairwise" && !is.null(POT)){ V2[-j,j,-(1:p1)] <- V1[-j,-(1:p1)]-POT[-j,j,] } else{ V2[-j,j,-(1:p1)] <- fit$interaction$family$eval(Xplus[-j], Xplus[-j], E, fit$interaction$pot, fit$interaction$par, fit$correction) ## Q <- quadscheme(Xplus[-j],emptyppp) ## V2[-j,j,-1] <- evalInteraction(Q, Xplus[-j], Xplus[-j], fit$interaction, fit$correction) } for(k in 1:p){ dV[,j,k] <- V1[,k] - V2[,j,k] } } ## Ratio of first and second order Papangelou - 1: frac <- 0*dV[,,1] for(k in (p1+1):p){ frac <- frac + dV[,,k]*theta[k] } frac <- exp(-frac)-1 ## In the rest we restrict attention to points in the interior: ## The interaction range: R <- reach(fit$interaction) ## The reduced window, area and point pattern: W<-erosion.owin(Wplus,R) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=R X <- Xplus[IntPoints] ## Making a logical matrix, I, indicating R-close pairs which are in the interior: D <- pairdist(Xplus) diag(D) <- Inf I 
<- (D<=R) & outer(IntPoints,IntPoints, "&") ## Matrix A1: A1 <- t(V1[IntPoints,])%*%V1[IntPoints,] ## Matrix A2: A2 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2[k,l] <- A2[l,k] <- sum(I*V2[,,k]*frac*t(V2[,,l])) } } ## Matrix A3: A3 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A3[k,l] <- A3[l,k] <- sum(I*dV[,,k]*t(dV[,,l])) } } ## Matrix Sigma (A1+A2+A3): Sigma<-A1+A2+A3 if(spill) { # save internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- list(names(theta), names(theta)) internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW) # return internal data, if model fitted by MPL if(!spill.vc && fit$method != "logi") return(internals) } # ......... Calculate variance-covariance matrix for MPL ........ # normalise A1 <- A1/areaW Sigma <- Sigma/areaW # evaluate U <- checksolve(A1, matrix.action, , "variance") vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW ## Convert to treatment contrasts if(marx) vc.mpl <- contrastmatrix(vc.mpl, p1) dimnames(vc.mpl) <- list(names(theta), names(theta)) # Return result for standard ppm method: if(fit$method!="logi") { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ######################################################################## ###### The remainder is only executed when the method is logistic ###### ######################################################################## ### Most of this is copy/pasted from vcalcGibbsGeneral correction <- fit$correction Q <- quad.ppm(fit) D <- dummy.ppm(fit) rho <- fit$internal$logistic$rho ## If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(fit) Z <- is.data(Q) # determine which data points entered into the sum in the pseudolikelihood # (border correction, nonzero cif) # data and dummy: okall <- getglmsubset(fit) ## # data only: ## ok <- okall[Z] # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) # data and dummy: lamall <- fitted(fit, check = FALSE) ## # data only: ## lam <- lamall[Z] # sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) # data and dummy: mall <- model.matrix(fit) mokall <- mall[okall, , drop=FALSE] ## # data only: ## m <- mall[Z, , drop=FALSE] ## mok <- m[ok, , drop=FALSE] # Sensitivity matrix S and A1 matrix for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) ## Define W1, W2 and dW for the logistic method based on V1, V2 and dV (frac is unchanged) lambda1 <- exp(.rowSums(matrix(theta,n,p,byrow=TRUE)*V1, n, p)) W1 <- V1*rho/(lambda1+rho) lambda2 <- exp(apply(array(rep(theta,each=n*n),dim=c(n,n,p))*V2, c(1,2), sum)) W2 <- V2 dW <- dV for(k in 1:p){ W2[,,k] <- V2[,,k] * rho/(lambda2+rho) for(j in 1:n){ dW[,j,k] <- W1[,k] - W2[,j,k] } } ## Matrices A2log and A3log for the first component Sigma1log of the variance: A2log <- A3log <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2log[k,l] <- A2log[l,k] <- sum(I*W2[,,k]*frac*t(W2[,,l])) A3log[k,l] <- A3log[l,k] <- sum(I*dW[,,k]*t(dW[,,l])) } } A2log <- A2log A3log <- A3log ## First variance component Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- fit$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" 
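## No exact variance formula is implemented for these dummy types:
## the code falls back to the 'poisson' formula (for dummy type "given")
## or the 'stratrand' formula (for "grid"/"transgrid"), as warned below.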
if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = fit$internal$logistic$nd) D2 <- Q2$dummy Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=fit$trend, interaction = fit$interaction, method = fit$method, correction = fit$correction, rbord = fit$rbord, covariates = fit$covariates) arglist <- append(arglist, fit$internal$logistic$extraargs) fit2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(fit2, check=FALSE) ## New model matrix mall2 <- model.matrix(fit2) okall2 <- getglmsubset(fit2) # index vectors of stratrand cell indices of dummy points inD <- fit$internal$logistic$inD inD2 <- fit2$internal$logistic$inD # Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- inside.owin(D, w = W) ii2 <- inside.owin(D2, w = W) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) # cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,][ok1,] mdum2 <- mall2[!Z2,][ok2,] # finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) ## Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) Sigma2log <- crossprod(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) if(spill) { ## Attach dimnames to all matrices dimnames(Sigma2log) <- dimnames(Slog) <- dimnames(Sigma1log) <- dimnames(A1log) <- dimnames(A2log) <- dimnames(A3log) <- list(names(theta),names(theta)) # return internal data (with matrices unnormalised) internals <- c(internals, list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog, Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } # ....... Compute variance-covariance for logistic fit ............. 
# Normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## Finally the result is calculated: Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW # dimnames(vc.logi) <- list(names(theta), names(theta)) if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } vcovPairPiece <- function(Xplus, R, Gam, matrix.action, spill=FALSE, spill.vc=FALSE){ ## R is the vector of breaks (R[length(R)]= range of the pp. ## Gam is the vector of weights Rmax <- R[length(R)] ## Xplus : point process observed in W+R ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,Rmax) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=Rmax X <- Xplus[IntPoints] nX <- npoints(X) nXplus <- npoints(Xplus) ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: Dplus<-pairdist(Xplus) D <- pairdist(X) diag(D) <- diag(Dplus) <- Inf ## logical matrix, I, indicating R-close pairs: p<-length(R) Tplus<-T<-matrix(0,X$n,p) I<-Iplus<-list() for (i in 1:p){ if (i==1){ Iplus[[1]]<- Dplus <=R[1] I[[1]] <- D<=R[1] } else { Iplus[[i]]<- ((Dplus>R[i-1]) & (Dplus <=R[i])) I[[i]] <- ((D>R[i-1]) & (D <=R[i])) } ## Vector T with the number of $R$-close neighbours to each point: Tplus[,i]<- .colSums(Iplus[[i]], nXplus, nXplus)[IntPoints] T[,i] <- .colSums(I[[i]], nX, nX) } ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,p+1,p+1) ## A1 and A3: A1[1,1] <- npoints(X) for (j in (2:(p+1))){ A1[1,j]<-A1[j,1]<-sum(Tplus[,j-1]) A3[j,j]<-sum(T[,j-1]) for (k in (2:(p+1))){ A1[j,k]<-sum(Tplus[,j-1] * Tplus[,k-1]) } } ## A2: for (j in (2:(p+1))){ A2[1,1]<-A2[1,1]+(Gam[j-1]^(-1)-1)*sum(T[,j-1]) for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] A2[1,j]<-A2[1,j]+(Gam[l-1]^(-1)-1)*sum(T[,l-1]*(vj) ) } A2[j,1]<-A2[1,j] for (k in (2:(p+1))){ for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] if (l==k) vk<-Tplus[,k-1]-1 else vk<-Tplus[,k-1] A2[j,k]<-A2[j,k]+ (Gam[l-1]^(-1)-1)*sum(I[[l-1]]*outer(vj,vk)) } } } Sigma<-A1+A2+A3 nam <- c("(Intercept)", names(Gam)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } vcovMultiStrauss <- function(Xplus, vecR, vecg, matrix.action, spill=FALSE, spill.vc=FALSE){ ## Xplus : marked Strauss point process ## with two types ## observed in W+R (R=max(R11,R12,R22)) ## vecg = estimated parameters of interaction parameters ## ordered as the output of ppm, i.e. vecg=(g11,g12,g22) ## vecR = range for the diff. 
strauss ordered a vecg(R11,R12,R22) R <- max(vecR) R11<-vecR[1];R12<-vecR[2];R22<-vecR[3] ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,R) areaW <- area(W) X1plus<-Xplus[Xplus$marks==levels(Xplus$marks)[1]] X2plus<-Xplus[Xplus$marks==levels(Xplus$marks)[2]] ## Interior points determined by bdist.points: IntPoints1 <- bdist.points(X1plus)>=R IntPoints2 <- bdist.points(X2plus)>=R X1 <- X1plus[IntPoints1] X2 <- X2plus[IntPoints2] nX1 <- npoints(X1) nX2 <- npoints(X2) nX1plus <- npoints(X1plus) nX2plus <- npoints(X2plus) ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: D1plus<-pairdist(X1plus) D1 <- pairdist(X1) diag(D1) <- diag(D1plus) <- Inf D2plus<-pairdist(X2plus) D2 <- pairdist(X2) diag(D2) <- diag(D2plus) <- Inf D12plus<-crossdist(X1,X2plus) T12plus<- .rowSums(D12plus<=R12, nX1, nX2plus) D21plus<-crossdist(X2,X1plus) T21plus<- .rowSums(D21plus<=R12, nX2, nX1plus) I12<-crossdist(X1,X2)<=R12 I21<-crossdist(X2,X1)<=R12 T12<- .rowSums(I12, nX1, nX2) T21<- .rowSums(I21, nX2, nX1) ## logical matrix, I, indicating R-close pairs: I1plus<- D1plus <=R11 I1 <- D1<=R11 I2plus<- D2plus <=R22 I2 <- D2<=R22 ## Vector T with the number of $R$-close neighbours to each point: T1plus<- .colSums(I1plus, nX1plus, nX1plus)[IntPoints1] T1 <- .colSums(I1, nX1, nX1) T2plus<- .colSums(I2plus, nX2plus, nX2plus)[IntPoints2] T2 <- .colSums(I2, nX2, nX2) ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,5,5) ## A1 is filled: A1[1,1]<-npoints(X1) A1[1,3]<-A1[3,1]<-sum(T1plus) A1[1,4]<-A1[4,1]<-sum(T12plus) A1[2,2]<-npoints(X2) A1[2,5]<-A1[5,2]<-sum(T2plus) A1[2,4]<-A1[4,2]<-sum(T21plus) A1[3,3]<-sum(T1plus*T1plus) A1[3,4]<-A1[4,3]<-sum(T1plus*T12plus) A1[5,5]<-sum(T2plus*T2plus) A1[4,5]<-A1[5,4]<-sum(T2plus*T21plus) A1[4,4]<-sum(T12plus*T12plus)+sum(T21plus*T21plus) ## A3 is filled: A3[3,3]<-sum(T1) A3[5,5]<-sum(T2) A3[4,4]<-sum(T12)+sum(T21) ## A2 is filled: gamInv<-vecg^(-1)-1 gi1<-gamInv[1];gi12<-gamInv[2];gi2<-gamInv[3] A2[1,1]<-sum(T1)*gi1 A2[1,2]<-A2[2,1]<-sum(T12)*gi12 A2[1,3]<-A2[3,1]<-sum(T1*(T1plus-1))*gi1 A2[1,5]<-A2[5,1]<-sum(T21*T2plus)*gi12 A2[1,4]<-A2[4,1]<-gi1*sum(T1*(T12plus))+gi12*sum(T21*(T21plus-1)) A2[2,2]<-sum(T2)*gi2 A2[2,3]<-A2[3,2]<-sum(T12*T1plus)*gi12 A2[2,5]<-A2[5,2]<-sum(T2*(T2plus-1))*gi2 A2[2,4]<-A2[4,2]<-gi2*sum(T2*(T21plus))+gi12*sum(T12*(T12plus-1)) A2[3,3]<-gi1*sum(I1*outer(T1plus-1,T1plus-1)) A2[3,5]<-A2[5,3]<- gi12*sum(I12*outer(T1plus,T2plus)) A2[3,4]<-A2[4,3]<-gi1*sum(I1*outer(T1plus-1,T12plus))+gi12*sum(I12*outer(T1plus,T21plus-1)) A2[5,5]<-gi2*sum(I2*outer(T2plus-1,T2plus-1)) A2[4,5]<-A2[5,4]<-gi2*sum(I2*outer(T2plus-1,T21plus))+gi12*sum(I21*outer(T2plus,T12plus-1)) A2[4,4]<-gi1*sum(I1*outer(T12plus,T12plus))+gi2*sum(I2*outer(T21plus,T21plus))+ gi12*sum(I12*outer(T12plus-1,T21plus-1))+gi12*sum(I21*outer(T21plus-1,T12plus-1)) Sigma<-A1+A2+A3 nam <- c(levels(marks(Xplus)), names(vecg)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } # Convert the first p 
rows & columns of variance matrix x # to variances of treatment contrasts contrastmatrix <- function(x,p){ mat <- x ## Correct column and row 1: for(i in 2:p){ mat[1,i] <- mat[i,1] <- x[1,i]-x[1,1] } ## Correct columns and rows 2,...,p: for(i in 2:p){ for(j in 2:p){ mat[i,j] <- x[1,1]-x[1,i]-x[1,j]+x[i,j] } for(j in (p+1):ncol(x)){ mat[i,j] <- mat[j,i] <- x[i,j]-x[1,j] } } mat } vcov.ppm } ) suffloc <- function(object) { verifyclass(object, "ppm") if(!is.poisson(object)) stop("Internals not available for Gibbs models") return(vcov(object, what="internals")$suff) } spatstat.model/R/Gres.R0000644000176200001440000000502514331173072014443 0ustar liggesusers# # Gres.R # # Residual G # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Gres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad G <- Gcom(object, ...) } else { # case where 'object' is the output of 'Gcom' a <- attr(object, "maker") if(is.null(a) || a != "Gcom") stop("fv object was not created by Gcom") G <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=G$r, theo=numeric(length(G$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(G)(r), NULL), "theo", . ~ r, attr(G, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="G") # add residual estimates nam <- names(G) if(all(c("border","bcom") %in% nam)) ans <- bind.fv(ans, data.frame(bres=with(G, border-bcom)), "bold(R)~hat(%s)[bord](r)", "border corrected residual of %s", "bres") if(all(c("han","hcom") %in% nam)) ans <- bind.fv(ans, data.frame(hres=with(G, han-hcom)), "bold(R)~hat(%s)[han](r)", "Hanisch corrected residual of %s", "hres") if("hvar" %in% nam) { savedotnames <- fvnames(ans, ".") hsd <- with(G, sqrt(hvar)) ans <- bind.fv(ans, data.frame(hvar=with(G, hvar), hsd = hsd, hi = 2*hsd, lo = -2*hsd), c("bold(C)^2~hat(%s)[han](r)", "sqrt(bold(C)^2~hat(%s)[han](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of Hanisch corrected residual %s", "pseudo-SD of Hanisch corrected residual %s", "upper critical band for Hanisch corrected residual %s", "lower critical band for Hanisch corrected residual %s"), "hres") ans <- bind.fv(ans, data.frame(hstdres=with(ans, hres/hsd)), "bold(T)~hat(%s)[han](r)", "standardised Hanisch-corrected residual %s", "hres") fvnames(ans, ".") <- c(savedotnames, c("hi", "lo")) } unitname(ans) <- unitname(G) return(ans) } spatstat.model/R/dppm.R0000644000176200001440000001410514331173072014502 0ustar liggesusers#' #' dppm.R #' #' $Revision: 1.17 $ $Date: 2022/08/09 03:38:00 $ dppm <- function(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm", "adapcl"), weightfun=NULL, control=list(), algorithm, statistic="K", statargs=list(), rmax = NULL, epsilon = 0.01, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { method <- match.arg(method) # Instantiate family if not already done. if(is.character(family)) family <- get(family, mode="function") if(inherits(family, "detpointprocfamilyfun")) { familyfun <- family family <- familyfun() } verifyclass(family, "detpointprocfamily") # Check for intensity as only unknown and exit (should be changed for likelihood method) if(length(family$freepar)==1 && (family$freepar %in% family$intensity)) stop("Only the intensity needs to be estimated. 
Please do this with ppm yourself.") # Detect missing rhs of 'formula' and fix if(inherits(formula, c("ppp", "quad"))){ Xname <- short.deparse(substitute(formula)) formula <- as.formula(paste(Xname, "~ 1")) } if(!inherits(formula, "formula")) stop(paste("Argument 'formula' should be a formula")) # kppm(formula, DPP = family, data = data, covariates = data, # startpar = startpar, method = method, weightfun = weightfun, # control = control, algorithm = algorithm, statistic = statistic, # statargs = statargs, rmax = rmax, covfunargs = covfunargs, # use.gam = use.gam, nd = nd, eps = eps, ...) if(missing(algorithm)) { algorithm <- if(method == "adapcl") "Broyden" else "Nelder-Mead" } else check.1.string(algorithm) thecall <- call("kppm", X=formula, DPP=family, data = data, covariates = data, startpar = startpar, method = method, weightfun = weightfun, control = control, algorithm = algorithm, statistic = statistic, statargs = statargs, rmax = rmax, covfunargs = covfunargs, use.gam = use.gam, nd = nd, eps = eps) ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- parent.frame() if(!is.null(data)) callenv <- list2env(data, parent=callenv) result <- eval(thecall, envir=callenv, enclos=baseenv()) return(result) } ## Auxiliary function to mimic cluster models for DPPs in kppm code spatstatDPPModelInfo <- function(model){ out <- list( modelname = paste(model$name, "DPP"), # In modelname field of mincon fv obj. descname = paste(model$name, "DPP"), # In desc field of mincon fv obj. modelabbrev = paste(model$name, "DPP"), # In fitted obj. printmodelname = function(...) paste(model$name, "DPP"), # Used by print.kppm parnames = model$freepar, shapenames = NULL, clustargsnames = NULL, # deprecated checkpar = function(par, ...){ return(par) }, outputshape = function(margs) list(), checkclustargs = function(margs, native=old, ..., old = TRUE) { # deprecated return(list()) }, resolveshape = function(...) 
  { return(list(...)) },
    resolvedots = function(...){ return(list(...)) }, # deprecated
    parhandler = function(...){ return(list(...)) }, # deprecated
    ## K-function
    K = function(par, rvals, ...){
      if(length(par)==1 && is.null(names(par)))
        names(par) <- model$freepar
      mod <- update(model, as.list(par))
      if(!valid(mod)){
        return(rep(Inf, length(rvals)))
      } else{
        return(Kmodel(mod)(rvals))
      }
    },
    ## pair correlation function
    pcf = function(par, rvals, ...){
      if(length(par)==1 && is.null(names(par)))
        names(par) <- model$freepar
      mod <- update(model, as.list(par))
      if(!valid(mod)){
        return(rep(Inf, length(rvals)))
      } else{
        return(pcfmodel(mod)(rvals))
      }
    },
    Dpcf = function(par, rvals, ...){
      if(length(par)==1 && is.null(names(par)))
        names(par) <- model$freepar
      mod <- update(model, as.list(par))
      if(!valid(mod)){
        return(rep(Inf, length(rvals)))
      } else{
        return(sapply(rvals, FUN = dppDpcf(mod)))
      }
    },
    ## sensible starting parameters
    selfstart = function(X) {
      return(model$startpar(model, X))
    }
  )
  return(out)
}

## Auxiliary function used for DPP stuff in kppm.R
dppmFixIntensity <- function(DPP, lambda, po){
  lambdaname <- DPP$intensity
  if(is.null(lambdaname))
    warning("The model has no intensity parameter.\n",
            "Prediction from the fitted model is invalid ",
            "(but no warning or error will be given by predict.dppm).")
  ## Update model object with estimated intensity
  ## if it is a free model parameter
  if(lambdaname %in% DPP$freepar){
    clusters <- update(DPP, structure(list(lambda), .Names=lambdaname))
  } else{
    clusters <- DPP
    lambda <- intensity(clusters)
    ## Overwrite po object with fake version
    X <- po$Q$data
    dont.complain.about(X)
    po <- ppm(X~offset(log(lambda))-1)
    po$fitter <- "dppm"
    ## update pseudolikelihood value using code in logLik.ppm
    po$maxlogpl.orig <- po$maxlogpl
    po$maxlogpl <- logLik(po, warn=FALSE)
    #########################################
  }
  return(list(clusters=clusters, lambda=lambda, po=po))
}

## Auxiliary function used for DPP stuff in kppm.R
dppmFixAlgorithm <- function(algorithm, changealgorithm, clusters, startpar){
  if(!setequal(clusters$freepar, names(startpar)))
    stop("Names of the startpar vector do not match the free parameters of the model.")
  lower <- upper <- NULL
  if(changealgorithm){
    bb <- dppparbounds(clusters, names(startpar))
    if(all(is.finite(bb))){
      algorithm <- "Brent"
      lower <- bb[1L]
      upper <- bb[2L]
    } else{
      algorithm <- "BFGS"
    }
  }
  return(list(algorithm = algorithm, lower = lower, upper = upper))
}

updateData.dppm <- function(model, X, ...)
{
  update.kppm(model, X=X)
}
spatstat.model/R/ic.kppm.R0000644000176200001440000000174614374302014015107 0ustar liggesusers#'
#' ic.kppm.R
#'
#' Original by Rasmus Waagepetersen, 10 April 2021
#'
#' Tweaks by Adrian Baddeley

ic <- function(object) {
  UseMethod("ic")
}

ic.ppm <- function(object) {
  loglike <- logLik(object)
  ll <- as.numeric(loglike)
  df <- attr(loglike, "df")
  ## betahat <- coef(object)
  n <- npoints(data.ppm(object))
  cbic <- -2*ll + df*log(n)
  cic <- -2*ll + df*2
  ## cbic is BIC and cic is AIC in case of Poisson process
  return(list(loglike=ll, cbic=cbic, cic=cic, df=df))
}

ic.kppm <- function(object){
  po <- as.ppm(object)
  loglike <- logLik(po)
  ll <- as.numeric(loglike)
  betahat <- coef(object)
  p <- length(betahat)
  n <- npoints(data.ppm(po))
  co <- vcov(object, what="internals")
  ## compute p_approx, the effective degrees of freedom
  df <- p + sum(diag(as.matrix(co$J.inv %*% co$E)))
  cbic <- -2*ll + df*log(n)
  cic <- -2*ll + df*2
  ## cbic is BIC and cic is AIC in case of Poisson process
  return(list(loglike=ll, cbic=cbic, cic=cic, df=df))
}
spatstat.model/R/ippm.R0000644000176200001440000002257514331173073014510 0ustar liggesusers#
# ippm.R
#
# $Revision: 2.28 $  $Date: 2020/12/19 05:25:06 $
#
# Fisher scoring algorithm for irregular parameters in ppm trend
#
ippm <- local({

  chucknames <- c("iScore", "start", "nlm.args", "silent", "warn.unused")

  hasarg <- function(f,a) { a %in% names(formals(f)) }

  ippm <- function(Q, ...,
                   iScore=NULL,
                   start=list(),
                   covfunargs=start,
                   nlm.args=list(stepmax=1/2),
                   silent=FALSE,
                   warn.unused=TRUE) {
    ## remember call
    cl <- match.call()
    callframe <- parent.frame()
    callstring <- short.deparse(sys.call())
    ##
    ppmcall <- cl[!(names(cl) %in% chucknames)]
    ppmcall[[1L]] <- as.name('ppm')
    ## validate
    if(!is.list(start))
      stop("start should be a list of initial values for irregular parameters")
    if(length(start) == 0) {
      ppmcall <- ppmcall[names(ppmcall) != "covfunargs"]
      return(eval(ppmcall, callframe))
    }
    if(!is.null(iScore)) {
      if(!is.list(iScore) || length(iScore) != length(start))
        stop("iScore should be a list of the same length as start")
      stopifnot(identical(names(iScore), names(start)))
      if(!all(sapply(iScore, is.function)))
        stop("iScore should be a list of functions")
    }
    ##
    smap <- match(names(start), names(covfunargs))
    if(anyNA(smap))
      stop("variables in start should be a subset of variables in covfunargs")
    covfunargs[smap] <- start
    ## fit the initial model and extract information
    ppmcall$covfunargs <- covfunargs
    fit0 <- eval(ppmcall, callframe)
    # lpl0 <- fit0$maxlogpl
    # p <- length(coef(fit0))
    ## examine covariates and trend
    covariates <- fit0$covariates
    isfun <- sapply(covariates, is.function)
    covfuns <- covariates[isfun]
    ## determine which covariates depend on which irregular parameters
    pnames <- names(start)
    depmat <- matrix(FALSE, nrow=length(covfuns), ncol=length(pnames))
    rownames(depmat) <- names(covfuns)
    colnames(depmat) <- pnames
    for(j in 1:length(pnames))
      depmat[,j] <- sapply(covfuns, hasarg, pnames[j])
    ## find covariates that depend on ANY irregular parameter
    depvar <- rownames(depmat)[apply(depmat, 1L, any)]
    ## check that these covariates appear only in offset terms
    covnames.fitted <- model.covariates(fit0, fitted=TRUE, offset=FALSE)
    if(any(uhoh <- depvar %in% covnames.fitted))
      stop(paste(ngettext(sum(uhoh), "The covariate", "The covariates"),
                 commasep(sQuote(depvar[uhoh])),
                 "should appear only in offset terms"))
    ## check that every irregular parameter to be updated appears somewhere
    cov.names.offset <- model.covariates(fit0, fitted=FALSE, offset=TRUE)
covfun.names.offset <- intersect(cov.names.offset, names(covfuns)) usearg <- apply(depmat[covfun.names.offset, , drop=FALSE], 2L, any) if(!all(usearg)) { if(warn.unused) { nbad <- sum(!usearg) warning(paste("Cannot maximise over the irregular", ngettext(nbad, "parameter", "parameters"), commasep(sQuote(names(usearg)[!usearg])), ngettext(nbad, "because it is", "because they are"), "not used in any term of the model")) } ## restrict start <- start[usearg] if(!is.null(iScore)) iScore <- iScore[usearg] pnames <- names(start) } if(length(start) == 0) { ppmcall <- ppmcall[names(ppmcall) != "covfunargs"] return(eval(ppmcall, callframe)) } ## parameters for objective function fdata <- list(fit0=fit0, nreg=length(coef(fit0)), covfunargs=covfunargs, smap=smap, pnames=pnames, iScore=iScore) ## minimise objective startvec <- unlist(start) typsize <- abs(startvec) typsize <- pmax(typsize, min(typsize[typsize > 0])) dont.complain.about(fdata) g <- do.call(nlm, resolve.defaults(list(f=quote(objectivefun), p=startvec, thedata=quote(fdata)), nlm.args, list(typsize=typsize))) popt <- g$estimate ## detect error states icode <- g$code if(!silent && icode > 2) { errmess <- nlmcodes[[icode]] if(!is.null(errmess)) warning(errmess) else warning("Unrecognised error code ", paste(icode), " returned from nlm", call.=FALSE) } ## return optimised model covfunargs[smap] <- popt attr(covfunargs, "fitter") <- "ippm" attr(covfunargs, "free") <- names(start) fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE) fit$dispatched <- fit[c("call", "callstring", "callframe")] fit$call <- cl fit$callstring <- callstring fit$callframe <- callframe fit$iScore <- iScore class(fit) <- c("ippm", class(fit)) return(fit) } ## define objective function objectivefun <- function(param, thedata) { with(thedata, { ## fit model with current irregular parameters param <- as.list(param) names(param) <- pnames covfunargs[smap] <- param fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE) lpl <- logLik(fit, warn=FALSE) ## return negative logL because nlm performs *minimisation* value <- -as.numeric(lpl) if(!is.null(iScore)) { ## compute analytic derivatives stuff <- ppmInfluence(fit, what="score", iScore=iScore, iArgs=param) score <- stuff$score if(length(score) == length(coef(fit)) + length(param)) attr(value, "gradient") <- -score[-(1:nreg), drop=FALSE] ## attr(value, "hessian") <- -hess[-(1:nreg), -(1:nreg), drop=FALSE] } return(value) }) } ## from help(nlm) nlmcodes <- list(c("Relative gradient is close to zero; ", "current iterate is probably solution"), c("Successive iterates are within tolerance; ", "current iterate is probably solution"), c("Last global step failed to locate a point ", "lower than current estimate. ", "Either current estimate is an approximate ", "local minimum of the function ", "or 'steptol' is too small"), "Iteration limit exceeded", c("Maximum step size 'stepmax' ", "exceeded five consecutive times. ", "Either the function is unbounded below, ", "becomes asymptotic to a finite value ", "from above in some direction, ", "or 'stepmax' is too small")) ippm }) update.ippm <- local({ update.ippm <- function(object, ..., envir=environment(terms(object))) { # call <- match.call() new.call <- old.call <- object$call old.callframe <- object$callframe Qold <- eval(old.call$Q, as.list(envir), enclos=old.callframe) argh <- list(...) 
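    ## If any argument in '...' is a formula, it is treated as the new trend;
    ## a formula with a left-hand side also specifies a new point pattern 'Q'.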
if(any(isfmla <- sapply(argh, inherits, what="formula"))) { if(sum(isfmla) > 1) stop("Syntax not understood: several arguments are formulas") i <- min(which(isfmla)) new.fmla <- argh[[i]] argh <- argh[-i] if(inherits(Qold, "formula")) { ## formula will replace 'Q' if(is.null(lhs.of.formula(new.fmla))) { f <- (. ~ x) f[[3L]] <- new.fmla[[2L]] new.fmla <- f } new.call$Q <- newformula(Qold, new.fmla, old.callframe, envir, expandpoly=FALSE) } else if(inherits(Qold, c("ppp", "quad"))) { ## formula will replace 'trend' and may replace 'Q' new.fmla <- newformula(formula(object), new.fmla, old.callframe, envir, expandpoly=FALSE) if(!is.null(lhs <- lhs.of.formula(new.fmla))) { newQ <- eval(eval(substitute(substitute(l, list("."=Q)), list(l=lhs, Q=Qold))), envir=as.list(envir), enclos=old.callframe) new.call$Q <- newQ } new.fmla <- rhs.of.formula(new.fmla) if("trend" %in% names(old.call)) { new.call$trend <- new.fmla } else { ## find which argument in the original call was a formula wasfmla <- sapply(old.call, formulaic, envir=as.list(envir), enclos=old.callframe) if(any(wasfmla)) { new.call[[min(which(wasfmla))]] <- new.fmla } else { new.call$trend <- new.fmla } } } } ## silence the warnings about unused covfunargs (unless overruled) new.call$warn.unused <- FALSE ## other arguments if(length(argh) > 0) { nama <- names(argh) named <- if(is.null(nama)) rep(FALSE, length(argh)) else nzchar(nama) if(any(named)) new.call[nama[named]] <- argh[named] if(any(!named)) new.call[length(new.call) + 1:sum(!named)] <- argh[!named] } result <- eval(new.call, as.list(envir), enclos=old.callframe) return(result) } formulaic <- function(z, envir, enclos) { u <- try(eval(z, envir, enclos)) return(inherits(u, "formula")) } update.ippm }) spatstat.model/R/mpl.R0000644000176200001440000017320514460147456014353 0ustar liggesusers# mpl.R # # $Revision: 5.238 $ $Date: 2023/07/26 07:45:45 $ # # mpl.engine() # Fit a point process model to a two-dimensional point pattern # by maximum pseudolikelihood # # mpl.prepare() # set up data for glm procedure # # ------------------------------------------------------------------- # # "mpl" <- function(Q, # trend = ~1, # interaction = NULL, # data = NULL, # correction="border", # rbord = 0, # use.gam=FALSE) { # .Deprecated("ppm", package="spatstat") # ppm(Q=Q, trend=trend, interaction=interaction, # covariates=data, correction=correction, rbord=rbord, # use.gam=use.gam, method="mpl") # } mpl.engine <- function(Q, trend = ~1, interaction = NULL, ..., covariates = NULL, subsetexpr = NULL, clipwin = NULL, covfunargs = list(), correction="border", rbord = 0, use.gam=FALSE, gcontrol=list(), GLM=NULL, GLMfamily=NULL, GLMcontrol=NULL, famille=NULL, forcefit=FALSE, nd = NULL, eps = eps, allcovar=FALSE, callstring="", precomputed=NULL, savecomputed=FALSE, preponly=FALSE, rename.intercept=TRUE, justQ = FALSE, weightfactor = NULL) { GLMname <- if(!missing(GLM)) short.deparse(substitute(GLM)) else NULL ## Extract precomputed data if available if(!is.null(precomputed$Q)) { Q <- precomputed$Q X <- precomputed$X P <- precomputed$U } else { ## Determine quadrature scheme from argument Q if(verifyclass(Q, "quad", fatal=FALSE)) { ## user-supplied quadrature scheme - validate it validate.quad(Q, fatal=TRUE, repair=FALSE, announce=TRUE) ## Extract data points X <- Q$data } else if(verifyclass(Q, "ppp", fatal = FALSE)) { ## point pattern - create default quadrature scheme X <- Q Q <- quadscheme(X, nd=nd, eps=eps, check=FALSE) } else stop("First argument Q should be a point pattern or a quadrature scheme") 
## Data and dummy points together P <- union.quad(Q) } ## clip to subset? if(!is.null(clipwin)) { if(is.data.frame(covariates)) covariates <- covariates[inside.owin(P, w=clipwin), , drop=FALSE] Q <- Q[clipwin] X <- X[clipwin] P <- P[clipwin] } ## secret exit if(justQ) return(Q) ## computed <- if(savecomputed) list(X=X, Q=Q, U=P) else NULL ## ## Validate main arguments if(!is.null(trend) && !inherits(trend, "formula")) stop(paste("Argument", sQuote("trend"), "must be a formula")) if(!is.null(interaction) && !inherits(interaction, "interact")) stop(paste("Argument", sQuote("interaction"), "has incorrect format")) ## check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") ## rbord applies only to border correction if(correction != "border") rbord <- 0 ## covfunargs <- as.list(covfunargs) ## ## Interpret the call if(is.null(trend)) { trend <- ~1 environment(trend) <- parent.frame() } want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.null(interaction) && !is.null(interaction$family) ## Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2023/07/26 07:45:45 $") if(want.inter) { ## ensure we're using the latest version of the interaction object if(outdated.interact(interaction)) interaction <- update(interaction) } ## if(!want.trend && !want.inter && !forcefit && !allcovar && is.null(subsetexpr)) { ## the model is the uniform Poisson process ## The MPLE (= MLE) can be evaluated directly npts <- npoints(X) W <- as.owin(X) if(correction == "border" && rbord > 0) { npts <- sum(bdist.points(X) >= rbord) areaW <- eroded.areas(W, rbord) } else { npts <- npoints(X) areaW <- area(W) } volume <- areaW * markspace.integral(X) lambda <- npts/volume ## fitted canonical coefficient co <- log(lambda) ## asymptotic variance of canonical coefficient varcov <- matrix(1/npts, 1, 1) fisher <- matrix(npts, 1, 1) se <- sqrt(1/npts) ## give names tag <- if(rename.intercept) "log(lambda)" else "(Intercept)" names(co) <- tag dimnames(varcov) <- dimnames(fisher) <- list(tag, tag) ## maximised log likelihood maxlogpl <- if(npts == 0) 0 else npts * (log(lambda) - 1) ## rslt <- list( method = "mpl", fitter = "exact", projected = FALSE, coef = co, trend = trend, interaction = NULL, fitin = fii(), Q = Q, maxlogpl = maxlogpl, satlogpl = NULL, internal = list(computed=computed, se=se), covariates = mpl.usable(covariates), ## covariates are still retained! covfunargs = covfunargs, subsetexpr = NULL, correction = correction, rbord = rbord, terms = terms(trend), fisher = fisher, varcov = varcov, version = the.version, problems = list()) class(rslt) <- "ppm" return(rslt) } ################# P r e p a r e D a t a ###################### prep <- mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, "quadrature points", callstring, subsetexpr=subsetexpr, allcovar=allcovar, precomputed=precomputed, savecomputed=savecomputed, covfunargs=covfunargs, weightfactor=weightfactor, ...) 
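  ## 'prep' is the model frame for the Berman-Turner device:
  ## prep$fmla is the glm formula, prep$glmdata is a data frame containing
  ## the pseudo-response .mpl.Y, the quadrature weights .mpl.W and the
  ## covariates, and prep$Vnames gives the names of the interaction terms.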
## back door if(preponly) { ## exit now, returning prepared data frame and internal information prep$info <- list(want.trend=want.trend, want.inter=want.inter, correction=correction, rbord=rbord, interaction=interaction) return(prep) } fmla <- prep$fmla glmdata <- prep$glmdata problems <- prep$problems likelihood.is.zero <- prep$likelihood.is.zero is.identifiable <- prep$is.identifiable computed <- resolve.defaults(prep$computed, computed) IsOffset <- prep$IsOffset ## update covariates (if they were resolved from the environment) if(!is.null(prep$covariates)) covariates <- prep$covariates ################# F i t i t #################################### if(!is.identifiable) stop(paste("in", callstring, ":", problems$unidentifiable$print), call.=FALSE) ## to avoid problem with package checker .mpl.W <- glmdata$.mpl.W .mpl.SUBSET <- glmdata$.mpl.SUBSET ## determine algorithm control parameters if(is.null(gcontrol)) gcontrol <- list() else stopifnot(is.list(gcontrol)) gcontrol <- if(!is.null(GLMcontrol)) do.call(GLMcontrol, gcontrol) else if(want.trend && use.gam) do.call(mgcv::gam.control, gcontrol) else do.call(stats::glm.control, gcontrol) ## Fit the generalized linear/additive model. if(is.null(GLM) && is.null(famille)) { ## the sanctioned technique, using `quasi' family if(want.trend && use.gam) { FIT <- gam(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- "gam" } else { FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) fittername <- "glm" } } else if(!is.null(GLM)) { ## alternative GLM fitting function or penalised GLM etc fam <- GLMfamily %orifnull% quasi(link="log", variance="mu") FIT <- GLM(fmla, family=fam, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- GLMname } else { ## experimentation only! 
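    ## Here 'famille' is a user-specified alternative to the sanctioned
    ## quasi(link="log") family; as flagged above this is for
    ## experimentation only, and results should be checked carefully.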
if(is.function(famille)) famille <- famille() stopifnot(inherits(famille, "family")) if(want.trend && use.gam) { FIT <- gam(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- "experimental" } else { FIT <- glm(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) fittername <- "experimental" } } environment(FIT$terms) <- sys.frame(sys.nframe()) ################ I n t e r p r e t f i t ####################### ## Fitted coefficients co <- FIT$coef ## glm covariates W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(Q) Vnames <- prep$Vnames vnameprefix <- prep$vnameprefix ## saturated log pseudolikelihood satlogpl <- - (sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) ## attained value of max log pseudolikelihood maxlogpl <- if(likelihood.is.zero) -Inf else (satlogpl - deviance(FIT)/2) ## fitted interaction object fitin <- if(want.inter) { fii(interaction, co, Vnames, IsOffset, vnameprefix) } else fii() unitname(fitin) <- unitname(X) ###################################################################### ## Clean up & return rslt <- list( method = "mpl", fitter = fittername, projected = FALSE, coef = co, trend = trend, interaction = if(want.inter) interaction else NULL, fitin = fitin, Q = Q, maxlogpl = maxlogpl, satlogpl = satlogpl, internal = list(glmfit=FIT, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, fmla=fmla, computed=computed, vnamebase=prep$vnamebase, vnameprefix=prep$vnameprefix), covariates = mpl.usable(covariates), covfunargs = covfunargs, subsetexpr = subsetexpr, correction = correction, rbord = rbord, terms = terms(trend), version = the.version, problems = problems) class(rslt) <- "ppm" return(rslt) } ########################################################################## ### ///////////////////////////////////////////////////////////////////// ########################################################################## mpl.prepare <- local({ mpl.prepare <- function(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname="quadrature points", callstring="", ..., subsetexpr=NULL, covfunargs=list(), allcovar=FALSE, precomputed=NULL, savecomputed=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, warn.illegal=TRUE, warn.unidentifiable=TRUE, weightfactor=NULL, skip.border=FALSE, clip.interaction=TRUE, splitInf=FALSE) { ## Q: quadrature scheme ## X = data.quad(Q) ## P = union.quad(Q) if(missing(want.trend)) want.trend <- !is.null(trend) && !identical.formulae(trend, ~1) if(missing(want.inter)) want.inter <- !is.null(interaction) && !is.null(interaction$family) want.subset <- !is.null(subsetexpr) computed <- list() problems <- list() names.precomputed <- names(precomputed) likelihood.is.zero <- FALSE is.identifiable <- TRUE if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } ################ C o m p u t e d a t a #################### ## Extract covariate values updatecovariates <- FALSE covariates.df <- NULL if(allcovar || want.trend || want.subset) { if("covariates.df" %in% names.precomputed) { covariates.df <- precomputed$covariates.df } else { if(!is.data.frame(covariates)) { ## names of 'external' covariates to be found covnames <- 
variablesinformula(trend) if(want.subset) covnames <- union(covnames, all.vars(subsetexpr)) if(allcovar) covnames <- union(covnames, names(covariates)) covnames <- setdiff(covnames, c("x", "y", "marks")) ## resolve 'external' covariates tenv <- environment(trend) covariates <- getdataobjects(covnames, tenv, covariates, fatal=TRUE) updatecovariates <- any(attr(covariates, "external")) } ## extract values of covariates ('internal' and 'external') covariates.df <- mpl.get.covariates(covariates, P, Pname, covfunargs) } if(savecomputed) computed$covariates.df <- covariates.df } ## Form the weights and the ``response variable''. if("dotmplbase" %in% names.precomputed) .mpl <- precomputed$dotmplbase else { nQ <- n.quad(Q) wQ <- w.quad(Q) mQ <- marks.quad(Q) ## is NULL for unmarked patterns zQ <- is.data(Q) yQ <- numeric(nQ) yQ[zQ] <- 1/wQ[zQ] zeroes <- attr(wQ, "zeroes") sQ <- if(is.null(zeroes)) rep.int(TRUE, nQ) else !zeroes ## tweak weights ONLY if(!is.null(weightfactor)) wQ <- wQ * weightfactor ## pack up .mpl <- list(W = wQ, Z = zQ, Y = yQ, MARKS = mQ, SUBSET = sQ) } if(savecomputed) computed$dotmplbase <- .mpl glmdata <- data.frame(.mpl.W = .mpl$W, .mpl.Y = .mpl$Y) ## count data and dummy points in specified subset izdat <- .mpl$Z[.mpl$SUBSET] ndata <- sum(izdat) # ndummy <- sum(!izdat) ## Determine the domain of integration for the pseudolikelihood. if(correction == "border") { bdP <- if("bdP" %in% names.precomputed) precomputed$bdP else bdist.points(P) if(savecomputed) computed$bdP <- bdP .mpl$DOMAIN <- (bdP >= rbord) } skip.border <- skip.border && (correction == "border") ####################### T r e n d ############################## internal.names <- c(".mpl.W", ".mpl.Y", ".mpl.Z", ".mpl.SUBSET", "SUBSET", ".mpl") reserved.names <- c("x", "y", "marks", internal.names) if(allcovar || want.trend || want.subset) { trendvariables <- variablesinformula(trend) ## Check for use of internal names in trend cc <- check.clashes(internal.names, trendvariables, "the model formula") if(cc != "") stop(cc) if(want.subset) { subsetvariables <- all.vars(subsetexpr) cc <- check.clashes(internal.names, trendvariables, "the subset expression") if(cc != "") stop(cc) trendvariables <- union(trendvariables, subsetvariables) } ## Standard variables if(allcovar || "x" %in% trendvariables) glmdata <- data.frame(glmdata, x=P$x) if(allcovar || "y" %in% trendvariables) glmdata <- data.frame(glmdata, y=P$y) if(("marks" %in% trendvariables) || !is.null(.mpl$MARKS)) { if(is.null(.mpl$MARKS)) stop("Model formula depends on marks, but data do not have marks", call.=FALSE) glmdata <- data.frame(glmdata, marks=.mpl$MARKS) } ## ## Check covariates if(!is.null(covariates.df)) { ## Check for duplication of reserved names cc <- check.clashes(reserved.names, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) ## Take only those covariates that are named in the trend formula if(!allcovar) needed <- names(covariates.df) %in% trendvariables else needed <- rep.int(TRUE, ncol(covariates.df)) if(any(needed)) { covariates.needed <- covariates.df[, needed, drop=FALSE] ## Append to `glmdata' glmdata <- data.frame(glmdata,covariates.needed) ## Ignore any quadrature points that have NA's in the covariates nbg <- is.na(covariates.needed) if(any(nbg)) { offending <- matcolany(nbg) covnames.na <- names(covariates.needed)[offending] quadpoints.na <- matrowany(nbg) n.na <- sum(quadpoints.na) n.tot <- length(quadpoints.na) errate <- n.na/n.tot pcerror <- round(signif(100 * errate, 2), 2) complaint <- paste("Values of the", 
ngettext(length(covnames.na), "covariate", "covariates"), paste(sQuote(covnames.na), collapse=", "), "were NA or undefined at", paste(pcerror, "%", " (", n.na, " out of ", n.tot, ")", sep=""), "of the", Pname) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. = FALSE) .mpl$SUBSET <- .mpl$SUBSET & !quadpoints.na details <- list(covnames.na = covnames.na, quadpoints.na = quadpoints.na, print = complaint) problems <- append(problems, list(na.covariates=details)) } } } } ###################### I n t e r a c t i o n #################### Vnames <- NULL IsOffset <- NULL forbid <- NULL if(want.inter) { ## Form the matrix of "regression variables" V. ## The rows of V correspond to the rows of P (quadrature points) ## while the column(s) of V are the regression variables (log-potentials) E <- precomputed$E %orifnull% equalpairs.quad(Q) if(!skip.border) { ## usual case V <- evalInteraction(X, P, E, interaction, correction, ..., splitInf=splitInf, precomputed=precomputed, savecomputed=savecomputed) } else { ## evaluate only in eroded domain if(all(c("Esub", "Usub", "Retain") %in% names.precomputed)) { ## use precomputed data Psub <- precomputed$Usub Esub <- precomputed$Esub Retain <- precomputed$Retain } else { ## extract subset of quadrature points Retain <- .mpl$DOMAIN | is.data(Q) Psub <- P[Retain] ## map serial numbers in P to serial numbers in Psub Pmap <- cumsum(Retain) ## extract subset of equal-pairs matrix keepE <- Retain[ E[,2] ] Esub <- E[ keepE, , drop=FALSE] ## adjust indices in equal pairs matrix Esub[,2] <- Pmap[Esub[,2]] } ## call evaluator on reduced data if(all(c("X", "Q", "U") %in% names.precomputed)) { subcomputed <- resolve.defaults(list(E=Esub, U=Psub, Q=Q[Retain]), precomputed) } else subcomputed <- NULL if(clip.interaction) { ## normal V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., splitInf=splitInf, precomputed=subcomputed, savecomputed=savecomputed) } else { ## ignore window when calculating interaction ## by setting 'W=NULL' (currently detected only by AreaInter) V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., W=NULL, splitInf=splitInf, precomputed=subcomputed, savecomputed=savecomputed) } if(savecomputed) { computed$Usub <- Psub computed$Esub <- Esub computed$Retain <- Retain } } if(!is.matrix(V)) stop("interaction evaluator did not return a matrix") ## extract information about offsets IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- FALSE if(splitInf) { ## extract information about hard core terms forbid <- attr(V, "-Inf") %orifnull% logical(nrow(V)) } if(skip.border) { ## fill in the values in the border region with zeroes. Vnew <- matrix(0, nrow=npoints(P), ncol=ncol(V)) colnames(Vnew) <- colnames(V) Vnew[Retain, ] <- V ## retain attributes attr(Vnew, "IsOffset") <- IsOffset attr(Vnew, "computed") <- attr(V, "computed") attr(Vnew, "POT") <- attr(V, "POT") V <- Vnew if(splitInf) { fnew <- logical(nrow(Vnew)) fnew[Retain] <- forbid forbid <- fnew } } ## extract intermediate computation results if(savecomputed) computed <- resolve.defaults(attr(V, "computed"), computed) ## Augment data frame by appending the regression variables ## for interactions. ## ## First determine the names of the variables ## Vnames <- dimnames(V)[[2]] if(is.null(Vnames)) { ## No names were provided for the columns of V. ## Give them default names. ## In ppm the names will be "Interaction" ## or "Interact.1", "Interact.2", ... ## In mppm an alternative tag will be specified by vnamebase. 
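      ## For example, with the default vnamebase = c("Interaction", "Interact."),
      ## a single interaction column is named "Interaction", while a matrix V
      ## with nc = 3 columns yields "Interact.1", "Interact.2", "Interact.3".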
nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1] else paste0(vnamebase[2], 1:nc) dimnames(V) <- list(dimnames(V)[[1]], Vnames) } else if(!is.null(vnameprefix)) { ## Variable names were provided by the evaluator (e.g. MultiStrauss). ## Prefix the variable names by a string ## (typically required by mppm) Vnames <- paste(vnameprefix, Vnames, sep="") dimnames(V) <- list(dimnames(V)[[1]], Vnames) } ## Check the names are valid as column names in a dataframe okVnames <- make.names(Vnames, unique=TRUE) if(any(Vnames != okVnames)) { warning(paste("Names of interaction terms", "contained illegal characters;", "names have been repaired.")) Vnames <- okVnames } ## Check for name clashes between the interaction variables ## and the formula cc <- check.clashes(Vnames, termsinformula(trend), "model formula") if(cc != "") stop(cc) ## and with the variables in 'covariates' if(!is.null(covariates)) { cc <- check.clashes(Vnames, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) } ## OK. append variables. glmdata <- data.frame(glmdata, V) ## check IsOffset matches Vnames if(length(IsOffset) != length(Vnames)) { if(length(IsOffset) == 1) IsOffset <- rep.int(IsOffset, length(Vnames)) else stop("Internal error: IsOffset has wrong length", call.=FALSE) } ## Keep only those quadrature points for which the ## conditional intensity is nonzero. ##KEEP <- apply(V != -Inf, 1, all) .mpl$KEEP <- matrowall(V != -Inf) .mpl$SUBSET <- .mpl$SUBSET & .mpl$KEEP ## Check that there are at least some data and dummy points remaining datremain <- .mpl$Z[.mpl$SUBSET] somedat <- any(datremain) somedum <- !all(datremain) if(warn.unidentifiable && !(somedat && somedum)) { ## Model would be unidentifiable if it were fitted. ## Register problem is.identifiable <- FALSE if(ndata == 0) { complaint <- "model is unidentifiable: data pattern is empty" } else { offending <- !c(somedat, somedum) offending <- c("all data points", "all dummy points")[offending] offending <- paste(offending, collapse=" and ") complaint <- paste("model is unidentifiable:", offending, "have zero conditional intensity") } details <- list(data=!somedat, dummy=!somedum, print=complaint) problems <- append(problems, list(unidentifiable=details)) } ## check whether the model has zero likelihood: ## check whether ANY data points have zero conditional intensity if(any(.mpl$Z & !.mpl$KEEP)) { howmany <- sum(.mpl$Z & !.mpl$KEEP) complaint <- paste(howmany, "data point(s) are illegal", "(zero conditional intensity under the model)") details <- list(illegal=howmany, print=complaint) problems <- append(problems, list(zerolikelihood=details)) if(warn.illegal && is.identifiable) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. 
= FALSE) likelihood.is.zero <- TRUE } } ################## S u b s e t ################### if(correction == "border") .mpl$SUBSET <- .mpl$SUBSET & .mpl$DOMAIN if(!is.null(subsetexpr)) { ## user-defined subset expression USER.SUBSET <- eval(subsetexpr, glmdata, environment(trend)) if(is.owin(USER.SUBSET)) { USER.SUBSET <- inside.owin(P$x, P$y, USER.SUBSET) } else if(is.im(USER.SUBSET)) { USER.SUBSET <- as.logical(USER.SUBSET[P, drop=FALSE]) if(anyNA(USER.SUBSET)) USER.SUBSET[is.na(USER.SUBSET)] <- FALSE } if(!(is.logical(USER.SUBSET) || is.numeric(USER.SUBSET))) stop("Argument 'subset' should yield logical values", call.=FALSE) if(anyNA(USER.SUBSET)) { USER.SUBSET[is.na(USER.SUBSET)] <- FALSE warning("NA values in argument 'subset' were changed to FALSE", call.=FALSE) } .mpl$SUBSET <- .mpl$SUBSET & USER.SUBSET } glmdata <- cbind(glmdata, data.frame(.mpl.SUBSET=.mpl$SUBSET, stringsAsFactors=FALSE)) ################# F o r m u l a ################################## if(!want.trend) trend <- ~1 trendpart <- paste(as.character(trend), collapse=" ") if(!want.inter) rhs <- trendpart else { VN <- Vnames ## enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") rhs <- paste(c(trendpart, VN), collapse= "+") } fmla <- paste(".mpl.Y ", rhs) fmla <- as.formula(fmla) ## character string of trend formula (without Vnames) trendfmla <- paste(".mpl.Y ", trendpart) #### result <- list(fmla=fmla, trendfmla=trendfmla, covariates=if(updatecovariates) covariates else NULL, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, subsetexpr=subsetexpr, problems=problems, likelihood.is.zero=likelihood.is.zero, is.identifiable=is.identifiable, computed=computed, vnamebase=vnamebase, vnameprefix=vnameprefix, forbid=forbid) return(result) } check.clashes <- function(forbidden, offered, where) { name.match <- outer(forbidden, offered, "==") if(any(name.match)) { is.matched <- apply(name.match, 2, any) matched.names <- (offered)[is.matched] if(sum(is.matched) == 1) { return(paste("The variable",sQuote(matched.names), "in", where, "is a reserved name")) } else { return(paste("The variables", paste(sQuote(matched.names), collapse=", "), "in", where, "are reserved names")) } } return("") } mpl.prepare }) #################################################################### #################################################################### mpl.usable <- function(x) { ## silently remove covariates that don't have recognised format if(length(x) == 0 || is.data.frame(x)) return(x) isim <- sapply(x, is.im) isfun <- sapply(x, is.function) iswin <- sapply(x, is.owin) istess <- sapply(x, is.tess) isnum <- sapply(x, is.numeric) & (lengths(x) == 1) recognised <- isim | isfun | iswin | istess | isnum if(!all(recognised)) x <- x[recognised] return(x) } mpl.get.covariates <- local({ mpl.get.covariates <- function(covariates, locations, type="locations", covfunargs=list(), need.deriv=FALSE) { covargname <- sQuote(short.deparse(substitute(covariates))) locargname <- sQuote(short.deparse(substitute(locations))) if(is.null(covfunargs)) covfunargs <- list() ## extract spatial coordinates x <- locations$x y <- locations$y if(is.null(x) || is.null(y)) { xy <- xy.coords(locations) x <- xy$x y <- xy$y } if(is.null(x) || is.null(y)) stop(paste("Can't interpret", locargname, "as x,y coordinates")) ## extract marks if any m <- locations$marks markinfo <- if(is.null(m)) NULL else list(marks=m) ## validate covariates and extract values n <- length(x) if(is.data.frame(covariates)) { 
if(nrow(covariates) != n) stop(paste("Number of rows in", covargname, "does not equal the number of", type)) return(covariates) } else if(is.list(covariates)) { if(length(covariates) == 0) return(as.data.frame(matrix(, n, 0))) if(any(islin <- unlist(lapply(covariates, inherits, what="lintess")))) { ## convert 'lintess' objects to 'linfun' for handling if(requireNamespace("spatstat.linnet", quietly=TRUE) && isNamespaceLoaded("spatstat.linnet")) { covariates[islin] <- lapply(covariates[islin], spatstat.linnet::as.linfun) } else { stop(paste("The package", sQuote("spatstat.linnet"), "is required to handle", sQuote("lintess"), "objects")) } } ## recognise type of each covariate isim <- unlist(lapply(covariates, is.im)) isfun <- unlist(lapply(covariates, is.function)) iswin <- unlist(lapply(covariates, is.owin)) istess <- unlist(lapply(covariates, is.tess)) isnum <- unlist(lapply(covariates, is.number)) if(!all(isim | isfun | isnum | iswin | istess)) stop(paste("Each entry in the list", covargname, "should be an image, a function,", "a window, a tessellation or a single number")) if(sum(nzchar(names(covariates))) < length(covariates)) stop(paste("Some entries in the list", covargname, "are un-named")) ## look up values of each covariate at the quadrature points values <- unclass(covariates) values[isim] <- lapply(covariates[isim], lookup.im, x=x, y=y, naok=TRUE, strict=FALSE) values[isfun] <- vf <- lapply(covariates[isfun], evalfxy, x=x, y=y, extra=append(covfunargs, markinfo)) values[isnum] <- lapply(covariates[isnum], rep, length(x)) values[iswin] <- lapply(covariates[iswin], insidexy, x=x, y=y) values[istess] <- lapply(covariates[istess], tileindex, x=x, y=y) result <- as.data.frame(values) if(need.deriv && any(isfun)) { ## check for gradient/hessian attributes of function values grad <- lapply(vf, attr, which="gradient") hess <- lapply(vf, attr, which="hessian") grad <- grad[!unlist(lapply(grad, is.null))] hess <- hess[!unlist(lapply(hess, is.null))] if(length(grad) > 0 || length(hess) > 0) attr(result, "derivatives") <- list(gradient=grad, hessian=hess) } return(result) } stop(paste(covargname, "must be either a data frame or a list")) } ## functions for 'apply' evalfxy <- function(f, x, y, extra) { if(length(extra) == 0) return(f(x,y)) ## extra arguments must be matched explicitly by name ok <- names(extra) %in% names(formals(f)) z <- do.call(f, append(list(x,y), extra[ok])) return(z) } insidexy <- function(w, x, y) { inside.owin(x, y, w) } is.number <- function(x) { is.numeric(x) && (length(x) == 1) } mpl.get.covariates }) bt.frame <- function(Q, trend=~1, interaction=NULL, ..., covariates=NULL, correction="border", rbord=0, use.gam=FALSE, allcovar=FALSE) { prep <- mpl.engine(Q, trend=trend, interaction=interaction, ..., covariates=covariates, correction=correction, rbord=rbord, use.gam=use.gam, allcovar=allcovar, preponly=TRUE, forcefit=TRUE) class(prep) <- c("bt.frame", class(prep)) return(prep) } print.bt.frame <- function(x, ...) 
{ cat("Model frame for Berman-Turner device\n") df <- x$glmdata cat(paste("$glmdata: Data frame with", nrow(df), "rows and", ncol(df), "columns\n")) cat(" Column names:\t") cat(paste(paste(names(df),collapse="\t"), "\n")) cat("Complete model formula ($fmla):\t") print(x$fmla) info <- x$info if(info$want.trend) { cat("Trend:\tyes\nTrend formula string ($trendfmla):\t") cat(paste(x$trendfmla, "\n")) } else cat("Trend:\tno\n") cat("Interaction ($info$interaction):\t") inte <- info$interaction if(is.null(inte)) inte <- Poisson() print(inte, family=FALSE, brief=TRUE) if(!is.poisson.interact(inte)) { cat("Internal names of interaction variables ($Vnames):\t") cat(paste(x$Vnames, collapse="\t")) cat("\n") } edge <- info$correction cat(paste("Edge correction ($info$correction):\t", sQuote(edge), "\n")) if(edge == "border") cat(paste("\tBorder width ($info$rbord):\t", info$rbord, "\n")) if(length(x$problems) > 0) { cat("Problems:\n") print(x$problems) } if(length(x$computed) > 0) cat(paste("Frame contains saved computations for", commasep(dQuote(names(x$computed))))) return(invisible(NULL)) } partialModelMatrix <- function(X, D, model, callstring="", ...) { ## X = 'data' ## D = 'dummy' Q <- quad(X,D) P <- union.quad(Q) trend <- model$trend inter <- model$interaction covar <- model$covariates prep <- mpl.prepare(Q, X, P, trend, inter, covar, correction=model$correction, rbord=model$rbord, Pname="data points", callstring=callstring, warn.unidentifiable=FALSE, ...) fmla <- prep$fmla glmdata <- prep$glmdata mof <- model.frame(fmla, glmdata) mom <- model.matrix(fmla, mof) modelnames <- names(coef(model)) modelnames <- sub("log(lambda)", "(Intercept)", modelnames, fixed=TRUE) if(!isTRUE(all.equal(colnames(mom), modelnames))) warning(paste("Internal error: mismatch between", "column names of model matrix", "and names of coefficient vector in fitted model")) attr(mom, "mplsubset") <- glmdata$.mpl.SUBSET attr(mom, "-Inf") <- prep$forbid return(mom) } oversize.quad <- function(Q, ..., nU, nX, p=1) { ## Determine whether the quadrature scheme is ## too large to handle in one piece (in mpl) ## for a generic interaction ## nU = number of quadrature points ## nX = number of data points ## p = dimension of statistic if(missing(nU)) nU <- n.quad(Q) if(missing(nX)) nX <- npoints(Q$data) nmat <- as.double(nU) * nX nMAX <- spatstat.options("maxmatrix")/p needsplit <- (nmat > nMAX) return(needsplit) } quadBlockSizes <- function(nX, nD, p=1, nMAX=spatstat.options("maxmatrix")/p, announce=TRUE) { if(is.quad(nX) && missing(nD)) { nD <- npoints(nX$dummy) nX <- npoints(nX$data) } ## Calculate number of dummy points in largest permissible X * (X+D) matrix nperblock <- max(1, floor(nMAX/nX - nX)) ## determine number of such blocks nblocks <- ceiling(nD/nperblock) ## make blocks roughly equal (except for the last one) nperblock <- min(nperblock, ceiling(nD/nblocks)) ## announce if(announce && nblocks > 1) { msg <- paste("Large quadrature scheme", "split into blocks to avoid memory size limits;", nD, "dummy points split into", nblocks, "blocks,") nfull <- nblocks - 1 nlastblock <- nD - nperblock * nfull if(nlastblock == nperblock) { msg <- paste(msg, "each containing", nperblock, "dummy points") } else { msg <- paste(msg, "the first", ngettext(nfull, "block", paste(nfull, "blocks")), "containing", nperblock, ngettext(nperblock, "dummy point", "dummy points"), "and the last block containing", nlastblock, ngettext(nlastblock, "dummy point", "dummy points")) } message(msg) } else nlastblock <- nperblock 
return(list(nblocks=nblocks, nperblock=nperblock, nlastblock=nlastblock)) } ## function that should be called to evaluate interaction terms ## between quadrature points and data points evalInteraction <- function(X, P, E = equalpairs(P, X), interaction, correction, splitInf=FALSE, ..., precomputed=NULL, savecomputed=FALSE) { ## evaluate the interaction potential ## (does not assign/touch the variable names) verifyclass(interaction, "interact") ## handle Poisson case if(is.poisson(interaction)) { out <- matrix(numeric(0), nrow=npoints(P), ncol=0) attr(out, "IsOffset") <- logical(0) if(splitInf) attr(out, "-Inf") <- logical(nrow(out)) return(out) } ## determine whether to use fast evaluation in C dofast <- (spatstat.options("fasteval") %in% c("on", "test")) && !is.null(cando <- interaction$can.do.fast) && cando(X, correction, interaction$par) && !is.null(interaction$fasteval) ## determine whether to split quadscheme into blocks if(dofast) { dosplit <- FALSE } else { ## decide whether the quadrature scheme is too large to handle in one piece needsplit <- oversize.quad(nU=npoints(P), nX=npoints(X)) ## not implemented when savecomputed=TRUE dosplit <- needsplit && !savecomputed if(needsplit && savecomputed) warning(paste("Oversize quadscheme cannot be split into blocks", "because savecomputed=TRUE;", "memory allocation error may occur")) } if(!dosplit) { ## normal case V <- evalInterEngine(X=X, P=P, E=E, interaction=interaction, correction=correction, splitInf=splitInf, ..., precomputed=precomputed, savecomputed=savecomputed) } else { ## Too many quadrature points: split into blocks nX <- npoints(X) nP <- npoints(P) ## Determine which evaluation points are data points Pdata <- E[,2] ## hence which are dummy points Pall <- seq_len(nP) Pdummy <- if(length(Pdata) > 0) Pall[-Pdata] else Pall nD <- length(Pdummy) ## calculate block sizes bls <- quadBlockSizes(nX, nD, announce=TRUE) nblocks <- bls$nblocks nperblock <- bls$nperblock ## seqX <- seq_len(nX) EX <- cbind(seqX, seqX) ## for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) ## extract dummy points Di <- P[Pdummy[first:last]] Pi <- superimpose(X, Di, check=FALSE, W=X$window) ## evaluate potential Vi <- evalInterEngine(X=X, P=Pi, E=EX, interaction=interaction, correction=correction, splitInf=splitInf, ..., savecomputed=FALSE) Mi <- attr(Vi, "-Inf") if(iblock == 1) { V <- Vi M <- Mi } else { ## tack on the glm variables for the extra DUMMY points only V <- rbind(V, Vi[-seqX, , drop=FALSE]) if(splitInf && !is.null(M)) M <- c(M, Mi[-seqX]) } } ## The first 'nX' rows of V contain values for X. ## The remaining rows of V contain values for dummy points. 
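## Reassemble the results in the order of the points of P:
## if P contains no data points, the X rows are simply discarded;
## otherwise the rows of V (and the hard-core indicator M) are permuted
## so that row i of the final result corresponds to P[i].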
if(length(Pdata) == 0) { ## simply discard rows corresponding to data V <- V[-seqX, , drop=FALSE] if(splitInf && !is.null(M)) M <- M[-seqX] } else { ## replace data in correct position ii <- integer(nP) ii[Pdata] <- seqX ii[Pdummy] <- (nX+1):nrow(V) V <- V[ii, , drop=FALSE] if(splitInf && !is.null(M)) M <- M[ii] } attr(V, "-Inf") <- M } return(V) } ## workhorse function that actually calls relevant code to evaluate interaction evalInterEngine <- function(X, P, E, interaction, correction, splitInf=FALSE, ..., Reach = NULL, precomputed=NULL, savecomputed=FALSE) { ## fast evaluator (C code) may exist fasteval <- interaction$fasteval cando <- interaction$can.do.fast par <- interaction$par feopt <- spatstat.options("fasteval") dofast <- !is.null(fasteval) && (is.null(cando) || cando(X, correction,par)) && (feopt %in% c("on", "test")) && (!splitInf || ("splitInf" %in% names(formals(fasteval)))) V <- NULL if(dofast) { if(feopt == "test") message("Calling fasteval") V <- fasteval(X, P, E, interaction$pot, interaction$par, correction, splitInf=splitInf, ...) } if(is.null(V)) { ## use generic evaluator for family evaluate <- interaction$family$eval evalargs <- names(formals(evaluate)) if(splitInf && !("splitInf" %in% evalargs)) stop("Sorry, the", interaction$family$name, "interaction family", "does not support calculation of the positive part", call.=FALSE) if(is.null(Reach)) Reach <- reach(interaction) if("precomputed" %in% evalargs) { ## Use precomputed data ## version 1.9-3 onward (pairwise and pairsat families) V <- evaluate(X, P, E, interaction$pot, interaction$par, correction=correction, splitInf=splitInf, ..., Reach=Reach, precomputed=precomputed, savecomputed=savecomputed) } else { ## Cannot use precomputed data ## Object created by earlier version of ppm ## or not pairwise/pairsat interaction V <- evaluate(X, P, E, interaction$pot, interaction$par, correction=correction, splitInf=splitInf, ..., Reach=Reach) } } return(V) } deltasuffstat <- local({ deltasuffstat <- function(model, ..., restrict=c("pairs", "first", "none"), dataonly=TRUE, sparseOK=TRUE, quadsub=NULL, force=FALSE, warn.forced=FALSE, verbose=warn.forced, use.special=TRUE) { stopifnot(is.ppm(model)) sparseOK <- !isFALSE(sparseOK) # NULL -> TRUE restrict <- match.arg(restrict) if(dataonly) { X <- data.ppm(model) nX <- npoints(X) } else { X <- quad.ppm(model) if(!is.null(quadsub)) { z <- is.data(X) z[quadsub] <- FALSE if(any(z)) stop("subset 'quadsub' must include all data points", call.=FALSE) X <- X[quadsub] } nX <- n.quad(X) } ncoef <- length(coef(model)) inte <- as.interact(model) if(!sparseOK && exceedsMaxArraySize(nX, nX, ncoef)) stop(paste("Array dimensions too large", paren(paste(c(nX, nX, ncoef), collapse=" x ")), "for non-sparse calculation of variance terms"), call.=FALSE) zeroes <- if(!sparseOK) array(0, dim=c(nX, nX, ncoef)) else sparse3Darray(dims=c(nX, nX, ncoef)) if(is.poisson(inte)) return(zeroes) ## Get names of interaction terms in model (including offsets) f <- fitin(model) Inames <- f$Vnames IsOffset <- f$IsOffset hasInf <- !identical(inte$hasInf, FALSE) ## Offset terms do not contribute to sufficient statistic if(all(IsOffset) && !hasInf) return(zeroes) ## Nontrivial interaction terms must be computed. 
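## Sketch of the intended call (not run; hypothetical fitted model):
##   fit <- ppm(cells ~ 1, Strauss(0.1))
##   d <- deltasuffstat(fit, restrict="pairs")
##   ## 'd' is an nX x nX x ncoef array (a sparse3Darray when sparseOK=TRUE);
##   ## d[i,j,] is the change in the canonical statistic of X[i] when X[j]
##   ## is deleted.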
## Look for member function $delta2 in the interaction v <- NULL v.is.full <- FALSE if(use.special) { ## Use specialised $delta2 for interaction, if available if(is.function(delta2 <- inte$delta2)) v <- delta2(X, inte, model$correction, sparseOK=sparseOK) ## Use generic $delta2 for the family, if available if(is.null(v) && is.function(delta2 <- inte$family$delta2)) v <- delta2(X, inte, model$correction, sparseOK=sparseOK) } ## no luck? if(is.null(v)) { if(!force) return(NULL) ## use brute force algorithm if(warn.forced) warning("Reverting to brute force to compute interaction terms", call.=FALSE) v <- if(dataonly) deltasufX(model, sparseOK, verbose=verbose) else deltasufQ(model, quadsub, sparseOK, verbose=verbose) v.is.full <- TRUE } ## extract hard core information deltaInf <- attr(v, "deltaInf") ## ensure 'v' is a 3D array if(length(dim(v)) != 3) { if(is.matrix(v)) { v <- array(v, dim=c(dim(v), 1)) } else if(inherits(v, "sparseMatrix")) { v <- as.sparse3Darray(v) } } if(!sparseOK) { if(inherits(v, "sparse3Darray")) v <- as.array(v) if(inherits(deltaInf, "sparseMatrix")) deltaInf <- as.matrix(deltaInf) } if(restrict != "none") { ## kill contributions from points outside the domain of pseudolikelihood ## (e.g. points in the border region) use <- if(dataonly) getppmdatasubset(model) else if(is.null(quadsub)) getglmsubset(model) else getglmsubset(model)[quadsub] if(any(kill <- !use)) { switch(restrict, pairs = { v[kill,kill,] <- 0 }, first = { v[kill,,] <- 0 }, none = {}) if(!is.null(deltaInf)) { switch(restrict, pairs = { deltaInf[kill,kill] <- FALSE }, first = { deltaInf[kill,] <- FALSE }, none = {}) } } } ## Make output array, with planes corresponding to model coefficients if(v.is.full) { ## Planes of 'v' already correspond to coefficients of model cnames <- names(coef(model)) ## Remove any offset interaction terms ## (e.g. Hardcore interaction): these do not contribute to suff stat if(any(IsOffset)) { retain <- is.na(match(cnames, Inames[IsOffset])) v <- v[ , , retain, drop=FALSE] Inames <- Inames[!IsOffset] } result <- v } else { ## Planes of 'v' correspond to interaction terms only.
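## (First-order trend terms are unaffected by the addition or deletion of
## another point, so their planes in the output array remain zero.)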
## Fill out the first order terms with zeroes result <- zeroes if(length(Inames) != dim(v)[3]) stop(paste("Internal error: deltasuffstat:", "number of planes of v =", dim(v)[3], "!= number of interaction terms =", length(Inames)), call.=FALSE) ## Offset terms do not contribute to sufficient statistic if(any(IsOffset)) { v <- v[ , , !IsOffset, drop=FALSE] Inames <- Inames[!IsOffset] } ## Map planes of 'v' into coefficients Imap <- match(Inames, names(coef(model))) if(anyNA(Imap)) stop(paste("Internal error: deltasuffstat:", "cannot match interaction coefficients")) if(length(Imap) > 0) { ## insert 'v' into array result[ , , Imap] <- v } } ## pack up attr(result, "deltaInf") <- deltaInf return(result) } ## compute deltasuffstat using partialModelMatrix deltasufX <- function(model, sparseOK=TRUE, verbose=FALSE) { stopifnot(is.ppm(model)) X <- data.ppm(model) hasInf <- !identical(model$interaction$hasInf, FALSE) nX <- npoints(X) p <- length(coef(model)) m <- model.matrix(model, splitInf=hasInf) if(hasInf) { isInf <- attr(m, "-Inf") hasInf <- !is.null(isInf) } isdata <- is.data(quad.ppm(model)) m <- m[isdata, ,drop=FALSE] if(hasInf) isInf <- isInf[isdata] ok <- getppmdatasubset(model) ## canonical statistic before and after deleting X[j] ## mbefore[ , i, j] = h(X[i] | X) ## mafter[ , i, j] = h(X[i] | X[-j]) ## where h(u|x) is the canonical statistic of the *positive* cif dimwork <- c(p, nX, nX) if(!sparseOK) { mafter <- mbefore <- array(t(m), dim=dimwork) isInfafter <- isInfbefore <- if(!hasInf) NULL else matrix(isInf, nX, nX) } else { ## make empty arrays; fill in values later ## (but only where they might change) mafter <- mbefore <- sparse3Darray(dims=dimwork) isInfafter <- isInfbefore <- if(!hasInf) NULL else sparseMatrix(i=integer(0), j=integer(0), x=logical(0), dims=dimwork[-1]) } ## identify close pairs R <- reach(model) if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## DO NOT RESTRICT - THIS IS NOW DONE IN deltasuffstat ## filter: I and J must both belong to the nominated subset ## okIJ <- ok[I] & ok[J] ## I <- I[okIJ] ## J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................
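## Strategy: for each data point X[i], recompute the canonical statistic
## with and without each neighbour X[j] lying within the interaction range R,
## working only with the points of X within distance 2R of X[i]; points
## further than 2R from X[i] cannot lie within distance R of any such X[j],
## so they cannot affect these terms.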
uniqueI <- unique(I) npairs <- length(uniqueI) pstate <- list() if(verbose) splat("Examining", npairs, "pairs of data points...") ## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## ## Run through pairs for(iter in seq_len(npairs)) { i <- uniqueI[iter] ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)]) ## for all j pmj <- snipModelMatrix(X.i, empty, model, J.i, hasInf) if(hasInf) zzj <- attr(pmj, "-Inf") ## sufficient statistic in reverse order ## h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]) ## for all j pmi <- matrix(, nJi, p) zzi <- logical(nJi) for(k in 1:nJi) { ## j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] pmik <- snipModelMatrix(X.ij, Xi, model, nX.i, hasInf) pmi[k, ] <- pmik if(hasInf) zzi[k] <- attr(pmik, "-Inf") } ## if(!sparseOK) { mafter[ , Ji, i] <- t(pmj) mafter[ , i, Ji] <- t(pmi) if(hasInf) { isInfafter[Ji, i] <- zzj isInfafter[i, Ji] <- zzi } } else { mafter[ , Ji, i] <- array(t(pmj), dim=c(p, nJi, 1)) mafter[ , i, Ji] <- array(t(pmi), dim=c(p, 1, nJi)) mbefore[ , Ji, i] <- array(t(m[Ji,]), dim=c(p, nJi, 1)) mbefore[ , i, Ji] <- array(m[i,], dim=c(p, 1, nJi)) if(hasInf) { isInfafter[Ji, i] <- zzj isInfafter[i, Ji] <- zzi isInfbefore[Ji, i] <- isInf[Ji] isInfbefore[i, Ji] <- isInf[i] } } } if(verbose) pstate <- progressreport(iter, npairs, state=pstate) } } ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta <- mbefore - mafter ## delta[i, j, ] = h(X[i] | X) - h(X[i] | X[-j]) delta <- aperm(delta, c(2,3,1)) ## if(hasInf) { deltaInf <- isInfbefore - isInfafter attr(delta, "deltaInf") <- deltaInf } return(delta) } deltasufQ <- function(model, quadsub, sparseOK, verbose=FALSE) { stopifnot(is.ppm(model)) hasInf <- !identical(model$interaction$hasInf, FALSE) p <- length(coef(model)) Q <- quad.ppm(model) ok <- getglmsubset(model) m <- model.matrix(model, splitInf=hasInf) if(hasInf) { isInf <- attr(m, "-Inf") hasInf <- !is.null(isInf) } if(!is.null(quadsub)) { Q <- Q[quadsub] m <- m[quadsub, , drop=FALSE] ok <- ok[quadsub] if(hasInf) isInf <- isInf[quadsub] } X <- Q$data U <- union.quad(Q) nU <- npoints(U) nX <- npoints(X) isdata <- is.data(Q) isdummy <- !isdata ## canonical statistic before and after adding/deleting U[j] dimwork <- c(p, nU, nU) if(!sparseOK) { mafter <- mbefore <- array(t(m), dim=dimwork) delta <- array(0, dim=dimwork) isInfafter <- isInfbefore <- deltaInf <- if(!hasInf) NULL else matrix(isInf, nU, nU) } else { ## make empty arrays; fill in values later ## [but only where they might change] mafter <- mbefore <- delta <- sparse3Darray(dims=dimwork) isInfafter <- isInfbefore <- deltaInf <- if(!hasInf) NULL else sparseMatrix(i=integer(0), j=integer(0), x=logical(0), dims=dimwork[-1]) } ## mbefore[ , i, j] = h(U[i] | X) ## For data points X[j] ## mafter[ , i, j] = h(U[i] | X[-j]) ## delta[ , i, j] = h(U[i] | X) - h(U[i] | X[-j]) ## For dummy points X[j] ## mafter[ , i, j] = h(U[i] | X \cup U[j]) ## delta[ , i, j] = h(U[i] | X \cup U[j]) - h(U[i] | X) changesign <- ifelseAB(isdata, -1, 1) ## identify close pairs of quadrature points R <- reach(model) if(is.finite(R)) { cl <- closepairs(U, R, what="indices") I <- cl$i J <- cl$j cl2 <-
closepairs(U, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nU, J=1:nX) IJ <- IJ[ with(IJ, I != J), ] I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs of quadrature points ............... ## Run through pairs uI <- unique(I) zI <- isdata[uI] uIdata <- uI[zI] uIdummy <- uI[!zI] nuIdata <- length(uIdata) nuIdummy <- length(uIdummy) if(verbose) splat("Examining", nuIdata, "+", nuIdummy, "=", nuIdata + nuIdummy, "pairs of points") ## Run through pairs i, j where 'i' is a data point pstate <- list() for(iter in seq_len(nuIdata)) { i <- uIdata[iter] ## all DATA points within 2R of X[i] ## This represents X[-i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] ## all QUADRATURE points within R of X[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] ## data points which are neighbours of X[i] XJi <- X[Ji[isd]] ## dummy points which are neighbours of X[i] DJi <- U[Ji[!isd]] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji[isd], J2i) if(anyNA(J.i)) stop("Internal error: Ji[isd] not a subset of J2i") ## index of DJi in superimpose(X.i, DJi) JDi <- nX.i + seq_len(sum(!isd)) ## values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- snipModelMatrix(X.i, DJi, model, c(J.i, JDi), hasInf) ## mafter[ , Ji, i] <- t(pmj) if(hasInf) isInfafter[Ji, i] <- attr(pmj, "-Inf") if(sparseOK) { mbefore[ , Ji, i] <- array(t(m[Ji,]), dim=c(p, nJi, 1)) if(hasInf) isInfbefore[Ji, i] <- isInf[Ji] } } if(verbose) pstate <- progressreport(iter, nuIdata, state=pstate) } ## Run through pairs i, j where 'i' is a dummy point pstate <- list() for(iter in seq_len(nuIdummy)) { i <- uIdummy[iter] ## all DATA points within 2R of U[i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] ## all QUADRATURE points within R of U[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] JiData <- Ji[isd] JiDummy <- Ji[!isd] ## data points which are neighbours of U[i] XJi <- X[JiData] ## dummy points which are neighbours of U[i] DJi <- U[JiDummy] ## replace X \cup U[i] by (X \cap b(0, 2R)) \cup U[i] J2Ui <- c(J2i, i) XUi <- U[J2Ui] nXUi <- length(J2Ui) ## index of XJi in X.i J.i <- match(JiData, J2Ui) if(anyNA(J.i)) stop("Internal error: Ji[isd] not a subset of J2i") ## index of DJi in superimpose(X.i, DJi) JDi <- nXUi + seq_len(length(JiDummy)) ## values of sufficient statistic ## h(X[j] | X \cup U[i]) ## for all j pmj <- snipModelMatrix(XUi, DJi, model, c(J.i, JDi), hasInf) ## JiSort <- c(JiData, JiDummy) if(!sparseOK) { mafter[ , JiSort, i] <- t(pmj) if(hasInf) isInfafter[JiSort, i] <- attr(pmj, "-Inf") } else { mafter[ , JiSort, i] <- array(t(pmj), dim=c(p, nJi, 1)) mbefore[ , JiSort, i] <- array(t(m[JiSort,]), dim=c(p, nJi, 1)) if(hasInf) { isInfafter[JiSort, i] <- attr(pmj, "-Inf") isInfbefore[JiSort, i] <- isInf[JiSort] } } } if(verbose) pstate <- progressreport(iter, nuIdummy, state=pstate) } } ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta[ , , isdata] <- mbefore[, , isdata] - mafter[ , , isdata] ## delta[ ,i,j] = h(X[i] | X \cup U[j]) - h(X[i] | X) delta[ , , isdummy] <- mafter[, , isdummy] - mbefore[ , , isdummy] ## rearrange: new delta[i,j,] = old delta[, i, j] delta <- aperm(delta, c(2,3,1)) ## if(hasInf) { deltaInf[ , isdata] <- isInfbefore[ , 
isdata] - isInfafter[ , isdata] deltaInf[ , isdummy] <- isInfafter[ , isdummy] - isInfbefore[ , isdummy] attr(delta, "deltaInf") <- deltaInf } return(delta) } snipModelMatrix <- function(X, D, model, retain, splitInf=FALSE) { M <- partialModelMatrix(X, D, model, splitInf=splitInf) if(splitInf) isInf <- attr(M, "-Inf") M <- M[retain, , drop=FALSE] if(splitInf) attr(M, "-Inf") <- isInf[retain] return(M) } deltasuffstat }) spatstat.model/R/zgibbsmodel.R0000644000176200001440000001014714331173074016047 0ustar liggesusers#' #' zgibbsmodel.R #' #' Experimental #' #' $Revision: 1.3 $ $Date: 2022/04/26 07:56:22 $ zgibbsmodel <- local({ validclass <- function(x) { is.numeric(x) || is.im(x) || is.function(x) } validtrend <- function(x) { validclass(x) || (is.list(x) && all(sapply(x, validclass))) } missingxy <- function(f) { is.function(f) && !all(c("x", "y") %in% names(formals(f))) } zgibbsmodel <- function(beta=1, interaction=NULL, icoef=NULL) { ## validate trend if(!validtrend(beta)) stop("beta should be a number, a numeric vector, a pixel image, a function, or a list of such things") if(missingxy(beta)) stop("Function beta() should have arguments x,y (and possibly others)") if(is.list(beta) && any(sapply(beta, missingxy))) stop("Each function beta() should have arguments x,y (and possibly others)") ## validate interaction if(is.null(interaction)) { interaction <- Poisson() if(length(icoef)) stop("interaction coefficients icoef should not be specified for the Poisson process") icoef <- numeric(0) } else if(inherits(interaction, "fii")) { if(is.null(icoef)) { icoef <- coef(interaction) } else if(length(icoef) != length(coef(interaction))) stop("supplied interaction coefficients icoef have the wrong length") interaction <- as.interact(interaction) } else if(!inherits(interaction, "interact")) stop("Argument 'interaction' should be an object of class 'interact' or 'fii'") ## if(anyNA(interaction$par)) stop("All irregular parameters of the interaction must be supplied") ## build out <- list(beta = beta, interaction = interaction, icoef = icoef) class(out) <- c("zgibbsmodel", class(out)) return(out) } zgibbsmodel }) is.poisson.zgibbsmodel <- function(x) { is.null(x$interaction$family) } is.stationary.zgibbsmodel <- function(x) { is.numeric(x$beta) } as.interact.zgibbsmodel <- function(object) { object$interaction } as.isf.zgibbsmodel <- function(object) { object$interaction$family } interactionorder.zgibbsmodel <- function(object) { interactionorder(as.interact(object)) } print.zgibbsmodel <- function(x, ...) 
{ splat(if(is.stationary(x)) "Stationary" else "Non-stationary", if(is.poisson(x)) "Poisson" else "Gibbs", "point process model") beta <- x$beta tname <- if(is.poisson(x)) "Intensity" else "Trend" tcolon <- paste0(tname, ":") if(is.numeric(beta)) { if(length(beta) == 1) { splat(tcolon, "numeric value =", beta) } else { splat(tcolon, "numeric vector =", paren(paste(beta, collapse=" "), "[")) } } else if(is.function(beta)) { splat(tname, "= function:") print(beta) } else if(is.im(beta)) { splat(tname, "= pixel image:") print(beta) } else { splat(tname, "= list:") print(beta) } if(!is.poisson(x)) { print(as.interact(x)) splat("Interaction coefficients:") print(x$icoef) } invisible(NULL) } fakefii <- function(model) { ## create a 'fake' fii object from a zgibbsmodel stopifnot(inherits(model, "zgibbsmodel")) inte <- as.interact(model) if(is.multitype(inte)) stop("Not implemented for multitype interactions") ## determine dimension of potential, etc fakePOT <- inte$pot(d=matrix(, 0, 0), par=inte$par) IsOffset <- attr(fakePOT, "IsOffset") fakePOT <- ensure3Darray(fakePOT) Vnames <- dimnames(fakePOT)[[3]] p <- dim(fakePOT)[3] if(sum(nzchar(Vnames)) < p) Vnames <- if(p == 1) "Interaction" else paste0("Interaction.", 1:p) if(length(IsOffset) < p) IsOffset <- logical(p) ## determine interaction coefficients icoef <- model$icoef if(!any(nzchar(names(icoef)))) names(icoef) <- Vnames ## create fake object fii(inte, icoef, Vnames, IsOffset) } ## contributed by Frederic Lavancier (hacked by Adrian) intensity.zgibbsmodel <- function(X, ..., approx=c("Poisson","DPP")) { approx <- match.arg(approx) fint <- fakefii(X) beta <- X$beta lambda <- switch(approx, Poisson = PoisSaddle(beta, fint), DPP = DPPSaddle(beta, fint)) return(lambda) } spatstat.model/R/predict.ppm.R0000644000176200001440000007054414424137177016004 0ustar liggesusers# # predict.ppm.S # # $Revision: 1.117 $ $Date: 2023/05/02 07:31:44 $ # # predict.ppm() # From fitted model obtained by ppm(), # evaluate the fitted trend or conditional intensity # at a grid/list of other locations # # # ------------------------------------------------------------------- predict.ppm <- local({ ## ## extract undocumented/outdated arguments, and trap others ## xtract <- function(..., newdata=NULL, sumobj=NULL, E=NULL, total=NULL, getoutofjail=FALSE) { if(!is.null(newdata)) warning(paste("The use of the argument", sQuote("newdata"), "is out-of-date. See help(predict.ppm)")) if(!is.null(total)) message(paste("The use of the argument", sQuote("total"), "is out-of-date. 
See help(predict.ppm)")) trap.extra.arguments(..., .Context="In predict.ppm") return(list(sumobj=sumobj, E=E, total=total, getoutofjail=getoutofjail)) } ## ## confidence/prediction intervals for number of points predconfPois <- function(region, object, level, what=c("estimate", "se", "confidence", "prediction"), new.coef=NULL) { what <- match.arg(what) stopifnot(0 < level && level < 1) lam <- predict(object, window=region, new.coef=new.coef) mu.hat <- integral.im(lam) if(what == "estimate") return(mu.hat) mo <- model.images(object, W=as.owin(lam)) ZL <- unlist(lapply(mo, function(z, w) integral.im(eval.im(z * w)), w = lam)) ZL <- matrix(ZL, nrow=1) vc <- vcov(object, new.coef=new.coef) var.muhat <- as.numeric(ZL %*% vc %*% t(ZL)) sd.muhat <- sqrt(var.muhat) if(what == "se") return(sd.muhat) alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) out <- switch(what, confidence = mu.hat + qnorm(pp) * sd.muhat, prediction = qmixpois(pp, mu.hat, sd.muhat, I)) names(out) <- paste0(signif(100 * pp, 3), "%") out } typepublic <- c("trend", "cif", "intensity", "count") typeaccept <- c(typepublic, "lambda", "se", "SE", "covariates") typeuse <- c(typepublic, "cif", "se", "se", "covariates") predict.ppm <- function(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ignore.hardcore=FALSE, ..., dimyx=NULL, eps=NULL, rule.eps=c("adjust.eps","grow.frame","shrink.frame"), new.coef=NULL, check=TRUE, repair=TRUE) { interval <- match.arg(interval) rule.eps <- match.arg(rule.eps) ## extract undocumented arguments xarg <- xtract(...) sumobj <- xarg$sumobj E <- xarg$E total <- xarg$total getoutofjail <- xarg$getoutofjail ## match 'type' argument including 'legacy' options seonly <- FALSE if(missing(type)) type <- type[1] else { if(length(type) > 1) stop("Argument 'type' should be a single value") mt <- pmatch(type, typeaccept) if(is.na(mt)) stop("Argument 'type' should be one of", commasep(sQuote(typepublic), " or ")) type <- typeuse[mt] if(type == "se") { if(!getoutofjail) message(paste("Outdated syntax:", "type='se' should be replaced by se=TRUE;", "then the standard error is predict(...)$se")) type <- "trend" se <- TRUE seonly <- TRUE } } if(!is.null(total)) { message("Outdated argument 'total': use 'window' and set type='count'") type <- "count" if(!is.logical(total)) window <- if(is.tess(total)) total else as.owin(total) } ## model <- object verifyclass(model, "ppm") ## if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(missing(correction) || is.null(correction)) correction <- object$correction fitcoef <- coef(object) if(!is.null(new.coef)) { ## validate coefs if(length(new.coef) != length(fitcoef)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef))) coeffs <- new.coef } else { coeffs <- fitcoef } ## find out what kind of model it is if(is.null(sumobj)) sumobj <- summary(model, quick="entries") # undocumented hack! 
# stationary <- sumobj$stationary poisson <- sumobj$poisson marked <- sumobj$marked multitype <- sumobj$multitype notrend <- sumobj$no.trend changedcoef <- sumobj$changedcoef || !is.null(new.coef) trivial <- poisson && notrend need.covariates <- sumobj$uses.covars covnames.needed <- sumobj$covars.used if(sumobj$antiquated) warning("The model was fitted by an out-of-date version of spatstat") ## determine mark space if(marked) { if(!multitype) stop("Prediction not yet implemented for general marked point processes") else types <- levels(marks(sumobj$entries$data)) } ## For Poisson models cif=intensity=trend if(poisson && type %in% c("cif", "intensity")) type <- "trend" ## ............. trap un-implemented cases ................... ## Standard errors not yet available for cif, intensity if(se && type %in% c("cif", "intensity")) stop(paste("Standard error for", type, "is not yet implemented"), call.=FALSE) ## Intervals are only available for unmarked Poisson models if(type == "count" && interval != "none" && (marked || !poisson)) { stop(paste0(interval, " intervals for counts are only implemented for", if(marked) " unmarked" else "", if(!poisson) " Poisson", " models"), call.=FALSE) } if(interval == "prediction" && type != "count") stop("Prediction intervals are only available for type='count'", call.=FALSE) if(interval == "confidence" && type %in% c("intensity", "cif")) stop(paste("Confidence intervals are not yet available for", type), call.=FALSE) estimatename <- if(interval == "none") "estimate" else interval ## ............. start computing ............................. ## Total count in a region if(type == "count") { ## point or interval estimate, optionally with SE if(is.null(window)) { ## domain of the original data if(!seonly) est <- predconfPois(NULL, model, level, estimatename, new.coef=new.coef) if(se) sem <- predconfPois(NULL, model, level, "se", new.coef=new.coef) } else if(is.tess(window)) { ## quadrats tilz <- tiles(window) if(!seonly) { est <- lapply(tilz, predconfPois, object=model, level=level, what=estimatename, new.coef=new.coef) est <- switch(interval, none = unlist(est), confidence =, prediction = t(simplify2array(est))) } if(se) sem <- sapply(tilz, predconfPois, object=model, level=level, what="se", new.coef=new.coef) } else { ## window if(!seonly) est <- predconfPois(window, model, level, estimatename, new.coef=new.coef) if(se) sem <- predconfPois(window, model, level, "se", new.coef=new.coef) } if(!se) return(est) if(seonly) return(sem) result <- list(est, sem) names(result) <- c(estimatename, "se") return(result) } ## ..... Predict a spatial function ....... 
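## Illustrative calls that reach this branch (not run; hypothetical model):
##   fit <- ppm(cells ~ x)
##   predict(fit)                      ## image of fitted trend on a grid
##   predict(fit, type="cif")          ## image of conditional intensity
##   predict(fit, locations=df)        ## values at rows (x,y) of data frame 'df'
##   predict(fit, se=TRUE)             ## list with components estimate, se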
if(interval != "none") { ## Prepare for confidence interval alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) ci.names <- paste0(signif(100 * pp, 3), "%") ci.q <- qnorm(pp) } ## determine what kind of output is required: ## (arguments present) (output) ## window, ngrid -> image ## locations (mask) -> image ## locations (image) -> image ## locations (rectangle) -> treat locations as 'window' ## locations (polygonal) -> treat locations as 'window' ## locations (other) -> data frame ## if(is.im(locations)) locations <- as.owin(locations) if(is.null(window) && is.owin(locations) && !is.mask(locations)) { window <- locations locations <- NULL } #' incompatible: if(!is.null(locations)) { #' other arguments are incompatible offending <- c(!is.null(ngrid), !is.null(dimyx), !is.null(eps)) if(any(offending)) { offenders <- c("grid", "dimyx", "eps")[offending] nbad <- sum(offending) stop(paste(ngettext(nbad, "The argument", "The arguments"), commasep(sQuote(offenders)), ngettext(nbad, "is", "are"), "incompatible with", sQuote("locations")), call.=FALSE) } } #' equivalent: if(!is.null(ngrid) && !is.null(dimyx)) warning(paste("The arguments", sQuote("ngrid"), "and", sQuote("dimyx"), "are equivalent: only one should be given"), call.=FALSE) ngrid <- ngrid %orifnull% dimyx if(is.null(ngrid) && is.null(locations)) ## use regular grid ngrid <- rev(spatstat.options("npixel")) want.image <- is.null(locations) || is.mask(locations) make.grid <- !is.null(ngrid) ## ############## Determine prediction points ##################### if(!want.image) { ## (A) list of (x,y) coordinates given by `locations' xpredict <- locations$x ypredict <- locations$y if(is.null(xpredict) || is.null(ypredict)) { xy <- xy.coords(locations) xpredict <- xy$x xpredict <- xy$y } if(is.null(xpredict) || is.null(ypredict)) stop(paste("Don't know how to extract x,y coordinates from", sQuote("locations"))) ## marks if required if(marked) { ## extract marks from data frame `locations' mpredict <- locations$marks if(is.null(mpredict)) stop(paste("The argument", sQuote("locations"), "does not contain a column of marks", "(required since the fitted model", "is a marked point process)")) if(is.factor(mpredict)) { ## verify mark levels match those in model if(!isTRUE(all.equal(levels(mpredict), types))) { if(all(levels(mpredict) %in% types)) mpredict <- factor(mpredict, levels=types) else stop(paste("The marks in", sQuote("locations"), "do not have the same levels as", "the marks in the model")) } } else { ## coerce to factor if possible if(all(mpredict %in% types)) mpredict <- factor(mpredict, levels=types) else stop(paste("The marks in", sQuote("locations"), "do not have the same values as the marks in the model")) } } } else { ## (B) pixel grid of points if(!make.grid) ## (B)(i) The grid is given in `locations' masque <- locations else { ## (B)(ii) We have to make the grid ourselves ## Validate ngrid if(!is.null(ngrid)) { if(!is.numeric(ngrid)) stop("ngrid should be a numeric vector") ngrid <- ensure2vector(ngrid) } if(is.null(window)) window <- sumobj$entries$data$window masque <- as.mask(window, dimyx=ngrid, eps=eps, rule.eps=rule.eps) } ## Hack ----------------------------------------------- ## gam with lo() will not allow extrapolation beyond the range of x,y ## values actually used for the fit. 
Check this: tums <- termsinformula(model$trend) if(any( tums == "lo(x)" | tums == "lo(y)" | tums == "lo(x,y)" | tums == "lo(y,x)") ) { ## determine range of x,y used for fit gg <- model$internal$glmdata gxr <- range(gg$x[gg$SUBSET]) gyr <- range(gg$y[gg$SUBSET]) ## trim window to this range masque <- intersect.owin(masque, owin(gxr, gyr)) } ## ------------------------------------ End Hack ## ## Finally, determine x and y vectors for grid rxy <- rasterxy.mask(masque, drop=TRUE) xpredict <- rxy$x ypredict <- rxy$y } ## ################ CREATE DATA FRAME ########################## ## ... to be passed to predict.glm() ## ## First the x, y coordinates if(!marked) newdata <- data.frame(x=xpredict, y=ypredict) else if(!want.image) newdata <- data.frame(x=xpredict, y=ypredict, marks=mpredict) else { ## replicate nt <- length(types) np <- length(xpredict) xpredict <- rep.int(xpredict,nt) ypredict <- rep.int(ypredict,nt) mpredict <- rep.int(types, rep.int(np, nt)) mpredict <- factor(mpredict, levels=types) newdata <- data.frame(x = xpredict, y = ypredict, marks=mpredict) } ## ## Next the external covariates, if any ## if(need.covariates) { if(is.null(covariates)) { ## Extract covariates from fitted model object ## They have to be images. oldcov <- model$covariates if(is.null(oldcov)) stop("External covariates are required, and are not available") if(is.data.frame(oldcov)) stop(paste("External covariates are required.", "Prediction is not possible at new locations")) covariates <- oldcov } ## restrict to covariates actually required for formula covariates <- if(is.data.frame(covariates)) { covariates[,covnames.needed, drop=FALSE] } else covariates[covnames.needed] covfunargs <- model$covfunargs covariates.df <- mpl.get.covariates(covariates, list(x=xpredict, y=ypredict), "prediction points", covfunargs) newdata <- cbind(newdata, covariates.df) } ## ###### Set up prediction variables ################################ ## ## Provide SUBSET variable ## if(is.null(newdata$SUBSET)) newdata$SUBSET <- rep.int(TRUE, nrow(newdata)) ## ## Dig out information used in Berman-Turner device ## Vnames: the names for the ``interaction variables'' ## glmdata: the data frame used for the glm fit ## glmfit: the fitted glm object ## if(!trivial) { Vnames <- model$internal$Vnames vnameprefix <- model$internal$vnameprefix glmdata <- getglmdata(model) glmfit <- getglmfit(model) if(object$method=="logi") newdata$.logi.B <- rep(glmdata$.logi.B[1], nrow(newdata)) } ## Undocumented secret exit if(type == "covariates") return(list(newdata=newdata, mask=if(want.image) masque else NULL)) ## ########## COMPUTE PREDICTION ############################## ## ## Compute the predicted value z[i] for each row of 'newdata' ## Store in a vector z and reshape it later ## ## ## ############################################################# needSE <- se || (interval != "none") attribeauts <- list() if(trivial) { ## ########### UNIFORM POISSON PROCESS ##################### lambda <- exp(coeffs[[1]]) if(needSE) { npts <- nobs(model) se.lambda <- lambda/sqrt(npts) } switch(interval, none = { z <- rep.int(lambda, nrow(newdata)) }, confidence = { z <- matrix(lambda + se.lambda * ci.q, byrow=TRUE, nrow=nrow(newdata), ncol=2, dimnames=list(NULL, ci.names)) }, stop("Internal error: unreached")) if(se) zse <- rep.int(se.lambda, nrow(newdata)) ## ############################################################## } else if((type %in% c("trend", "intensity")) || poisson) { ## ## ########## COMPUTE TREND ################################### ## ## set explanatory 
variables to zero ## zeroes <- numeric(nrow(newdata)) for(vn in Vnames) newdata[[vn]] <- zeroes ## ## predict trend ## z <- lambda <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## if(type == "intensity") z <- PoisSaddle(z, fitin(model)) if(needSE) { ## compute standard error, needed for some purpose if(inherits(glmfit, "gam")) { ## compute SE using predict.gam if(!is.null(new.coef)) warning("new.coef ignored in standard error calculation") SE <- predict(glmfit, newdata=newdata, type="response", se.fit=TRUE)[[2]] } else { ## Use vcov.ppm ## extract variance-covariance matrix of parameters vc <- vcov(model, new.coef=new.coef) ## compute model matrix fmla <- rhs.of.formula(formula(glmfit)) # mf <- model.frame(fmla, newdata, ..., na.action=na.pass) # mm <- model.matrix(fmla, mf, ..., na.action=na.pass) mf <- model.frame(fmla, newdata, na.action=na.pass) mm <- model.matrix(fmla, mf, na.action=na.pass) if(nrow(mm) != nrow(newdata)) stop("Internal error: row mismatch in SE calculation") ## compute relative variance = diagonal of quadratic form if(ncol(mm) != ncol(vc)) stop("Internal error: column mismatch in SE calculation") vv <- quadform(mm, vc) ## standard error SE <- lambda * sqrt(vv) } ## Save desired quantities depending on SE if(se) zse <- SE if(interval == "confidence") { z <- lambda + outer(SE, ci.q, "*") colnames(z) <- ci.names } } ## ############################################################ } else if(type == "cif" || type =="lambda") { ## ####### COMPUTE FITTED CONDITIONAL INTENSITY ################ ## ## set up arguments inter <- model$interaction if(!missing(X)) stopifnot(is.ppp(X)) W <- as.owin(data.ppm(model)) U <- ppp(newdata$x, y=newdata$y, window=W, check=FALSE) if(marked) marks(U) <- newdata$marks ## determine which prediction points are data points if(is.null(E)) E <- equalpairs(U, X, marked) ## evaluate interaction Vnew <- evalInteraction(X, U, E, inter, correction=correction, splitInf=ignore.hardcore, check=check) if(!ignore.hardcore) { ## Negative infinite values of potential signify cif = zero cif.equals.zero <- matrowany(Vnew == -Inf) } else { ## returned as attribute, unless vacuous cif.equals.zero <- attr(Vnew, "-Inf") %orifnull% logical(nrow(Vnew)) } attribeauts <- c(attribeauts, list(isZero=cif.equals.zero)) ## Insert the potential into the relevant column(s) of `newdata' if(ncol(Vnew) == 1) { ## Potential is real valued (Vnew is a column vector) ## Assign values to a column of the same name in newdata newdata[[Vnames]] <- as.vector(Vnew) ## } else if(is.null(avail <- colnames(Vnew))) { ## Potential is vector-valued (Vnew is a matrix) ## with unnamed components. ## Assign the components, in order of their appearance, ## to the columns of newdata labelled Vnames[1], Vnames[2],... for(i in seq_along(Vnames)) newdata[[Vnames[i] ]] <- Vnew[,i] ## } else { ## Potential is vector-valued (Vnew is a matrix) ## with named components. 
## Match variables by name if(all(Vnames %in% avail)) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , vn] } else if(all(Vnames %in% (Pavail <- paste0(vnameprefix, avail)))) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , match(vn, Pavail)] } else stop(paste( "Internal error: unable to match names", "of available interaction terms", commasep(sQuote(avail)), "to required interaction terms", commasep(sQuote(Vnames)) ), call.=FALSE) } ## invoke predict.glm or compute prediction z <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## reset to zero if potential was zero if(!ignore.hardcore && any(cif.equals.zero)) z[cif.equals.zero] <- 0 ## ############################################################### } else stop(paste("Unrecognised type", sQuote(type))) ## ############################################################### ## ## reshape the result ## if(!want.image) { if(!se) { z <- as.vector(z) attributes(z) <- c(attributes(z), attribeauts) out <- z } else if(seonly) { out <- as.vector(zse) } else { z <- as.vector(z) attributes(z) <- c(attributes(z), attribeauts) out <- list(z, as.vector(zse)) names(out) <- c(estimatename, "se") } } else { ## make an image of the right shape and value imago <- as.im(masque, value=1.0) if(!marked && interval=="none") { ## single image if(!se) { out <- imago ## set entries out[] <- z } else if(seonly) { out <- imago out[] <- zse } else { est <- std <- imago est[] <- z std[] <- zse out <- list(est, std) names(out) <- c(estimatename, "se") } } else if(interval != "none") { ## list of 2 images for CI if(!seonly) { hi <- lo <- imago hi[] <- z[,1] lo[] <- z[,2] est <- solist(hi, lo) names(est) <- ci.names } if(se) { std <- imago std[] <- zse } if(!se) { out <- est } else if(seonly) { out <- std } else { out <- list(est, std) names(out) <- c(estimatename, "se") } } else { ## list of images, one for each level of marks out <- list() for(i in seq_along(types)) { outi <- imago ## set entries outi[] <- z[newdata$marks == types[i]] out[[i]] <- outi } out <- as.solist(out) names(out) <- as.character(types) } } ## ## FINISHED ## return(out) } predict.ppm }) #################################################################### # # compute pointwise uncertainty of fitted intensity # model.se.image <- function(fit, W=as.owin(fit), ..., what="sd", new.coef=NULL) { if(!is.poisson.ppm(fit)) stop("Only implemented for Poisson point process models", call.=FALSE) what <- pickoption("option", what, c(sd="sd", var="var", cv="cv", CV="cv", ce="ce", CE="ce")) W <- as.mask(as.owin(W)) # variance-covariance matrix of coefficients vc <- vcov(fit, new.coef=new.coef) np <- dim(vc)[1] # extract sufficient statistic for each coefficient mm <- model.images(fit, W, ...) 
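# Each mm[[i]] is the image of the i-th canonical covariate Z_i(u); the
# relative variance at a location u is the quadratic form
# t(Z(u)) %*% vc %*% Z(u), which the loop below accumulates term by term.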
# compute fitted intensity lam <- predict(fit, locations=W, new.coef=new.coef) # initialise resulting image U <- as.im(W) U[] <- 0 # compute pointwise matrix product, assuming vc is symmetric for(i in 1:np) { Si <- mm[[i]] aii <- vc[i,i] U <- eval.im(U + aii * Si^2) if(i > 1) { for(j in 1:(i-1)) { Sj <- mm[[j]] aij <- vc[i,j] twoaij <- 2 * aij U <- eval.im(U + twoaij * Si * Sj) } } } # the matrix product is the relative variance (CV) if(what=="cv") return(U) # relative sd if(what=="ce") { U <- eval.im(sqrt(U)) return(U) } # multiply by squared intensity to obtain variance U <- eval.im(U * lam^2) # variance if(what=="var") return(U) # compute SD and return U <- eval.im(sqrt(U)) return(U) } GLMpredict <- function(fit, data, coefs, changecoef=TRUE, type=c("response", "link")) { type <- match.arg(type) if(!changecoef && all(is.finite(unlist(coefs)))) { answer <- predict(fit, newdata=data, type=type) } else { if(inherits(fit, "gam")) stop("This calculation is not supported for GAM fits", call.=FALSE) # do it by hand fmla <- formula(fit) data$.mpl.Y <- 1 fram <- model.frame(fmla, data=data, na.action=NULL) # linear predictor mm <- model.matrix(fmla, data=fram) # ensure all required coefficients are present coefs <- fill.coefs(coefs, colnames(mm)) ok <- is.finite(coefs) # if(all(ok)) { eta <- as.vector(mm %*% coefs) } else { #' ensure 0 * anything = 0 eta <- as.vector(mm[ , ok, drop=FALSE] %*% coefs[ok]) for(j in which(!ok)) { mmj <- mm[, j] nonzero <- is.na(mmj) | (mmj != 0) if(any(nonzero)) eta[nonzero] <- eta[nonzero] + mmj[nonzero] * coefs[j] } } # offset mo <- model.offset(fram) if(!is.null(mo)) { if(is.matrix(mo)) mo <- apply(mo, 1, sum) eta <- mo + eta } switch(type, link = { answer <- eta }, response = { linkinv <- family(fit)$linkinv answer <- linkinv(eta) }) } # Convert from fitted logistic prob. to lambda for logistic fit if(type == "response" && family(fit)$family=="binomial") answer <- fit$data$.logi.B[1] * answer/(1-answer) return(answer) } # An 'equalpairs' matrix E is needed in the ppm class # to determine which quadrature points and data points are identical # (not just which quadrature points are data points). # It is a two-column matrix specifying all the identical pairs. # The first column gives the index of a data point (in the data pattern X) # and the second column gives the corresponding index in U. # The following function determines the equal pair information # from the coordinates (and marks) of U and X alone; # it should be used only if we can't figure out this information otherwise. 
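# Sketch of typical use (not run; 'Q' is a hypothetical quadrature scheme):
#   U <- union.quad(Q)
#   E <- equalpairs(U, Q$data)
#   # one row per coincident pair: E[,1] indexes the data pattern,
#   # E[,2] indexes U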
equalpairs <- function(U, X, marked=FALSE) { nn <- nncross(U, X) coincides <- (nn$dist == 0) Xind <- nn$which[coincides] Uind <- which(coincides) if(marked) { samemarks <- (marks(X)[Xind] == marks(U)[Uind]) Xind <- Xind[samemarks] Uind <- Uind[samemarks] } return(cbind(Xind, Uind)) } fill.coefs <- function(coefs, required) { # 'coefs' should contain all the 'required' values coefsname <- short.deparse(substitute(coefs)) nama <- names(coefs) if(is.null(nama)) { #' names cannot be matched if(length(coefs) != length(required)) stop(paste("The unnamed argument", sQuote(coefsname), "has", length(coefs), "entries, but", length(required), "are required"), call.=FALSE) # blithely assume they match 1-1 names(coefs) <- required return(coefs) } stopifnot(is.character(required)) if(identical(nama, required)) return(coefs) inject <- match(nama, required) if(any(notneeded <- is.na(inject))) { warning(paste("Internal glitch: some coefficients were not required:", commasep(sQuote(nama[notneeded]))), call.=FALSE) coefs <- coefs[!notneeded] nama <- names(coefs) inject <- match(nama, required) } y <- numeric(length(required)) names(y) <- required y[inject] <- coefs return(y) } spatstat.model/R/suffstat.R0000644000176200001440000000622114331173073015402 0ustar liggesusers# # suffstat.R # # calculate sufficient statistic # # $Revision: 1.19 $ $Date: 2022/11/03 11:08:33 $ # # suffstat <- function(model, X=data.ppm(model)) { cl <- sys.call() callstring <- short.deparse(cl) verifyclass(model, "ppm") if(!missing(X)) verifyclass(X, "ppp") else X <- NULL inter <- model$interaction func <- if(is.null(inter) || is.poisson(inter)) suffstat.poisson else if(!is.null(ssinter <- inter$suffstat)) ssinter else if(!is.null(ssfamily <- inter$family$suffstat)) ssfamily else suffstat.generic return(func(model, X, callstring)) } suffstat.generic <- function(model, X=NULL, callstring="suffstat.generic") { # This should work for an arbitrary ppm # since it uses the fundamental relation between # conditional intensity and likelihood. # But it is computationally intensive. 
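# The computation relies on the identity S(x) = sum_i h(x[i] | x[1..i-1]),
# where S is the canonical sufficient statistic and h(u | x) is the
# canonical statistic of the conditional intensity: points are added one
# at a time and their contributions accumulated.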
verifyclass(model, "ppm") coefnames <- names(coef(model)) if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") # refit the model to new data (only to determine which data points are used) modelX <- update(model, X, improve.type="none") } # determine which data points contribute to pseudolikelihood contribute <- getppmdatasubset(modelX) if(!any(contribute)) # result is zero vector return(0 * coef(model)) # Add points one-by-one # If there are points which don't contribute, condition on them use <- which(contribute) dontuse <- which(!contribute) for(i in seq_along(use)) { prior <- if(i == 1) c() else use[1:(i-1)] prior <- c(dontuse, prior) Xprior <- X[prior] Xcurrent <- X[use[i]] mom <- partialModelMatrix(Xprior, Xcurrent, model, "suffstat") lastrow <- length(prior) + 1 momrow <- mom[lastrow, ] if(i == 1) result <- momrow else result <- momrow + result } names(result) <- coefnames attr(result, "mplsubset") <- NULL return(result) } killinteraction <- function(model) { verifyclass(model, "ppm") ispoisson <- summary(model, quick=TRUE)$poisson if(ispoisson) return(model) # surgery required newmodel <- model newmodel$interaction <- NULL if(!is.null(Vnames <- model$internal$Vnames)) { matches <- names(model$coef) %in% Vnames newmodel$coef <- model$coef[!matches] newmodel$internal$Vnames <- NULL } # the other 'internal' stuff may still be wrong (or `preserved') return(newmodel) } suffstat.poisson <- function(model, X, callstring="suffstat.poisson") { verifyclass(model, "ppm") if(is.null(X)) X <- data.ppm(model) else verifyclass(X, "ppp") if(!is.poisson(model)) stop("Model is not a Poisson process") Empty <- X[numeric(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat") nmom <- ncol(mom) ncoef <- length(coef(model)) if(nmom != ncoef) stop("Internal error: number of columns of model matrix does not match number of coefficients in fitted model") if(nmom > 1 && any(colnames(mom) != names(coef(model)))) warning("Internal error: mismatch between column names of model matrix and names of coefficient vector in fitted model") o1sum <- apply(mom, 2, sum) return(o1sum) } spatstat.model/R/anova.mppm.R0000644000176200001440000003200714331173074015621 0ustar liggesusers# # anova.mppm.R # # $Revision: 1.25 $ $Date: 2022/04/26 07:20:39 $ # anova.mppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL tests.choices <- c("Chisq", "LRT", "Rao", "score", "F", "Cp") tests.avail <- c("Chisq", "LRT", "Rao", "score") tests.random <- c("Chisq", "LRT") tests.Gibbs <- c("Chisq", "LRT") totalnquad <- function(fit) sum(sapply(quad.mppm(fit), n.quad)) totalusedquad <- function(fit) with(fit$Fit$moadf, sum(.mpl.SUBSET)) fmlaString <- function(object) { paste(as.expression(formula(object))) } creatorString <- function(inter) { inter$creator } creatorStrings <- function(interlist) { unique(sapply(interlist, creatorString)) } interString <- function(object) { inter <- object$Inter$interaction if(is.interact(inter)) { z <- creatorString(inter) } else if(is.hyperframe(inter)) { acti <- active.interactions(object) actinames <- colnames(acti)[apply(acti, 2, any)] z <- unique(unlist(lapply(actinames, function(a, h=inter) { unique(creatorStrings(h[,a,drop=TRUE])) }))) } else { z <- unique(object$Inter$processes) } paste(z, collapse=", ") } anova.mppm <- function(object, ..., test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) { thecall <- sys.call() gripe <- if(warn) do.gripe else dont.gripe argh <- list(...) 
## trap outmoded usage if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) ## Check each model is an mppm object if(!all(sapply(objex, is.mppm))) stop(paste("Arguments must all be", sQuote("mppm"), "objects")) ## are all models Poisson? pois <- all(sapply(objex, is.poisson.mppm)) gibbs <- !pois ## single/multiple objects given singleobject <- (length(objex) == 1L) expandedfrom1 <- FALSE if(!singleobject) { ## several objects given ## require short names of models for output argnames <- names(thecall) %orifnull% rep("", length(thecall)) retain <- is.na(match(argnames, c("test", "adjust", "fine", "warn", "override"))) shortcall <- thecall[retain] modelnames <- vapply(as.list(shortcall[-1L]), short.deparse, "") } else if(gibbs) { ## single Gibbs model given. ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) envy <- environment(terms(object)) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) calln <- update(object, fmla, evaluate=FALSE) objex[[n]] <- eval(calln, envy) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## All models fitted using same method? Fits <- lapply(objex, getElement, name="Fit") fitter <- unique(unlist(lapply(Fits, getElement, name="fitter"))) if(length(fitter) > 1) stop(paste("Models are incompatible;", "they were fitted by different methods (", paste(fitter, collapse=", "), ")" )) ## Choice of test if(fitter == "glmmPQL") { # HACK <- spatstat.options("developer") # if(!HACK) # stop("Sorry, analysis of deviance is currently not supported for models with random effects, due to changes in the nlme package", call.=FALSE) ## anova.lme requires different format of `test' argument ## and does not recognise 'dispersion' if(is.null(test)) test <- FALSE else { test <- match.arg(test, tests.choices) if(!(test %in% tests.random)) stop(paste("Test", dQuote(test), "is not implemented for random effects models")) test <- TRUE } if(adjust) { warn.once("AnovaMppmLMEnoadjust", "adjust=TRUE was ignored; not supported for random-effects models") adjust <- FALSE } } else if(!is.null(test)) { test <- match.arg(test, tests.choices) if(!(test %in% tests.avail)) stop(paste("test=", dQuote(test), "is not yet implemented"), call.=FALSE) if(!pois && !(test %in% tests.Gibbs)) stop(paste("test=", dQuote(test), "is only implemented for Poisson models"), call.=FALSE) } ## Extract glm fit objects fitz <- lapply(Fits, getElement, name="FIT") ## Ensure all models were fitted using GLM, or all were fitted using GAM isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") objex <- lapply(objex, update, use.gam=TRUE) } ## Finally do the appropriate ANOVA opt <- list(test=test) if(fitter == "glmmPQL") { ## anova.lme does not recognise 'dispersion' argument ## Disgraceful hack: ## Modify object to conform to requirements of anova.lme fitz <- lapply(fitz, stripGLMM) for(i in seq_along(fitz)) { call.i <- getCall(objex[[i]]) names(call.i) <- sub("formula", "fixed", names(call.i)) fitz[[i]]$call <- call.i } warning("anova is not strictly valid for penalised 
quasi-likelihood fits") } else { ## normal case opt <- append(opt, list(dispersion=1)) } result <- try(do.call(anova, append(fitz, opt))) if(inherits(result, "try-error")) stop("anova failed") if(fitter == "glmmPQL" && !singleobject && length(modelnames) == nrow(result)) row.names(result) <- modelnames ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model nq <- totalusedquad(objex[[1L]]) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) ## change names of models fmlae <- unlist(lapply(objex, fmlaString)) intrx <- unlist(lapply(objex, interString)) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, "\t", intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && !pois) { ## issue warning, if not already given if(warn) warn.once("anovaMppmAdjust", "anova.mppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="all", new.coef=thetaDot, fine=fine) # J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) # H <- if(!logi) JH$A1 else JH$Slog J <- JH$fisher H <- JH$internals$A1 G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { subs <- subfits(bigger, new.coef=thetaDot) Res <- lapply(subs, residuals, type="score", drop=TRUE, dropcoef=TRUE) #' pseudoscore for each row Ueach <- lapply(Res, integral.msr) #' total pseudoscore maps <- mapInterVars(bigger, subs) U <- sumMapped(Ueach, maps, 0*thetaDot) #' apply adjustment Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ## ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo HooUo <- Hoo %*% Uo ScoreStat <- t(HooUo) %*% 
solve(Goo) %*% HooUo ## cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) cfac[i] <- ScoreStat/(t(HooUo) %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } return(result) } sumMapped <- function(xlist, maps, initial) { result <- initial wantnames <- names(initial) for(i in seq_along(xlist)) { x <- xlist[[i]] gotnames <- names(x) unchanged <- gotnames %in% wantnames if(any(unchanged)) { unames <- gotnames[unchanged] result[unames] <- result[unames] + x[unames] x <- x[!unchanged] gotnames <- names(x) } cmap <- maps[[i]] mapinputs <- names(cmap) for(j in seq_along(x)) { inputname <- gotnames[j] k <- match(inputname, mapinputs) if(is.na(k)) { warning("Internal error: cannot map variable", sQuote(inputname), "from submodel to full model") } else { targetnames <- cmap[[k]] if(length(unknown <- setdiff(targetnames, wantnames)) > 0) { warning("Internal error: unexpected target", ngettext(length(unknown), "variable", "variables"), commasep(sQuote(unknown))) targetnames <- intersect(targetnames, wantnames) } result[targetnames] <- result[targetnames] + x[inputname] } } } result } sumcompatible <- function(xlist, required) { result <- numeric(length(required)) names(result) <- required for(i in seq_along(xlist)) { x <- xlist[[i]] namx <- names(x) if(!all(ok <- (namx %in% required))) stop(paste("Internal error in sumcompatible:", "list entry", i, "contains unrecognised", ngettext(sum(!ok), "value", "values"), commasep(sQuote(namx[!ok]))), call.=FALSE) inject <- match(namx, required) result[inject] <- result[inject] + x } return(result) } anova.mppm }) spatstat.model/MD50000644000176200001440000005474214515425132013542 0ustar liggesusers1cb2bdba9ecec36c7ca5649dcabe1190 *DESCRIPTION 7d2d9ddd815fc4a6f0f5888cd2d093f3 *NAMESPACE cc06a65d406b6dd426dabe7b36b41776 *NEWS 1cb9c3f0b25b431722e03617f05b1aac *R/First.R 52520d3b1f07aa126ad8d36dd1b4a794 *R/Gcom.R 1101d753c8f35af184bfa8ff36a64486 *R/Gres.R 9c1ed0282f2dfd541fe9eb7c55882630 *R/Kcom.R a8742620874e75312563d5844b76ac3d *R/Kmodel.R abbf6db440f53371a4f757cb17274de6 *R/Kres.R b58a1f9410f53357b581a729e8e2fadb *R/aaaa.R 0753395efe1ed33003e05cf9a808ff57 *R/addvar.R b47091a376ad8650a9861d61003b3b46 *R/anova.mppm.R c95ffd3d971d61e1c60b038741fc1916 *R/anova.ppm.R 995e5e87814f942ef22477532ffb1637 *R/areainter.R af17aae404df2b28967e96dcdfac3be1 *R/auc.R d4db6751867578ce0bcb408cb1284ec7 *R/augment.msr.R 7270a0b79a1555829f4a7b6417d31d33 *R/badgey.R a0fae8aec838875945ad41c0a9703ba0 *R/bc.R 91ee29047e751be76c75241263cb1297 *R/bermantest.R 95c7c072d25fae9c3d6b29e4a458685e *R/cdf.test.mppm.R 4025fc98f7b2119ca60d331433366687 *R/cdftest.R f305a9e38310d5f63f7109edee7f3bde *R/clusterfunctions.R 26860886cc833bbc52fdec5e60eadc04 *R/compareFit.R a00723d970d1312c47c47dd2ada3ba37 *R/concom.R 34c4f28d9bb553500e9e68fef3a95c2b *R/detPPF-class.R a9e236eec70c2542bb8a64743f3c2fc9 *R/detpointprocfamilyfun.R 19f251b81e75d58d5d7445baca29bfb3 *R/dffit.R 98b814947de7b92b2158e36cb26e8b0c *R/dg.R 73f0d1ebdedb3a1a21ba95a9683d1c98 *R/dgs.R bed9220a5fa073e11ec9d3b4a2029d84 *R/diagnoseppm.R 73c369041bbbe8aab23a42f969e33e2e *R/dppm.R 
3e07b7fd60e16cd4fe992e5f9a13c54b *R/dppmclass.R 8f2bcae6d53854add940dd4f128076b9 *R/dummify.R deb500eb5d874c0415fb4416c8fe4948 *R/eem.R a816a44b139137b878fb2bcc08c5e4b8 *R/effectfun.R ca6a7874ed6bf1f3c388318cac2a3ac3 *R/enet.R b78783eea67bb9182187de0cd9d88ec3 *R/envelope.R 50521f5521e7a9d7890430f29cd5019d *R/evidence.R 47db1eb89c59ac77194b4e0f993ca57d *R/evidenceslrm.R c539b3e2b2f0419923d74f327368d15d *R/exactMPLEstrauss.R a59cbdaba5b58d814afbf184318d06e2 *R/fii.R 27680db011a0aa15048334fc76718816 *R/fiksel.R 333540c7557558e25879eb3854199f8d *R/fitted.mppm.R 5a812ad278b44340b55d6437b3732e6c *R/fitted.ppm.R a7445ec4197d769e1351e747f8296edf *R/geyer.R dcd5054104d976456da38e2f35169965 *R/hackglmm.R 2f40a714ac26a4b8158587d748656a5a *R/hardcore.R f1e16ee9c975eda27002f44980d1ea57 *R/harmonic.R 0fb7e0bb6cdf5a5a43a68bf3f63b04c4 *R/hierarchy.R 138f65f4f2108c979303b9847c39c238 *R/hierhard.R 5d2df2c8899886a66549d2fdc93ed833 *R/hierpair.family.R 3c1a28af26ec7ce13f8b8176dc74b64f *R/hierstrauss.R ca5f41813eb9ada2538c71d9670e6919 *R/hierstrhard.R fd320f7fbd34c35e8d6585f5ca9de3ac *R/ho.R 179509c56123d403f7fa317729b4efc1 *R/hybrid.R 51f886a93b22602995481e91600bc048 *R/hybrid.family.R 0427fe10b8680200e8590c7e5e854d8b *R/ic.kppm.R e1dc822473131ceb13b8c9b458f7bc35 *R/inforder.family.R cb4fd42bce22250266106ec3fa1200b9 *R/intensity.ppm.R a0a746189a6b7ffd4f2dc02cd529ba69 *R/interact.R b1ffe51e9b04c84a86bb37e0a0b18e89 *R/interactions.R 4ecfc8a655d6c827da464741771443d2 *R/ippm.R 28efc8c34c3e37e4c4f8dd456549f069 *R/is.R ed338d0a0711cb1bf1f6ad603ecc966b *R/kppm.R b941d940d5c3010b7d6d58105ca42c49 *R/lennard.R 9caf08601603120f2f7b5184cff7b0f1 *R/leverage.R ef71edad32656c539a8828b26e598cf9 *R/logistic.R 022dc30686800096e0f3ddedb9bee8ce *R/lurking.R fae8105e5c72c1e1e26e3dcd7e0cb083 *R/lurkmppm.R ce9e429b1c61227fb4a0e2af05ec297f *R/makepspace.R d5be568db57d12fa3a2b4711a5b60b8d *R/measures.R bd6af9f08e0f086ce497f9c7aeb3cffd *R/mincontrast.R 9649ecae60b096f55b55ac25acc84674 *R/model.depends.R 83a83501cf9f4c5d374c89116cbcca70 *R/mpl.R f6a9b3a9e70733bec7dc90e34e5c7250 *R/mppm.R 1ac61059562f1d8dafb26f828841db0e *R/multihard.R 21dee90242243823e6504b70762e3135 *R/multistrauss.R b66e29f48374f2e7c2b4dc9e1465a0b1 *R/multistrhard.R 17bcec6f5bed5bdbbc0db9db982c6438 *R/newformula.R 34eb39eb17e089bdd2df8f659698c8ea *R/objsurf.R 0579ac687f57e918145495b397827a03 *R/ord.R 351116d5be6f0c962874e927ccf6c416 *R/ord.family.R f48d82982ecb34d893bfce2ef96a8a09 *R/ordthresh.R 2777bc6953c191af22a9149102c20580 *R/pairpiece.R 2938ce29be6f87913b542c40abf23b76 *R/pairsat.family.R b9643bbc4c4e7a336fcc535afc002c58 *R/pairwise.R 827bdad041413785df5e4de9b294ab61 *R/pairwise.family.R b2b283260e932005e262006bc8945b36 *R/palmdiagnose.R 4e50e2c71c4289770e0ce270e06146bf *R/panysib.R 7e219c0e44487ff76d3c06bb86a48984 *R/parameters.R 10a2ecf76550b1e50adc72a515f770aa *R/parres.R 7e1de57c9c386631f2ef7313259883d8 *R/penttinen.R c261ac7062c277d45465c58714bc0870 *R/percy.R 4f83c29426d706bade84cd79595c0073 *R/plot.mppm.R bebdfb5dac9c359ec09f312338c20969 *R/plot.plotppm.R 125840a6a0920802ff82d6989a807046 *R/plot.ppm.R fd63caf54f403f547b8847dab3b5cab8 *R/poisson.R c9429e1e2b5fdc83b774e7f7d9d0b110 *R/poissonfitsbetter.R eff91a3e3e11682e74644666801c4fc9 *R/polynom.R 89972386d36e629c3e0fad9a89d1c6f4 *R/ppm.R e19e95e2c7dc166c79f27050ba577857 *R/ppmclass.R 27c8724f9e862d6b920622c7eb0ae218 *R/ppmcovariates.R e1aba0074b87055e1dbb42e2de8b51e2 *R/predict.ppm.R b16a5912c2c3e79d2868d90d8cf4dd2c *R/predictmppm.R 45c0c1f40e4ab8eb773e6b6da329918d *R/profilepl.R 
6296b94b8966088cbbba21abf4cf0826 *R/psst.R 3e0ff319e186f140df9d2c546e609f22 *R/psstA.R c77cd0f0a4b065242f7a0cbdb4dbd66f *R/psstG.R b2ee01d8a2825e539ff3794d971d0d75 *R/qqplotppm.R 99aa240d18a1c29ee5156bc18017f63f *R/quadratmtest.R 53a3fa74d3aca679834aa16b39660bfc *R/quadrattest.R a0d7c692fc0f8f00655eb0df125ee134 *R/reach.R 511b88658d51796d9a0daf71b44b9cb4 *R/reduceformula.R c7084f7d598252e624a777b5c9e2dc1c *R/relrisk.ppm.R ac55aea892428d316f5eff49f587bc63 *R/resid4plot.R 8d47b2de2a8942a06591ad486b7ecc29 *R/residppm.R 15f1ea6eff30e3b2b696ca6e9a3f5d4f *R/residuals.mppm.R 33df985bd31bccf01ee715c7c4d30854 *R/response.R a850e312ded923aab35c7b449790b192 *R/rhohat.R 54dd7d6e9b1454ed9d5bedeaaa79f28d *R/rhohat.slrm.R 65b0c7ab11362a091411e77794942cbf *R/rmh.ppm.R 26afbaf959e8fafb6a773fecceb57b53 *R/rmhmodel.ppm.R 5f39627a3ee0c8b9188193678066e24f *R/rppm.R d6c9955f67b6ba632995629e59fcbea3 *R/satpiece.R 06ef1d6ad3f5b348e3c584f4e5411644 *R/saturated.R 2ba0f9045410335518d031b3e17c34db *R/simulate.detPPF.R 9ccf453508daa73baabd0860a9609f16 *R/simulatekppm.R 32fe2067451152aa5affd26792691e7a *R/slrm.R a7516b3caac186b37354494ae04215a1 *R/softcore.R dbf90bf31822af309792488892888558 *R/strauss.R 08a4aae6d8710ad1842c2a301194bc3b *R/strausshard.R 88e750609b0180707fb545ddd2b4ad43 *R/subfits.R 3efc2ba2c42cf5706a82f6d22336f4f2 *R/suffstat.R a77335ca398732a1c1ef4ee5fdea8e45 *R/summary.dppm.R d693b05f5b9f62d639735107f7ec4371 *R/summary.kppm.R 8efb82a7e8b81c496030b406d795cd9e *R/summary.mppm.R 22fa572d558fe64e539562831951a247 *R/summary.ppm.R 6d642b808b76ad02ca3fcedbe53a1d9c *R/sysdata.rda 376feb5fb96d24210e75a80c04b50c48 *R/traj.R d5d5535a1b90ca8cdd70be99d7a444c3 *R/triplet.family.R 954f26a72a4d5e59c68dbe379bbffe26 *R/triplets.R 9121a88c4fa8704ba9116d0f312d47eb *R/update.ppm.R 52fa1cef364a7a6dacc4d4cebc1522ac *R/varcount.R 9bf4ac1feaa193c7169d2d9e3c2b7e23 *R/vblogistic.R 13de3993f910683b44865cb8b40c3dc5 *R/vcov.kppm.R b97bc65916071ddab52ee712de6dd07a *R/vcov.mppm.R 5b729952a7364da6c371d7ea7fe99bb3 *R/vcov.ppm.R 323f7eef768ac040328b5dfb24554a73 *R/zclustermodel.R 61eff43100ed2a0cb74628e4d16ecbfc *R/zgibbsmodel.R f2d49fc8f50fef7f76bcb3fc7f34efb4 *inst/CITATION cbb2a61c61f5267b2a0db0be0b24399c *inst/doc/packagesizes.txt f5e2a9ab33f184325298589067db733c *man/AreaInter.Rd 77fc2d941ecf582f33fa9ea263fe1cb0 *man/BadGey.Rd 77010074c3c171dbafd1477064cb382c *man/Concom.Rd 65cc5132e22a9e9b36038f45189a7226 *man/DiggleGatesStibbard.Rd c59392fc5fa60782bf91c8c0675de144 *man/DiggleGratton.Rd 37b59cf12e86b4b386f26b71124e3576 *man/Extract.influence.ppm.Rd 13dd9f5f22d6722c410e52d5a8e891f8 *man/Extract.leverage.ppm.Rd 91e440f304e3e4a4e021236bcce45108 *man/Extract.msr.Rd 0af9e31e0266388d2015b5c60e88a13a *man/Fiksel.Rd 9aa95551cfb81a4645bece1172f787d6 *man/Gcom.Rd 48f800b52cb48c1f124a2dfeba305f29 *man/Geyer.Rd 4e9d8b7ce84c66d3e9535034bdb049c4 *man/Gres.Rd cf63c130bad5185033df541d11eee4a2 *man/Hardcore.Rd 77c49a32e912ecaced766cadad6476ee *man/HierHard.Rd ebcb391ba5dcf25006f76797e8140278 *man/HierStrauss.Rd 1234e600c429e1b4e513e6aafa007cec *man/HierStraussHard.Rd bc7042186f3964f72f5c761c1712fe17 *man/Hybrid.Rd e5a3d2d980667e1f6484f12e25bf0332 *man/Kcom.Rd c396e17ccc63be7995b7902317b7f3e6 *man/Kmodel.Rd b334b6e417a927af6c3ce4fd6f5a49b9 *man/Kmodel.dppm.Rd 8215ea9143c1cf169398e803a3d820e3 *man/Kmodel.kppm.Rd 95d34521ae4303bf01c8b58eecefe2ef *man/Kmodel.ppm.Rd f4b728e8b6cf41b08cffa9057567eb32 *man/Kres.Rd 9115a22a373040ef2d7209718e4fbe29 *man/LambertW.Rd dd9b87f89f595e1396d0d6d43cfd39b1 *man/LennardJones.Rd 26a9db71cd8fa55fdc1eb42afaa2907f 
*man/MultiHard.Rd 729411367178c9886a0aff7178bb09ab *man/MultiStrauss.Rd bf2dcf70457431c00a3049bb814dbb33 *man/MultiStraussHard.Rd 176bbee178c7111abc5d6a0fe97ba0fd *man/Ops.msr.Rd e61d4cfd0d9bacea2346f5c064f28fe6 *man/Ord.Rd 37b2dff8a8916eea7e7927961b3c86bc *man/OrdThresh.Rd 4602f7fedd028155f8663ff192023d91 *man/PairPiece.Rd 4f0137f0b0b6645c1c4b122e3164bc8d *man/Pairwise.Rd 084575ea7ae835815f09f0f3db1824f4 *man/Penttinen.Rd ed9f6967bf14ff3cf8625e3a1f481f81 *man/Poisson.Rd 42d2f2d41a103374a96089a73cb7e60d *man/SatPiece.Rd 586b157510810340fd0b1f34adba6819 *man/Saturated.Rd 4ee08fb76d07d7b1bf88ab740bd752f1 *man/Smooth.msr.Rd 07cfbc769f9ea63f4108bb3081942a03 *man/Softcore.Rd 1c7941698ed3ca0bdd7cfd5509ac85fc *man/Strauss.Rd 71d3fde122987d843db32b64937d4c65 *man/StraussHard.Rd 3ae98879f59c7f332081af424ada8d9a *man/Triplets.Rd 7c3e370cb6be06ff2f9d6c697aa7e113 *man/Window.ppm.Rd cafd398c293e2ed459a246483377dcdc *man/addvar.Rd cfe4ff9c272512c02c0075df2c2ecccb *man/anova.mppm.Rd 0284ba980e4228fdb2bf9f7dc0c80b0b *man/anova.ppm.Rd 2202ffac2661f560bfbbcd1bd16ea05a *man/anova.slrm.Rd efc0f7c7039d0500fc6f641949fd3859 *man/as.function.leverage.ppm.Rd 31020bac0b411f085466275e629027cd *man/as.fv.kppm.Rd ee03c3361b0e7068797ac6c189b856d4 *man/as.interact.Rd 8958109033ee84063a9046e7ac0cc0e8 *man/as.layered.msr.Rd 5cbfc596c78ffc9e6501b7b67fa6300d *man/as.owin.ppm.Rd 87caa8d70e11c26ea329d4becfaaa4fc *man/as.ppm.Rd 173b6759c3663191b13fe876efa65b4c *man/auc.Rd 3df7907e462038f9d2f410983c4be948 *man/bc.ppm.Rd bd8e47962231b637774099fd9200ecf1 *man/berman.test.Rd eeb6d11aafdb2d9606a07d6b950461cc *man/cauchy.estK.Rd 188e6aae6833a4e1ccaec2444b388a38 *man/cauchy.estpcf.Rd 36df616403688ce2e24cc7fcab2b11ce *man/cdf.test.Rd 11d2beb1ee135535dde9e211f8505922 *man/cdf.test.mppm.Rd 2e371442c54b15af5deaeb81badf3821 *man/closepaircounts.Rd ef2c1dffdd490033a64aee97eadfa701 *man/clusterfield.kppm.Rd 5ed7e3428163d94a583877715851f082 *man/clusterfit.Rd c6c31674fae6888fc4f99ce50dcb0e70 *man/clusterkernel.kppm.Rd 4732c80e73297157d2b8a290e86a9f64 *man/clusterradius.kppm.Rd 451e54545c3f0e1f6293384879428999 *man/coef.mppm.Rd 5d8447453f99ca0190e6969ce44e73bc *man/coef.ppm.Rd 653eedb3fb7e53b0f95806ac6435d9ca *man/coef.slrm.Rd 94b0f846eb376a7d73f4f08a8430d01e *man/compareFit.Rd 6e0c8912ceae9ee89bec670c9de6f135 *man/data.ppm.Rd 2d3c93bd98b836b4d9e36fbdee0bd596 *man/detpointprocfamilyfun.Rd 0a0b26263084e4f120a8d91c4a53bd72 *man/dfbetas.ppm.Rd dc84b805cd54c511675e9427adf28391 *man/dffit.ppm.Rd 06e79fa6ccd0342f62e31f27bc7d2a1a *man/diagnose.ppm.Rd 382a56f92a804d52582cf716cdf79b09 *man/dim.detpointprocfamily.Rd f9745ab059df2ca68025faa7276daafe *man/domain.ppm.Rd e93b2a5629b7d0568ee47583179a89eb *man/dppBessel.Rd 80e324406c3b304135aaacfbc5965428 *man/dppCauchy.Rd 417e2a934a63afe19d76cacdddc90775 *man/dppGauss.Rd f7a4425077102442130d630aff76f500 *man/dppMatern.Rd 4c762a8765980d7d7dacfee820d913ca *man/dppPowerExp.Rd 43fca14b1c64f1a91fbdd8ec2e56b40f *man/dppapproxkernel.Rd cd8c645ef7166114d397f3132e73d3b3 *man/dppapproxpcf.Rd edb8c34118a8284a8946cddfda3392d6 *man/dppeigen.Rd 96abd9ffe6be8d538ddffd6bcab72001 *man/dppkernel.Rd ab33fe64e341fa5771c32a420c896de5 *man/dppm.Rd 435c26403f233030ea066d2135f121c8 *man/dppparbounds.Rd 976360c41648d086a5d572c80803ee46 *man/dppspecden.Rd 1f7ad57545508cbaf3ebdf359509c96f *man/dppspecdenrange.Rd cfe1652069012d2187f56f99553015aa *man/dummify.Rd 7a2ecdd31b2b36423367755c6b328b70 *man/dummy.ppm.Rd 2f422aec12d5b327f5598ae9d6dda94c *man/eem.Rd 92f555a540e6030d41a3ef67b6aa5519 *man/effectfun.Rd 
eaf551ffc6fe85878440efe55327409b *man/emend.Rd c0fd21d2e0fa25cb2a538abed97caa31 *man/emend.ppm.Rd e46e37c463079dbc7b7011bab1e56448 *man/emend.slrm.Rd 00ed57a14838f93e9a95c519d0508c75 *man/envelope.Rd 5185efc1b4872c1a16505cea8d895f87 *man/exactMPLEstrauss.Rd 0daf1da7e9646e66c05f1202aac28638 *man/fitin.Rd f616c422a2a34c9fb3cbdf19fa03f7f6 *man/fitted.mppm.Rd 259c85984dc95363e1ada09018756be6 *man/fitted.ppm.Rd 86d12a95371dce5e73747e2ca0ac6000 *man/fitted.slrm.Rd 126dcf3300a4a2c481a3f96746fac86a *man/fixef.mppm.Rd 4195b6ccd4a3e30da91410ed007d2451 *man/formula.ppm.Rd fc0a0029900b70e4098c9091ba618b7d *man/hardcoredist.Rd f64195f95861374ed4477ef1aea0accf *man/harmonic.Rd b2477cde55321142f298c95657f38e34 *man/harmonise.msr.Rd 63542aed06ba7cc630711a415b13b15d *man/hierpair.family.Rd f810f76144f2264bfb9c93ba7da0eaf1 *man/hybrid.family.Rd 2339410f62cf4165667269b5479cc7f2 *man/ic.kppm.Rd e06064f73ff7014f24fb4d5905d73cb8 *man/improve.kppm.Rd 416b112c17bc7880ff96b2550f386346 *man/influence.ppm.Rd a42d4e31bcb6e5c0f2e605e57abd12f4 *man/inforder.family.Rd cc53f329d606ac3d65e811ca3429f8a2 *man/integral.msr.Rd 753620f42fe3e253ec926fc3a250add3 *man/intensity.dppm.Rd 059bbd1f5a9d837885b9fe9c8b616ea9 *man/intensity.ppm.Rd 60aba05382729cf1a5a8ba0e426a82b8 *man/intensity.slrm.Rd ea0fdcb46644fcf474376bdcb47be750 *man/interactionorder.Rd d6a061df58310496ea224a9c31ce65de *man/ippm.Rd ec14e120d0b24191639206945452d00c *man/is.dppm.Rd e62b5a7e6c75235da53f4c10df82c297 *man/is.hybrid.Rd 9a248a8c22b8ae9d0b9aa8f22633c70e *man/is.marked.ppm.Rd 446c5d72134417b38a7b3491d0daca16 *man/is.multitype.ppm.Rd e7ebfe3380f8e69af94ba4cd82d91d27 *man/is.poissonclusterprocess.Rd da965936384d55138f38bd22464a0b93 *man/is.ppm.Rd 95fd4fc7b7016bf8163f04d00f73d221 *man/is.stationary.ppm.Rd 4251149a436d1bf150f48f0158677bac *man/isf.object.Rd f8a79af4e1a11ddaa1f465ab1aa1b8c3 *man/kppm.Rd f2dd1380172c22650a64338b605fab80 *man/leverage.ppm.Rd 3963b8608bf5137f28e329cd401510ff *man/leverage.slrm.Rd ca6ed925465dba6ade1a636305dfde6d *man/lgcp.estK.Rd 03ed0414c823b0530291447133888cdf *man/lgcp.estpcf.Rd 7caeac313765d2713f998e992babcd1b *man/logLik.dppm.Rd ec6b9c54f62b790559987bb0e9a84149 *man/logLik.kppm.Rd f80b4614ef2a05e2b4f075522c9cab14 *man/logLik.mppm.Rd fe577f7503dbd3350aa7f62cda171edf *man/logLik.ppm.Rd 9dfb75042a18e996784832572efdb49f *man/logLik.slrm.Rd e6f9da6f717d13561c0436d75bf8745f *man/lurking.Rd 275fc51666be46726ae9f7687bfa7017 *man/lurking.mppm.Rd a2f94d84ffd9011c01a6d3e9e9a6c7e8 *man/macros/defns.Rd 3b34743a171aa19f9b4bebdc00442480 *man/matclust.estK.Rd b714a1466d550e8a1495da21d8473352 *man/matclust.estpcf.Rd eb7dd4a1eedec9dc245c35a321495e7f *man/measureContinuous.Rd ead8b17a0c7f48cbac4bbcf65f0346c9 *man/measureVariation.Rd e9d654d3139488dc3d00dd0016403e7e *man/measureWeighted.Rd 1f715bf18a1b42e00aa7c2b5af6bc491 *man/methods.dppm.Rd 42495c3b43ec06a025587dbbbc45c833 *man/methods.fii.Rd 4e0c51c79367035211628880616354d0 *man/methods.influence.ppm.Rd e3c47fa078c482fd8d25959dd35e5b57 *man/methods.kppm.Rd b56dda71cbc4fcff91743c302b8bd0ba *man/methods.leverage.ppm.Rd 96c4e8083fa5f17ae4ecbf665fdf4241 *man/methods.objsurf.Rd 5764ea8447559f139adb0860787ca3f7 *man/methods.slrm.Rd a136561728d59a318407e851e99d62b5 *man/methods.traj.Rd cc83f970d148b8a2178e7c7b649207c5 *man/methods.zclustermodel.Rd 2b48fe35d986e94632f1e727b4b68985 *man/methods.zgibbsmodel.Rd e329bdb6b376760646474a7af1161c7b *man/mincontrast.Rd bc47733e01454abe17ed482062b87989 *man/model.depends.Rd 6652aa4bbcc732174dc98f7bf45af982 *man/model.frame.ppm.Rd 
0cf20410063f83d0d76757eb0f91bd3d *man/model.images.Rd fc52408f38c7ba88f9a29fc21a865ebb *man/model.matrix.mppm.Rd 05ac435e3d1d92523320e729cfc5245e *man/model.matrix.ppm.Rd 2a367d19222418868a904b9379310b6a *man/model.matrix.slrm.Rd c4dd89ffc2944a5b89a2993272e6761f *man/mppm.Rd e904d6dd37feb03e1ea21ac968096e2e *man/msr.Rd 0ac08ae5b07184e0f102d8be4902207d *man/npfun.Rd 3040a23e36d16577cfd7580045e58558 *man/objsurf.Rd 1dec8ec3fdb5d8a1261e0e5e009779eb *man/ord.family.Rd 6c62a61638bb61fe61df3b0de17a1ac6 *man/pairsat.family.Rd 40f0043072bbeeceb6624ab10b135daa *man/pairwise.family.Rd 48cb3372afd411bc9da301de32fbc984 *man/palmdiagnose.Rd d80f08886b5ba53b2460411d07c5ed22 *man/panel.contour.Rd 8ac7d1f4c6f4650cb26fb1b1c75a9b91 *man/panysib.Rd 2c8ca1a2061b8cb04b0a66f027b1061e *man/parameters.Rd 46bd1b421cfdb322efd834dda5eb7e0a *man/parres.Rd 57cf07ec4c547ff8053d8cd431c9223f *man/plot.dppm.Rd 9d978e38a72b7cc42645d68a3b481eb1 *man/plot.influence.ppm.Rd 77366e361c23004906ba4fa11d377966 *man/plot.kppm.Rd 6da123c7fdf4ccd2b79757ba1de732f6 *man/plot.leverage.ppm.Rd 7def4361ce622ad668ac9c0571a35255 *man/plot.mppm.Rd 3bcbfcd3b88b35277c8d1d8531cc5dfb *man/plot.msr.Rd a051719251531280012c1d9a0091e3c4 *man/plot.palmdiag.Rd 717553512b9781dac73bcaf0d8c65a97 *man/plot.plotppm.Rd 423654fd5bb7167f57d9044cad307ca7 *man/plot.ppm.Rd d76dfac613d199ca04eccda9407765a6 *man/plot.profilepl.Rd 29a48bdc9d2be508ee8f66afaf0f475d *man/plot.rppm.Rd 8e1ec2835dfde51d677fb67db777ba5e *man/plot.slrm.Rd daf959532330f2c700243ef0693ffa37 *man/polynom.Rd ec02e4c545bcc24367b6f183ca8261f7 *man/ppm.Rd 84dee48d44391f70ba37241f67541738 *man/ppm.object.Rd b7e050cbd54f0c09f8229336b791688d *man/ppm.ppp.Rd 8cefc7e95a4f2333f1045bfed055e37c *man/ppmInfluence.Rd b09507146205e71a0951ccb14a0a8e6b *man/predict.dppm.Rd d9c49bac00c2bba8e4a39634b2fc24c9 *man/predict.kppm.Rd 31104a99b65b1bb03ac3c8b41ea2dec6 *man/predict.mppm.Rd 54fbc522a3440c334cce60f35cd9abcf *man/predict.ppm.Rd baf7a18910afda5c22e97a8b394f35ec *man/predict.rppm.Rd 50919f5b3bae6bb7921705dc31f8d360 *man/predict.slrm.Rd 0bf0e574c2e59c38aa75d2062fc41f3a *man/print.ppm.Rd 61098a851f6d3a52f8b1d723f65c0218 *man/profilepl.Rd a8d0bb1cb4832789478c2fd9fdff494c *man/prune.rppm.Rd b59f5f0d6b7edb61d6a0c5b1cf51da23 *man/pseudoR2.Rd 147d6cacaa5001a88042a6fcc164f4a7 *man/psib.Rd de155688c021f81d03ef837951db4113 *man/psst.Rd 72dcb8cd67280b657eb72d0adc06ba5f *man/psstA.Rd 438b5bd33fc0f17f500e583660f6b235 *man/psstG.Rd a5071e609e89fcea2ce6852d31a9b1c7 *man/qqplot.ppm.Rd d7d24fc67e99edbd5c05486de1b94b5a *man/quad.ppm.Rd d8f6b8768f42a297d3f9355a7e293362 *man/quadrat.test.Rd 893e322c102a5972d7458454063f0f2e *man/quadrat.test.mppm.Rd 9447fa98ad8da7fe327f4a5e305a926d *man/ranef.mppm.Rd 886acf818af35dbd8def4913c1818718 *man/rdpp.Rd 7713bde5c2d50ffec120a9d389273c74 *man/reach.Rd 759ba7977ab1b8e0a8dec1d606541e17 *man/reach.dppm.Rd 10f5a1a3d3c655d971399a31763aaf89 *man/reach.kppm.Rd f79b65f13e8b2eda544cd6f9a85be22c *man/relrisk.ppm.Rd ac347300d2bc3a23f9bfe4cb89aac5d9 *man/repul.Rd 2da5ea8f7abbac7632214a4b91afff91 *man/residualMeasure.Rd ff399f335cf2f573c0553b44dbe50572 *man/residuals.dppm.Rd 0418405470085449656f5fc8d4d87999 *man/residuals.kppm.Rd e71f00eff708e20cd550a5b214474e5c *man/residuals.mppm.Rd 3a6a26c32daff337e5d10a33831705dd *man/residuals.ppm.Rd 451a4e593b42f3328c1c96b915b65677 *man/residuals.rppm.Rd 8b78fdfb7fd19dbfd2473e2fa927622a *man/residuals.slrm.Rd 9e9489c133f8ff62d47b8c132de5836e *man/response.Rd 672653f694e9368d2274b3bc069c3a88 *man/rex.Rd 8e4a9b82c714a50d3cf2ca58899c3680 *man/rhohat.Rd 
ad1a5993e70cad9b5720da73ff9192d4 *man/rmh.ppm.Rd b74fce12c103d1c45c14c78ebf3e4495 *man/rmhmodel.ppm.Rd 4a83662de1ce0402451f866f2390938f *man/roc.Rd 4cd675664ab4763c2390efb0c9e92a99 *man/rppm.Rd 6d5dc98c63b345f6c10e62a5673f7296 *man/simulate.dppm.Rd d84322695f4e7fd7ab9180160e9d5338 *man/simulate.kppm.Rd 33b7b6d9e3c230b14c9197929c78676d *man/simulate.mppm.Rd 2cc959f26817dfd0b0d679f761d24aef *man/simulate.ppm.Rd 4e92e07224d7111e24cadf4b88a12b6b *man/simulate.slrm.Rd 09c1c038fcfa968e4aa1c7ce3c69cb67 *man/slrm.Rd bb7cb42edf49471a2709b8fe6c0cc49f *man/spatstat.model-internal.Rd dcfa56b7229917cfc5b3800a524971bc *man/spatstat.model-package.Rd a85f30741ee936b330a7aba10aa312d9 *man/split.msr.Rd d126bcef08db458fab3c66a99a0af3f7 *man/subfits.Rd a733bb706605ce612aca29b0401cd3fe *man/suffstat.Rd 533ab659806b6e668fa64e8e4a4ef5b0 *man/summary.dppm.Rd 373de3fc0a3e52e5cc96bef7fdbf8e22 *man/summary.kppm.Rd b4efa3cce197e3a41ea79bf622f888d2 *man/summary.ppm.Rd 75cced67f8bcb54286457554e5b615b6 *man/thomas.estK.Rd 6042bec9ffd0aafcb689dbf66ebd55bd *man/thomas.estpcf.Rd c218f32e7c8653c6ee7c8fd56eb8e2df *man/traj.Rd a9a21b880aab4e36278cd8b3d5336ed1 *man/triplet.family.Rd 7bf87c2af4b43852fae55caa442392c1 *man/unitname.Rd d11a2ad5dd1042b6caff2a7aac7aa935 *man/unstack.msr.Rd 711574530501aeff2d9a83857ccfafc6 *man/update.detpointprocfamily.Rd f49348dab19d13dfc3b087df320e5919 *man/update.dppm.Rd 7e613050b5075767ca9d48d7070dd386 *man/update.interact.Rd 8d43029f455e767d5ddf95a68d63ec09 *man/update.kppm.Rd b6bee56f84d768c3e6a362efedaf1496 *man/update.ppm.Rd 4723a912b06e488634d52908df01b1da *man/update.rppm.Rd 47bd28833a40a74899d25734078640d6 *man/valid.Rd 9449cb5c1fec24621c998a506be0eac2 *man/valid.detpointprocfamily.Rd 1ed9f6e59dad62161fc6867d14156a24 *man/valid.ppm.Rd 14f272eb6e7d0267ae74fd5a691f4f53 *man/valid.slrm.Rd 1bfd1444f843fd8ec087ab66f50934eb *man/varcount.Rd 82c2654fe6b74ae1c9550293e3c37839 *man/vargamma.estK.Rd 20bdec51627e17637f8e487de060250e *man/vargamma.estpcf.Rd b434f6798cc2ebe70ac4172191c3d800 *man/vcov.kppm.Rd e687d06726f80093deb37b6a146dcc2b *man/vcov.mppm.Rd 46c53cc502e8a04493f3027dad48789c *man/vcov.ppm.Rd 06c0ef68c545c8667e0ed7245ac0e27c *man/vcov.slrm.Rd 967466b70f99daef536e805505a877d6 *man/with.msr.Rd 46bdd584bf584fb298bfe431934e36cd *man/zclustermodel.Rd 37cbb37d6fe22ce41af0473638f0d20a *man/zgibbsmodel.Rd 105fe6d92a5238f84a485b6fc723840d *src/Ediggatsti.c f24780916477da00996c7547b140d23d *src/Ediggra.c a65b4f28f016a8588d982c6993d86485 *src/Efiksel.c 709efca16c81d63efefc4e09d0c1d264 *src/Egeyer.c cbedabde5dac5f4dab98c7c12bcea8ad *src/Estrauss.c 680e920079c2df3186e273d2055cf53a *src/areapair.c 542e6e474340e0aac3ed97f6a92777b4 *src/chunkloop.h becea4a1ea42b8a39021c01abdc6459d *src/constants.h dcf500827ff4c8b060819be2b274a7d7 *src/crossloop.h 6e43de7d33d4c17c5a20cf5ced119289 *src/init.c d4f690790bb0e2585fd2a2645e0556d2 *src/looptest.h acdf88b1cfedbbb87d81bb727761decd *src/pairloop.h 8f237ad7a5735d8bb3622b9f37a14780 *src/proto.h 615b05e6d7796156017ceac8f9589df0 *src/raster.c 668e8318237175ac56af1fcfdb41be29 *src/raster.h 5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h 99f5dbf7fcf1569ec313bbd752bba2dc *tests/testsAtoC.R 5a7777f0676fc205f414d79e5db8eac3 *tests/testsD.R 12eb1116ec68c287655097abc45c8314 *tests/testsEtoF.R de52edae9405c3c404d16d3b634238dd *tests/testsGtoJ.R 377845a4d792a122e0b5dbcd0c792789 *tests/testsK.R deb59c95affaf85791b00d9d5926289e *tests/testsL.R 7551ded8d0522a088a7f5eb2217e6f3c *tests/testsM.R cc2085dc020a742fd90306a1d8ae92e8 *tests/testsNtoO.R 2828a280bab1d91d9a3b84bb984a856d 
*tests/testsP1.R 7ca4783743403774b91a513ce21b707b *tests/testsP2.R 20e5c8322de418abe3986f58c48a3d0f *tests/testsQ.R aae0bb2006690df3251719bfc7d512c7 *tests/testsR1.R aedb94f03eba6a97ac1b9cd05c5d4448 *tests/testsR2.R efe83ed9ea03ba160ef36e9e46afcd04 *tests/testsS.R 226fd8fc48c3a6a68e6bf94e46bf1279 *tests/testsT.R 460f14afe67299e170d4fc6b8b880318 *tests/testsUtoZ.R
spatstat.model/inst/0000755000176200001440000000000014243551505014175 5ustar liggesusers
spatstat.model/inst/doc/0000755000176200001440000000000014243551505014742 5ustar liggesusers
spatstat.model/inst/doc/packagesizes.txt0000755000176200001440000000106314515336521020160 0ustar liggesusers
date version nhelpfiles nobjects ndatasets Rlines srclines
"2022-05-25" "3.0-0" 258 680 0 37534 1114
"2022-11-06" "3.0-3" 263 692 0 37739 1155
"2023-01-26" "3.1-2" 267 706 0 38848 1155
"2023-02-14" "3.2-0" 269 709 0 38930 1155
"2023-02-15" "3.2-1" 269 709 0 38930 1155
"2023-04-21" "3.2-2" 269 709 0 38944 1155
"2023-04-21" "3.2-3" 269 710 0 38944 1155
"2023-05-13" "3.2-4" 269 710 0 38957 1155
"2023-09-07" "3.2-5" 269 710 0 38974 1155
"2023-09-07" "3.2-6" 269 712 0 38974 1155
"2023-10-22" "3.2-7" 269 712 0 38922 1155
"2023-10-23" "3.2-8" 269 712 0 38922 1155
spatstat.model/inst/CITATION0000755000176200001440000000357114373051623015333 0ustar liggesusers
c(
  bibentry(bibtype = "Book",
           title = "Spatial Point Patterns: Methodology and Applications with {R}",
           author = c(person("Adrian", "Baddeley"),
                      person("Ege", "Rubak"),
                      person("Rolf", "Turner")),
           year = "2015",
           publisher = "Chapman and Hall/CRC Press",
           address = "London",
           isbn = 9781482210200,
           url = "https://www.routledge.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/p/book/9781482210200/",
           header = "To cite spatstat in publications, please use:"),
  bibentry(bibtype = "Article",
           title = "Hybrids of Gibbs Point Process Models and Their Implementation",
           author = c(person("Adrian", "Baddeley"),
                      person("Rolf", "Turner"),
                      person("Jorge", "Mateu"),
                      person("Andrew", "Bevan")),
           journal = "Journal of Statistical Software",
           year = "2013",
           volume = "55",
           number = "11",
           pages = "1--43",
           doi = "10.18637/jss.v055.i11",
           header = "If you use hybrid models, please also cite:"),
  bibentry(bibtype = "Article",
           title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns",
           author = c(person("Adrian", "Baddeley"),
                      person("Rolf", "Turner")),
           journal = "Journal of Statistical Software",
           year = "2005",
           volume = "12",
           number = "6",
           pages = "1--42",
           doi = "10.18637/jss.v012.i06",
           header = "In survey articles, please also cite the original paper on spatstat:")
)
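## ---------------------------------------------------------------------
## Editor's sketch (not part of the package archive): how the CITATION
## entries above reach users. citation() and toBibtex() are standard R
## utilities from the utils package; the exact output is indicative only.
##
##   cit <- citation("spatstat.model")   # reads inst/CITATION
##   print(cit, style = "text")          # formatted references, with the headers above
##   toBibtex(cit)                       # BibTeX records for reference managers
## ---------------------------------------------------------------------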